Engine#

Optimizers#

Optimizer classes.

class nerfstudio.engine.optimizers.AdamOptimizerConfig(_target: ~typing.Type = <class 'torch.optim.adam.Adam'>, lr: float = 0.0005, eps: float = 1e-08, max_norm: ~typing.Optional[float] = None, weight_decay: float = 0)[source]#

Bases: OptimizerConfig

Basic optimizer config with Adam

weight_decay: float = 0#

The weight decay to use.

class nerfstudio.engine.optimizers.OptimizerConfig(_target: ~typing.Type = <class 'torch.optim.adam.Adam'>, lr: float = 0.0005, eps: float = 1e-08, max_norm: ~typing.Optional[float] = None)[source]#

Bases: PrintableConfig

Basic optimizer config with Adam as the default target

eps: float = 1e-08#

The epsilon value to use.

lr: float = 0.0005#

The learning rate to use.

max_norm: Optional[float] = None#

The max norm to use for gradient clipping.

setup(params) → Optimizer[source]#

Returns the instantiated object using the config.
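
For illustration, a minimal sketch of configuring and instantiating an optimizer via setup(); the small nn.Linear stand-in for a model is made up for this example.

```python
import torch
from torch import nn

from nerfstudio.engine.optimizers import AdamOptimizerConfig

# Stand-in parameters to optimize (in practice these come from a model or pipeline).
params = list(nn.Linear(3, 3).parameters())

# Configure Adam, then instantiate the underlying torch.optim.Adam via setup().
optimizer = AdamOptimizerConfig(lr=1e-3, weight_decay=1e-6).setup(params)
assert isinstance(optimizer, torch.optim.Adam)
```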

class nerfstudio.engine.optimizers.Optimizers(config: Dict[str, Any], param_groups: Dict[str, List[Parameter]])[source]#

Bases: object

A set of optimizers.

Parameters:
  • config – A dictionary of per-group optimizer (and scheduler) configurations, keyed by parameter group name.

  • param_groups – A dictionary of parameter groups to optimize.
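
A hedged sketch of constructing an Optimizers container. The per-group layout with "optimizer" and "scheduler" keys mirrors the convention used by nerfstudio method configurations; the group names and tiny modules below are illustrative only.

```python
from torch import nn

from nerfstudio.engine.optimizers import AdamOptimizerConfig, Optimizers, RAdamOptimizerConfig
from nerfstudio.engine.schedulers import ExponentialDecaySchedulerConfig, MultiStepSchedulerConfig

# Illustrative parameter groups (normally produced by a pipeline's get_param_groups()).
param_groups = {
    "fields": list(nn.Linear(3, 3).parameters()),
    "camera_opt": list(nn.Linear(3, 3).parameters()),
}

# One optimizer config (and scheduler config) per parameter group, keyed by group name.
config = {
    "fields": {
        "optimizer": AdamOptimizerConfig(lr=1e-2, eps=1e-15),
        "scheduler": ExponentialDecaySchedulerConfig(lr_final=1e-4, max_steps=100000),
    },
    "camera_opt": {
        "optimizer": RAdamOptimizerConfig(lr=1e-3),
        "scheduler": MultiStepSchedulerConfig(max_steps=1000000),
    },
}

optimizers = Optimizers(config, param_groups)
```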

load_optimizers(loaded_state: Dict[str, Any]) → None[source]#

Helper to load the optimizer state from a previous checkpoint

Parameters:

loaded_state – the state from the previous checkpoint
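
Continuing from the optimizers object above, a sketch of restoring optimizer state; the checkpoint filename and the "optimizers" key are assumptions about how the checkpoint was written and should be matched to your own saving code.

```python
import torch

# Hypothetical checkpoint layout: adapt the key to however the state was saved.
loaded_state = torch.load("step-000030000.ckpt", map_location="cpu")
optimizers.load_optimizers(loaded_state["optimizers"])
```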

optimizer_scaler_step_all(grad_scaler: GradScaler) → None[source]#

Take an optimizer step using a grad scaler.

Parameters:

grad_scaler – GradScaler to use
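
A mixed-precision step sketch, continuing from the optimizers example above; compute_loss() is a placeholder for the model's forward pass and loss.

```python
import torch
from torch.cuda.amp import GradScaler

grad_scaler = GradScaler()

optimizers.zero_grad_all()
with torch.autocast(device_type="cuda", dtype=torch.float16):
    loss = compute_loss()  # placeholder for the actual forward pass / loss
grad_scaler.scale(loss).backward()
optimizers.optimizer_scaler_step_all(grad_scaler)  # steps each optimizer through the scaler
grad_scaler.update()
```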

optimizer_step(param_group_name: str) → None[source]#

Fetch and step the corresponding optimizer.

Parameters:

param_group_name – name of optimizer to step forward

optimizer_step_all() → None[source]#

Run step for all optimizers.

scheduler_step(param_group_name: str) → None[source]#

Fetch and step the corresponding scheduler.

Parameters:

param_group_name – name of scheduler to step forward

scheduler_step_all(step: int) → None[source]#

Run step for all schedulers.

Parameters:

step – the current step

zero_grad_all() → None[source]#

Zero the gradients for all optimizer parameters.
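
Putting the step helpers together, a full-precision training-iteration sketch, again continuing from the optimizers example above; num_iterations and compute_loss() are placeholders.

```python
for step in range(num_iterations):
    optimizers.zero_grad_all()           # clear gradients for every optimizer
    loss = compute_loss()                # placeholder forward pass / loss
    loss.backward()
    optimizers.optimizer_step_all()      # step every optimizer
    optimizers.scheduler_step_all(step)  # advance every learning-rate scheduler
```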

class nerfstudio.engine.optimizers.RAdamOptimizerConfig(_target: ~typing.Type = <class 'torch.optim.radam.RAdam'>, lr: float = 0.0005, eps: float = 1e-08, max_norm: ~typing.Optional[float] = None, weight_decay: float = 0)[source]#

Bases: OptimizerConfig

Basic optimizer config with RAdam

weight_decay: float = 0#

The weight decay to use.

Schedulers#

Scheduler classes.

class nerfstudio.engine.schedulers.CosineDecayScheduler(config: SchedulerConfig)[source]#

Bases: Scheduler

Cosine decay scheduler with linear warmup

get_scheduler(optimizer: Optimizer, lr_init: float) → LRScheduler[source]#

Returns the scheduler object.

Parameters:
  • optimizer – The optimizer to use.

  • lr_init – The initial learning rate.

Returns:

The scheduler object.

class nerfstudio.engine.schedulers.CosineDecaySchedulerConfig(_target: ~typing.Type = <factory>, warm_up_end: int = 5000, learning_rate_alpha: float = 0.05, max_steps: int = 300000)[source]#

Bases: SchedulerConfig

Config for cosine decay schedule

learning_rate_alpha: float = 0.05#

Learning rate alpha value; the decayed learning rate bottoms out at this fraction of the initial rate.

max_steps: int = 300000#

The maximum number of steps.

warm_up_end: int = 5000#

Iteration number where warm-up ends
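
A small sketch of building the cosine decay scheduler directly from its config; the toy Adam optimizer here is only for illustration.

```python
import torch
from torch import nn

from nerfstudio.engine.schedulers import CosineDecayScheduler, CosineDecaySchedulerConfig

optimizer = torch.optim.Adam(nn.Linear(3, 3).parameters(), lr=1e-3)

# Linear warmup for the first 5000 iterations, then cosine decay until step 300000.
config = CosineDecaySchedulerConfig(warm_up_end=5000, learning_rate_alpha=0.05, max_steps=300000)
lr_scheduler = CosineDecayScheduler(config).get_scheduler(optimizer=optimizer, lr_init=1e-3)
```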

class nerfstudio.engine.schedulers.ExponentialDecayScheduler(config: SchedulerConfig)[source]#

Bases: Scheduler

Exponential decay scheduler with warmup. The scheduler first ramps up from lr_pre_warmup to lr_init over warmup_steps steps (using a linear or cosine ramp), then decays exponentially to lr_final by max_steps steps.

get_scheduler(optimizer: Optimizer, lr_init: float) → LRScheduler[source]#

Returns the scheduler object.

Parameters:
  • optimizer – The optimizer to use.

  • lr_init – The initial learning rate.

Returns:

The scheduler object.

class nerfstudio.engine.schedulers.ExponentialDecaySchedulerConfig(_target: ~typing.Type = <factory>, lr_pre_warmup: float = 1e-08, lr_final: ~typing.Optional[float] = None, warmup_steps: int = 0, max_steps: int = 100000, ramp: ~typing.Literal['linear', 'cosine'] = 'cosine')[source]#

Bases: SchedulerConfig

Config for exponential decay scheduler with warmup

lr_final: Optional[float] = None#

Final learning rate. If not provided, it will be set to the optimizer's learning rate.

lr_pre_warmup: float = 1e-08#

Learning rate before warmup.

max_steps: int = 100000#

The maximum number of steps.

ramp: Literal['linear', 'cosine'] = 'cosine'#

The ramp function to use during the warmup.

warmup_steps: int = 0#

Number of warmup steps.
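
Similarly, a sketch for the exponential decay scheduler; the toy optimizer is illustrative, and the scheduler is stepped once per training iteration (the Optimizers container above does this via scheduler_step_all).

```python
import torch
from torch import nn

from nerfstudio.engine.schedulers import ExponentialDecayScheduler, ExponentialDecaySchedulerConfig

optimizer = torch.optim.Adam(nn.Linear(3, 3).parameters(), lr=1e-2)

# Ramp from lr_pre_warmup to the optimizer's rate over 500 steps, then decay toward lr_final.
config = ExponentialDecaySchedulerConfig(lr_final=1e-4, warmup_steps=500, max_steps=100000, ramp="cosine")
lr_scheduler = ExponentialDecayScheduler(config).get_scheduler(optimizer=optimizer, lr_init=1e-2)
lr_scheduler.step()  # called once per iteration, after the optimizer step
```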

class nerfstudio.engine.schedulers.MultiStepScheduler(config: SchedulerConfig)[source]#

Bases: Scheduler

Multi-step scheduler where the learning rate decays by gamma at every milestone

get_scheduler(optimizer: Optimizer, lr_init: float) → LRScheduler[source]#

Returns the scheduler object.

Parameters:
  • optimizer – The optimizer to use.

  • lr_init – The initial learning rate.

Returns:

The scheduler object.

class nerfstudio.engine.schedulers.MultiStepSchedulerConfig(_target: ~typing.Type = <factory>, max_steps: int = 1000000, gamma: float = 0.33, milestones: ~typing.Tuple[int, ...] = (500000, 750000, 900000))[source]#

Bases: SchedulerConfig

Config for the multi-step scheduler where the learning rate decays by gamma at every milestone

gamma: float = 0.33#

The learning rate decay factor.

max_steps: int = 1000000#

The maximum number of steps.

milestones: Tuple[int, ...] = (500000, 750000, 900000)#

The milestone steps at which to decay the learning rate.
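
And one for the multi-step scheduler, which multiplies the learning rate by gamma at each milestone; the toy optimizer is illustrative.

```python
import torch
from torch import nn

from nerfstudio.engine.schedulers import MultiStepScheduler, MultiStepSchedulerConfig

optimizer = torch.optim.Adam(nn.Linear(3, 3).parameters(), lr=1e-2)

# lr becomes 1e-2 * 0.33 at step 500000, * 0.33**2 at 750000, * 0.33**3 at 900000.
config = MultiStepSchedulerConfig(max_steps=1000000, gamma=0.33, milestones=(500000, 750000, 900000))
lr_scheduler = MultiStepScheduler(config).get_scheduler(optimizer=optimizer, lr_init=1e-2)
```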

class nerfstudio.engine.schedulers.Scheduler(config: SchedulerConfig)[source]#

Bases: object

Base scheduler

abstract get_scheduler(optimizer: Optimizer, lr_init: float) → LRScheduler[source]#

Abstract method that returns a scheduler object.

Parameters:
  • optimizer – The optimizer to use.

  • lr_init – The initial learning rate.

Returns:

The scheduler object.

class nerfstudio.engine.schedulers.SchedulerConfig(_target: ~typing.Type = <factory>)[source]#

Bases: InstantiateConfig

Basic scheduler config
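
New schedules plug in by pairing a SchedulerConfig subclass with a Scheduler subclass that implements get_scheduler. A hypothetical sketch; ConstantScheduler and ConstantSchedulerConfig are made-up names, not part of nerfstudio.

```python
from dataclasses import dataclass, field
from typing import Type

from torch.optim import Optimizer, lr_scheduler

from nerfstudio.engine.schedulers import Scheduler, SchedulerConfig


class ConstantScheduler(Scheduler):
    """Hypothetical scheduler that keeps the learning rate fixed at lr_init."""

    def get_scheduler(self, optimizer: Optimizer, lr_init: float) -> lr_scheduler.LRScheduler:
        # A constant multiplicative factor of 1.0 leaves the learning rate unchanged.
        return lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda step: 1.0)


@dataclass
class ConstantSchedulerConfig(SchedulerConfig):
    """Config for the hypothetical constant scheduler."""

    _target: Type = field(default_factory=lambda: ConstantScheduler)
```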