← Back

Module autograd

trait GradFn

Gradient function trait - nodes in computation graph

Source: autograd.joule:8

fn backward(&self, grad_output: &Tensor) -> Vec<Tensor>;

Compute gradients given upstream gradient

Source: autograd.joule:10

fn inputs(&self) -> &[&Tensor];

Get input tensors that need gradients

Source: autograd.joule:13

fn name(&self) -> &str;

Name for debugging

Source: autograd.joule:16

struct ComputationGraph

Computation graph for automatic differentiation

Source: autograd.joule:20

struct GraphNode

A node in the computation graph

Source: autograd.joule:30

fn new() -> Self

Create a new computation graph

Source: autograd.joule:39

fn register_leaf(&mut self, tensor: &mut Tensor) -> u64

Register a tensor as a leaf node in the graph, returning its node id

Source: autograd.joule:49

fn register_op(

Source: autograd.joule:68

fn backward(&mut self, output_id: u64, grad_output: Option<Tensor>)

Run the backward pass starting from the given output node, optionally seeded with grad_output

Source: autograd.joule:92

fn get_grad(&self, tensor_id: u64) -> Option<&Tensor>

Get gradient for a tensor

Source: autograd.joule:146

fn zero_grad(&mut self)

Clear all gradients

Source: autograd.joule:152

fn clear(&mut self)

Clear entire graph

Source: autograd.joule:159

fn current_graph() -> &'static std::cell::RefCell<ComputationGraph>

Get current computation graph

Source: autograd.joule:173

fn grad(output: &Tensor, inputs: &[&Tensor]) -> Vec<Tensor>

Compute gradients of output with respect to each of the given inputs

Source: autograd.joule:187

fn grad_single(output: &Tensor, input: &Tensor) -> Tensor

Compute gradient with respect to single input

Source: autograd.joule:196

struct NoGrad

Disable gradient computation (for inference)

Source: autograd.joule:201

fn new() -> Self

Create no-grad context

Source: autograd.joule:205

fn drop(&mut self)

Runs when the no-grad context ends — presumably restores the prior gradient-enabled state (verify against source)

Source: autograd.joule:212

fn is_grad_enabled() -> bool

Check if gradient computation is enabled

Source: autograd.joule:222

fn set_grad_enabled(enabled: bool)

Set gradient computation state

Source: autograd.joule:227

fn no_grad<F, T>(f: F) -> T

Execute closure with gradients disabled

Source: autograd.joule:233

fn enable_grad<F, T>(f: F) -> T

Execute closure with gradients enabled

Source: autograd.joule:242

struct AddGradFn

Addition gradient

Source: autograd.joule:258

fn new(a: &Tensor, b: &Tensor) -> Self

Source: autograd.joule:264

fn backward(&self, grad_output: &Tensor) -> Vec<Tensor>

Source: autograd.joule:273

fn inputs(&self) -> &[&Tensor]

Source: autograd.joule:280

fn name(&self) -> &str

Source: autograd.joule:281

struct MulGradFn

Multiplication gradient

Source: autograd.joule:285

fn new(a: &Tensor, b: &Tensor) -> Self

Source: autograd.joule:291

fn backward(&self, grad_output: &Tensor) -> Vec<Tensor>

Source: autograd.joule:300

fn inputs(&self) -> &[&Tensor]

Source: autograd.joule:307

fn name(&self) -> &str

Source: autograd.joule:308

struct MatMulGradFn

Matrix multiplication gradient

Source: autograd.joule:312

fn new(a: &Tensor, b: &Tensor) -> Self

Source: autograd.joule:318

fn backward(&self, grad_output: &Tensor) -> Vec<Tensor>

Source: autograd.joule:327

fn inputs(&self) -> &[&Tensor]

Source: autograd.joule:335

fn name(&self) -> &str

Source: autograd.joule:336

struct PowGradFn

Power gradient

Source: autograd.joule:340

fn new(base: &Tensor, exponent: f64) -> Self

Source: autograd.joule:346

fn backward(&self, grad_output: &Tensor) -> Vec<Tensor>

Source: autograd.joule:355

fn inputs(&self) -> &[&Tensor]

Source: autograd.joule:363

fn name(&self) -> &str

Source: autograd.joule:364

struct SumGradFn

Sum gradient

Source: autograd.joule:368

fn new(input: &Tensor, dims: Option<Vec<i64>>, keepdim: bool) -> Self

Source: autograd.joule:375

fn backward(&self, grad_output: &Tensor) -> Vec<Tensor>

Source: autograd.joule:385

fn inputs(&self) -> &[&Tensor]

Source: autograd.joule:402

fn name(&self) -> &str

Source: autograd.joule:403

struct MeanGradFn

Mean gradient

Source: autograd.joule:407

fn new(input: &Tensor, dims: Option<Vec<i64>>, keepdim: bool) -> Self

Source: autograd.joule:415

fn backward(&self, grad_output: &Tensor) -> Vec<Tensor>

Source: autograd.joule:432

fn inputs(&self) -> &[&Tensor]

Source: autograd.joule:443

fn name(&self) -> &str

Source: autograd.joule:444

struct ReluGradFn

ReLU gradient

Source: autograd.joule:448

fn new(input: &Tensor) -> Self

Source: autograd.joule:453

fn backward(&self, grad_output: &Tensor) -> Vec<Tensor>

Source: autograd.joule:461

fn inputs(&self) -> &[&Tensor]

Source: autograd.joule:466

fn name(&self) -> &str

Source: autograd.joule:467

struct SigmoidGradFn

Sigmoid gradient

Source: autograd.joule:471

fn new(output: &Tensor) -> Self

Source: autograd.joule:476

fn backward(&self, grad_output: &Tensor) -> Vec<Tensor>

Source: autograd.joule:484

fn inputs(&self) -> &[&Tensor]

Source: autograd.joule:491

fn name(&self) -> &str

Source: autograd.joule:492

struct TanhGradFn

Tanh gradient

Source: autograd.joule:496

fn new(output: &Tensor) -> Self

Source: autograd.joule:501

fn backward(&self, grad_output: &Tensor) -> Vec<Tensor>

Source: autograd.joule:509

fn inputs(&self) -> &[&Tensor]

Source: autograd.joule:516

fn name(&self) -> &str

Source: autograd.joule:517

struct SoftmaxGradFn

Softmax gradient

Source: autograd.joule:521

fn new(output: &Tensor, dim: i64) -> Self

Source: autograd.joule:527

fn backward(&self, grad_output: &Tensor) -> Vec<Tensor>

Source: autograd.joule:536

fn inputs(&self) -> &[&Tensor]

Source: autograd.joule:544

fn name(&self) -> &str

Source: autograd.joule:545

struct LogSoftmaxGradFn

Log softmax gradient

Source: autograd.joule:549

fn new(output: &Tensor, dim: i64) -> Self

Source: autograd.joule:555

fn backward(&self, grad_output: &Tensor) -> Vec<Tensor>

Source: autograd.joule:564

fn inputs(&self) -> &[&Tensor]

Source: autograd.joule:572

fn name(&self) -> &str

Source: autograd.joule:573

struct ExpGradFn

Exp gradient

Source: autograd.joule:577

fn new(output: &Tensor) -> Self

Source: autograd.joule:582

fn backward(&self, grad_output: &Tensor) -> Vec<Tensor>

Source: autograd.joule:590

fn inputs(&self) -> &[&Tensor]

Source: autograd.joule:595

fn name(&self) -> &str

Source: autograd.joule:596

struct LogGradFn

Log gradient

Source: autograd.joule:600

fn new(input: &Tensor) -> Self

Source: autograd.joule:605

fn backward(&self, grad_output: &Tensor) -> Vec<Tensor>

Source: autograd.joule:613

fn inputs(&self) -> &[&Tensor]

Source: autograd.joule:618

fn name(&self) -> &str

Source: autograd.joule:619

fn maybe_reduce_grad(grad: &Tensor, target_shape: &[usize]) -> Tensor

Reduce gradient to match target shape (handles broadcasting)

Source: autograd.joule:627

fn jacobian(output: &Tensor, input: &Tensor) -> Tensor

Compute the Jacobian of output with respect to input

Source: autograd.joule:662

fn gradcheck(

Source: autograd.joule:685