trait GradFn
Gradient function trait - nodes in computation graph
Source: autograd.joule:8
fn backward(&self, grad_output: &Tensor) -> Vec<Tensor>;
Compute gradients given upstream gradient
Source: autograd.joule:10
fn inputs(&self) -> &[&Tensor];
Get input tensors that need gradients
Source: autograd.joule:13
fn name(&self) -> &str;
Name for debugging
Source: autograd.joule:16
struct ComputationGraph
Computation graph for automatic differentiation
Source: autograd.joule:20
struct GraphNode
A node in the computation graph
Source: autograd.joule:30
fn new() -> Self
Create a new computation graph
Source: autograd.joule:39
fn register_leaf(&mut self, tensor: &mut Tensor) -> u64
Source: autograd.joule:49
fn register_op(
Source: autograd.joule:68
fn backward(&mut self, output_id: u64, grad_output: Option<Tensor>)
Source: autograd.joule:92
fn get_grad(&self, tensor_id: u64) -> Option<&Tensor>
Get gradient for a tensor
Source: autograd.joule:146
fn zero_grad(&mut self)
Clear all gradients
Source: autograd.joule:152
fn clear(&mut self)
Clear entire graph
Source: autograd.joule:159
fn current_graph() -> &'static std::cell::RefCell<ComputationGraph>
Get current computation graph
Source: autograd.joule:173
fn grad(output: &Tensor, inputs: &[&Tensor]) -> Vec<Tensor>
Source: autograd.joule:187
fn grad_single(output: &Tensor, input: &Tensor) -> Tensor
Compute gradient with respect to single input
Source: autograd.joule:196
struct NoGrad;
Disable gradient computation (for inference)
Source: autograd.joule:201
fn new() -> Self
Create no-grad context
Source: autograd.joule:205
fn drop(&mut self)
Source: autograd.joule:212
fn is_grad_enabled() -> bool
Check if gradient computation is enabled
Source: autograd.joule:222
fn set_grad_enabled(enabled: bool)
Set gradient computation state
Source: autograd.joule:227
fn no_grad<F, T>(f: F) -> T
Source: autograd.joule:233
fn enable_grad<F, T>(f: F) -> T
Execute closure with gradients enabled
Source: autograd.joule:242
struct AddGradFn
Addition gradient
Source: autograd.joule:258
fn new(a: &Tensor, b: &Tensor) -> Self
Source: autograd.joule:264
fn backward(&self, grad_output: &Tensor) -> Vec<Tensor>
Source: autograd.joule:273
fn inputs(&self) -> &[&Tensor]
Source: autograd.joule:280
fn name(&self) -> &str
Source: autograd.joule:281
struct MulGradFn
Multiplication gradient
Source: autograd.joule:285
fn new(a: &Tensor, b: &Tensor) -> Self
Source: autograd.joule:291
fn backward(&self, grad_output: &Tensor) -> Vec<Tensor>
Source: autograd.joule:300
fn inputs(&self) -> &[&Tensor]
Source: autograd.joule:307
fn name(&self) -> &str
Source: autograd.joule:308
struct MatMulGradFn
Matrix multiplication gradient
Source: autograd.joule:312
fn new(a: &Tensor, b: &Tensor) -> Self
Source: autograd.joule:318
fn backward(&self, grad_output: &Tensor) -> Vec<Tensor>
Source: autograd.joule:327
fn inputs(&self) -> &[&Tensor]
Source: autograd.joule:335
fn name(&self) -> &str
Source: autograd.joule:336
struct PowGradFn
Power gradient
Source: autograd.joule:340
fn new(base: &Tensor, exponent: f64) -> Self
Source: autograd.joule:346
fn backward(&self, grad_output: &Tensor) -> Vec<Tensor>
Source: autograd.joule:355
fn inputs(&self) -> &[&Tensor]
Source: autograd.joule:363
fn name(&self) -> &str
Source: autograd.joule:364
struct SumGradFn
Sum gradient
Source: autograd.joule:368
fn new(input: &Tensor, dims: Option<Vec<i64>>, keepdim: bool) -> Self
Source: autograd.joule:375
fn backward(&self, grad_output: &Tensor) -> Vec<Tensor>
Source: autograd.joule:385
fn inputs(&self) -> &[&Tensor]
Source: autograd.joule:402
fn name(&self) -> &str
Source: autograd.joule:403
struct MeanGradFn
Mean gradient
Source: autograd.joule:407
fn new(input: &Tensor, dims: Option<Vec<i64>>, keepdim: bool) -> Self
Source: autograd.joule:415
fn backward(&self, grad_output: &Tensor) -> Vec<Tensor>
Source: autograd.joule:432
fn inputs(&self) -> &[&Tensor]
Source: autograd.joule:443
fn name(&self) -> &str
Source: autograd.joule:444
struct ReluGradFn
ReLU gradient
Source: autograd.joule:448
fn new(input: &Tensor) -> Self
Source: autograd.joule:453
fn backward(&self, grad_output: &Tensor) -> Vec<Tensor>
Source: autograd.joule:461
fn inputs(&self) -> &[&Tensor]
Source: autograd.joule:466
fn name(&self) -> &str
Source: autograd.joule:467
struct SigmoidGradFn
Sigmoid gradient
Source: autograd.joule:471
fn new(output: &Tensor) -> Self
Source: autograd.joule:476
fn backward(&self, grad_output: &Tensor) -> Vec<Tensor>
Source: autograd.joule:484
fn inputs(&self) -> &[&Tensor]
Source: autograd.joule:491
fn name(&self) -> &str
Source: autograd.joule:492
struct TanhGradFn
Tanh gradient
Source: autograd.joule:496
fn new(output: &Tensor) -> Self
Source: autograd.joule:501
fn backward(&self, grad_output: &Tensor) -> Vec<Tensor>
Source: autograd.joule:509
fn inputs(&self) -> &[&Tensor]
Source: autograd.joule:516
fn name(&self) -> &str
Source: autograd.joule:517
struct SoftmaxGradFn
Softmax gradient
Source: autograd.joule:521
fn new(output: &Tensor, dim: i64) -> Self
Source: autograd.joule:527
fn backward(&self, grad_output: &Tensor) -> Vec<Tensor>
Source: autograd.joule:536
fn inputs(&self) -> &[&Tensor]
Source: autograd.joule:544
fn name(&self) -> &str
Source: autograd.joule:545
struct LogSoftmaxGradFn
Log softmax gradient
Source: autograd.joule:549
fn new(output: &Tensor, dim: i64) -> Self
Source: autograd.joule:555
fn backward(&self, grad_output: &Tensor) -> Vec<Tensor>
Source: autograd.joule:564
fn inputs(&self) -> &[&Tensor]
Source: autograd.joule:572
fn name(&self) -> &str
Source: autograd.joule:573
struct ExpGradFn
Exp gradient
Source: autograd.joule:577
fn new(output: &Tensor) -> Self
Source: autograd.joule:582
fn backward(&self, grad_output: &Tensor) -> Vec<Tensor>
Source: autograd.joule:590
fn inputs(&self) -> &[&Tensor]
Source: autograd.joule:595
fn name(&self) -> &str
Source: autograd.joule:596
struct LogGradFn
Log gradient
Source: autograd.joule:600
fn new(input: &Tensor) -> Self
Source: autograd.joule:605
fn backward(&self, grad_output: &Tensor) -> Vec<Tensor>
Source: autograd.joule:613
fn inputs(&self) -> &[&Tensor]
Source: autograd.joule:618
fn name(&self) -> &str
Source: autograd.joule:619
fn maybe_reduce_grad(grad: &Tensor, target_shape: &[usize]) -> Tensor
Reduce gradient to match target shape (handles broadcasting)
Source: autograd.joule:627
fn jacobian(output: &Tensor, input: &Tensor) -> Tensor
Source: autograd.joule:662
fn gradcheck(
Source: autograd.joule:685