enum Reduction
Source: loss.joule:9
fn apply(&self, tensor: &Tensor) -> Tensor — Apply reduction to tensor
Source: loss.joule:17
struct MSELoss — Mean Squared Error Loss: L = (1/n) * sum((y_pred - y_true)^2)
Source: loss.joule:33
fn new(reduction: Reduction) -> Self — Source: loss.joule:38
fn forward(&self, input: &Tensor, target: &Tensor) -> Tensor — Source: loss.joule:43
struct L1Loss — L1 Loss (Mean Absolute Error): L = (1/n) * sum(|y_pred - y_true|)
Source: loss.joule:52
fn new(reduction: Reduction) -> Self — Source: loss.joule:57
fn forward(&self, input: &Tensor, target: &Tensor) -> Tensor — Source: loss.joule:62
struct SmoothL1Loss — Smooth L1 Loss (Huber Loss): L = 0.5 * (y_pred - y_true)^2 / beta if |y_pred - y_true| < beta; L = |y_pred - y_true| - 0.5 * beta otherwise
Source: loss.joule:72
fn new(reduction: Reduction, beta: f64) -> Self — Source: loss.joule:78
fn forward(&self, input: &Tensor, target: &Tensor) -> Tensor — Source: loss.joule:83
struct HuberLoss — Huber Loss (same as Smooth L1 with delta instead of beta)
Source: loss.joule:99
fn new(reduction: Reduction, delta: f64) -> Self — Source: loss.joule:105
fn forward(&self, input: &Tensor, target: &Tensor) -> Tensor — Source: loss.joule:110
struct CrossEntropyLoss — Cross Entropy Loss: L = -sum(y_true * log(softmax(y_pred)))
Source: loss.joule:131
fn new(reduction: Reduction) -> Self — Source: loss.joule:139
fn with_weight(mut self, weight: Tensor) -> Self — Source: loss.joule:148
fn with_ignore_index(mut self, index: i64) -> Self — Source: loss.joule:153
fn with_label_smoothing(mut self, smoothing: f64) -> Self — Source: loss.joule:158
fn forward(&self, input: &Tensor, target: &Tensor) -> Tensor — Source: loss.joule:164
struct BCELoss — Binary Cross Entropy Loss: L = -(y_true * log(y_pred) + (1 - y_true) * log(1 - y_pred))
Source: loss.joule:220
fn new(reduction: Reduction) -> Self — Source: loss.joule:226
fn with_weight(mut self, weight: Tensor) -> Self — Source: loss.joule:233
fn forward(&self, input: &Tensor, target: &Tensor) -> Tensor — Source: loss.joule:239
struct BCEWithLogitsLoss — Binary Cross Entropy with Logits Loss; more numerically stable version that combines sigmoid with BCE
Source: loss.joule:259
fn new(reduction: Reduction) -> Self — Source: loss.joule:266
fn with_weight(mut self, weight: Tensor) -> Self — Source: loss.joule:274
fn with_pos_weight(mut self, pos_weight: Tensor) -> Self — Source: loss.joule:279
fn forward(&self, input: &Tensor, target: &Tensor) -> Tensor — Source: loss.joule:285
struct NLLLoss — Negative Log Likelihood Loss: L = -log(p[target])
Source: loss.joule:317
fn new(reduction: Reduction) -> Self — Source: loss.joule:324
fn forward(&self, input: &Tensor, target: &Tensor) -> Tensor — Source: loss.joule:333
struct FocalLoss — Focal Loss (for class imbalance): L = -alpha * (1 - p)^gamma * log(p)
Source: loss.joule:360
fn new(reduction: Reduction, alpha: f64, gamma: f64) -> Self — Source: loss.joule:367
fn forward(&self, input: &Tensor, target: &Tensor) -> Tensor — Source: loss.joule:372
struct TripletMarginLoss — Triplet Margin Loss: L = max(0, ||anchor - positive|| - ||anchor - negative|| + margin)
Source: loss.joule:399
fn new(reduction: Reduction, margin: f64, p: f64, swap: bool) -> Self — Source: loss.joule:407
fn forward(&self, anchor: &Tensor, positive: &Tensor, negative: &Tensor) -> Tensor — Source: loss.joule:412
struct ContrastiveLoss — Contrastive Loss: L = (1 - y) * d^2 + y * max(0, margin - d)^2
Source: loss.joule:433
fn new(reduction: Reduction, margin: f64) -> Self — Source: loss.joule:439
fn forward(&self, x1: &Tensor, x2: &Tensor, y: &Tensor) -> Tensor — Source: loss.joule:444
struct CosineEmbeddingLoss — Cosine Embedding Loss: L = 1 - cos(x1, x2) if y = 1; L = max(0, cos(x1, x2) - margin) if y = -1
Source: loss.joule:462
fn new(reduction: Reduction, margin: f64) -> Self — Source: loss.joule:468
fn forward(&self, x1: &Tensor, x2: &Tensor, y: &Tensor) -> Tensor — Source: loss.joule:473
struct MarginRankingLoss — Margin Ranking Loss: L = max(0, -y * (x1 - x2) + margin)
Source: loss.joule:494
fn new(reduction: Reduction, margin: f64) -> Self — Source: loss.joule:500
fn forward(&self, x1: &Tensor, x2: &Tensor, y: &Tensor) -> Tensor — Source: loss.joule:505
struct HingeEmbeddingLoss — Hinge Embedding Loss: L = x if y = 1; L = max(0, margin - x) if y = -1
Source: loss.joule:519
fn new(reduction: Reduction, margin: f64) -> Self — Source: loss.joule:525
fn forward(&self, input: &Tensor, target: &Tensor) -> Tensor — Source: loss.joule:530
struct KLDivLoss — KL Divergence Loss: L = sum(p * log(p / q))
Source: loss.joule:548
fn new(reduction: Reduction, log_target: bool) -> Self — Source: loss.joule:554
fn forward(&self, input: &Tensor, target: &Tensor) -> Tensor — Source: loss.joule:559
struct GaussianNLLLoss — Gaussian NLL Loss: L = 0.5 * (log(var) + (input - target)^2 / var + log(2*pi))
Source: loss.joule:585
fn new(reduction: Reduction, full: bool, eps: f64) -> Self — Source: loss.joule:592
fn forward(&self, input: &Tensor, target: &Tensor, var: &Tensor) -> Tensor — Source: loss.joule:597
struct PoissonNLLLoss — Poisson NLL Loss: L = exp(input) - target * input
Source: loss.joule:616
fn new(reduction: Reduction, log_input: bool, full: bool, eps: f64) -> Self — Source: loss.joule:624
fn forward(&self, input: &Tensor, target: &Tensor) -> Tensor — Source: loss.joule:629
struct DiceLoss — Dice Loss (for segmentation): L = 1 - 2 * |A ∩ B| / (|A| + |B|)
Source: loss.joule:654
fn new(reduction: Reduction, smooth: f64) -> Self — Source: loss.joule:660
fn forward(&self, input: &Tensor, target: &Tensor) -> Tensor — Source: loss.joule:665
struct IoULoss — IoU Loss (Intersection over Union): L = 1 - IoU
Source: loss.joule:685
fn new(reduction: Reduction, smooth: f64) -> Self — Source: loss.joule:691
fn forward(&self, input: &Tensor, target: &Tensor) -> Tensor — Source: loss.joule:696
struct AdversarialLoss — Adversarial Loss (original GAN). D loss: -log(D(x)) - log(1 - D(G(z))); G loss: -log(D(G(z)))
Source: loss.joule:717
fn new(reduction: Reduction) -> Self — Source: loss.joule:722
fn discriminator_loss(&self, real_output: &Tensor, fake_output: &Tensor) -> Tensor — Source: loss.joule:728
fn generator_loss(&self, fake_output: &Tensor) -> Tensor — Source: loss.joule:739
struct WGANLoss — WGAN Loss (Wasserstein GAN). D loss: D(G(z)) - D(x); G loss: -D(G(z))
Source: loss.joule:749
fn new(reduction: Reduction) -> Self — Source: loss.joule:754
fn discriminator_loss(&self, real_output: &Tensor, fake_output: &Tensor) -> Tensor — Source: loss.joule:760
fn generator_loss(&self, fake_output: &Tensor) -> Tensor — Source: loss.joule:767
fn gradient_penalty(…) — Source: loss.joule:773 [NOTE(review): parameter list truncated in extraction; confirm full signature at source]
struct LSGANLoss — Least Squares GAN Loss. D loss: (D(x) - 1)^2 + D(G(z))^2; G loss: (D(G(z)) - 1)^2
Source: loss.joule:805
fn new(reduction: Reduction) -> Self — Source: loss.joule:810
fn discriminator_loss(&self, real_output: &Tensor, fake_output: &Tensor) -> Tensor — Source: loss.joule:816
fn generator_loss(&self, fake_output: &Tensor) -> Tensor — Source: loss.joule:825
struct PerceptualLoss — Perceptual Loss (using VGG features)
Source: loss.joule:836
fn new(layers: Vec<usize>, weights: Vec<f64>, reduction: Reduction) -> Self — Source: loss.joule:843
fn forward(…) — Source: loss.joule:850 [NOTE(review): parameter list truncated in extraction; confirm full signature at source]
struct SSIMLoss — SSIM Loss (Structural Similarity)
Source: loss.joule:875
fn new(window_size: usize, reduction: Reduction) -> Self — Source: loss.joule:881
fn forward(&self, input: &Tensor, target: &Tensor) -> Tensor — Source: loss.joule:886