Struct leaf::layers::activation::sigmoid::Sigmoid

pub struct Sigmoid;

Sigmoid Activation Layer

Trait Implementations

impl<B: IBackend + Sigmoid<f32>> ILayer<B> for Sigmoid

fn exact_num_output_blobs(&self) -> Option<usize>

fn exact_num_input_blobs(&self) -> Option<usize>

fn reshape(&mut self, backend: Rc<B>, input_data: &mut Vec<ArcLock<SharedTensor<f32>>>, input_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>, weights_data: &mut Vec<ArcLock<SharedTensor<f32>>>, weights_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>, output_data: &mut Vec<ArcLock<SharedTensor<f32>>>, output_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>)

fn init(&mut self, backend: Rc<B>)

fn resize_shared_workspace(&mut self, backend: Rc<B>, workspace: Option<ArcLock<SharedTensor<u8>>>) -> Option<ArcLock<SharedTensor<u8>>>

fn forward(&self, backend: &B, input_data: &[ArcLock<SharedTensor<f32>>], weights_data: &[ArcLock<SharedTensor<f32>>], output_data: &mut [ArcLock<SharedTensor<f32>>])

fn backward_input(&self, backend: &B, weights_data: &[ArcLock<SharedTensor<f32>>], output_data: &[ArcLock<SharedTensor<f32>>], output_gradients: &[ArcLock<SharedTensor<f32>>], input_data: &[ArcLock<SharedTensor<f32>>], input_gradients: &mut [ArcLock<SharedTensor<f32>>])

fn backward_parameters(&self, backend: &B, output_data: &[ArcLock<SharedTensor<f32>>], output_gradients: &[ArcLock<SharedTensor<f32>>], input_data: &[ArcLock<SharedTensor<f32>>], weights_gradients: &mut [ArcLock<SharedTensor<f32>>])

fn sync(&self, backend: &B, input_data: &mut [ArcLock<SharedTensor<f32>>], input_gradients: &mut [ArcLock<SharedTensor<f32>>], weights_data: &mut [ArcLock<SharedTensor<f32>>], weights_gradients: &mut [ArcLock<SharedTensor<f32>>], output_data: &mut Vec<ArcLock<SharedTensor<f32>>>, output_gradients: &mut Vec<ArcLock<SharedTensor<f32>>>)

fn auto_output_blobs(&self) -> bool

fn min_output_blobs(&self) -> usize

fn auto_weight_blobs(&self) -> bool

fn allow_force_backward(&self, input_id: usize) -> bool

fn sync_native(&self) -> bool

fn compute_in_place(&self) -> bool

fn is_container(&self) -> bool

fn loss_weight(&self, output_id: usize) -> Option<f32>

fn inputs_data(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>

fn inputs_gradients(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>

fn outputs_data(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>

fn outputs_gradients(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>

fn learnable_weights(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>

fn learnable_weights_gradients(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>

fn learnable_weights_names(&self) -> Option<Vec<String>>

fn learnable_weights_lr(&self) -> Option<Vec<Option<f32>>>

impl<B: IBackend + Sigmoid<f32>> ComputeOutput<f32, B> for Sigmoid

fn compute_output(&self, backend: &B, _weights: &[&SharedTensor<f32>], input_data: &[&SharedTensor<f32>], output_data: &mut [&mut SharedTensor<f32>])

impl<B: IBackend + Sigmoid<f32>> ComputeInputGradient<f32, B> for Sigmoid

fn compute_input_gradient(&self, backend: &B, weights_data: &[&SharedTensor<f32>], output_data: &[&SharedTensor<f32>], output_gradients: &[&SharedTensor<f32>], input_data: &[&SharedTensor<f32>], input_gradients: &mut [&mut SharedTensor<f32>])

impl<B: IBackend + Sigmoid<f32>> ComputeParametersGradient<f32, B> for Sigmoid

fn compute_parameters_gradient(&self, backend: &B, output_data: &[&SharedTensor<f32>], output_gradients: &[&SharedTensor<f32>], input_data: &[&SharedTensor<f32>], parameters_gradients: &mut [&mut SharedTensor<f32>])

Derived Implementations

impl Clone for Sigmoid

fn clone(&self) -> Sigmoid

1.0.0 · fn clone_from(&mut self, source: &Self)

impl Debug for Sigmoid

fn fmt(&self, __arg_0: &mut Formatter) -> Result