Struct leaf::layers::container::sequential::Sequential

pub struct Sequential<B: IBackend + LayerOps<f32>> {
    // some fields omitted
}

Sequential Layer

Methods

impl<B: IBackend + LayerOps<f32> + 'static> Sequential<B>

fn empty() -> Sequential<B>

Create an empty Sequential container layer.

fn from_config(backend: Rc<B>, config: &SequentialConfig) -> Sequential<B>

Create a Sequential layer from a SequentialConfig.

fn init_layers(&mut self, backend: Rc<B>, in_config: &SequentialConfig)

Initializes a sequential container.

Sets up the structure of the sequential container. It reads the supplied SequentialConfig, connects the input and output blobs of each layer and determines if the backpropagation has to be executed for each tensor and layer.

Trait Implementations

impl<B: IBackend + LayerOps<f32> + 'static> ILayer<B> for Sequential<B>

fn is_container(&self) -> bool

fn inputs_data(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>

fn inputs_gradients(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>

fn outputs_data(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>

fn outputs_gradients(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>

fn learnable_weights(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>

fn learnable_weights_gradients(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>

fn learnable_weights_names(&self) -> Option<Vec<String>>

fn resize_shared_workspace(&mut self, backend: Rc<B>, workspace: Option<ArcLock<SharedTensor<u8>>>) -> Option<ArcLock<SharedTensor<u8>>>

fn forward(&self, backend: &B, input_data: &[ArcLock<SharedTensor<f32>>], weights_data: &[ArcLock<SharedTensor<f32>>], output_data: &mut [ArcLock<SharedTensor<f32>>])

fn backward_input(&self, backend: &B, weights_data: &[ArcLock<SharedTensor<f32>>], output_data: &[ArcLock<SharedTensor<f32>>], output_gradients: &[ArcLock<SharedTensor<f32>>], input_data: &[ArcLock<SharedTensor<f32>>], input_gradients: &mut [ArcLock<SharedTensor<f32>>])

fn backward_parameters(&self, backend: &B, output_data: &[ArcLock<SharedTensor<f32>>], output_gradients: &[ArcLock<SharedTensor<f32>>], input_data: &[ArcLock<SharedTensor<f32>>], weights_gradients: &mut [ArcLock<SharedTensor<f32>>])

fn init(&mut self, backend: Rc<B>)

fn reshape(&mut self, backend: Rc<B>, input_data: &mut Vec<ArcLock<SharedTensor<f32>>>, input_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>, weights_data: &mut Vec<ArcLock<SharedTensor<f32>>>, weights_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>, output_data: &mut Vec<ArcLock<SharedTensor<f32>>>, output_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>)

fn sync(&self, backend: &B, input_data: &mut [ArcLock<SharedTensor<f32>>], input_gradients: &mut [ArcLock<SharedTensor<f32>>], weights_data: &mut [ArcLock<SharedTensor<f32>>], weights_gradients: &mut [ArcLock<SharedTensor<f32>>], output_data: &mut Vec<ArcLock<SharedTensor<f32>>>, output_gradients: &mut Vec<ArcLock<SharedTensor<f32>>>)

fn auto_output_blobs(&self) -> bool

fn min_output_blobs(&self) -> usize

fn exact_num_output_blobs(&self) -> Option<usize>

fn auto_weight_blobs(&self) -> bool

fn exact_num_input_blobs(&self) -> Option<usize>

fn allow_force_backward(&self, input_id: usize) -> bool

fn sync_native(&self) -> bool

fn compute_in_place(&self) -> bool

fn loss_weight(&self, output_id: usize) -> Option<f32>

fn learnable_weights_lr(&self) -> Option<Vec<Option<f32>>>

impl<B: IBackend + LayerOps<f32> + 'static> ComputeOutput<f32, B> for Sequential<B>

fn compute_output(&self, backend: &B, weights: &[&SharedTensor<f32>], input_data: &[&SharedTensor<f32>], output_data: &mut [&mut SharedTensor<f32>])

impl<B: IBackend + LayerOps<f32> + 'static> ComputeInputGradient<f32, B> for Sequential<B>

fn compute_input_gradient(&self, backend: &B, weights_data: &[&SharedTensor<f32>], output_data: &[&SharedTensor<f32>], output_gradients: &[&SharedTensor<f32>], input_data: &[&SharedTensor<f32>], input_gradients: &mut [&mut SharedTensor<f32>])

impl<B: IBackend + LayerOps<f32> + 'static> ComputeParametersGradient<f32, B> for Sequential<B>

fn compute_parameters_gradient(&self, backend: &B, output_data: &[&SharedTensor<f32>], output_gradients: &[&SharedTensor<f32>], input_data: &[&SharedTensor<f32>], parameters_gradients: &mut [&mut SharedTensor<f32>])

Derived Implementations

impl<B: Debug + IBackend + LayerOps<f32>> Debug for Sequential<B>

fn fmt(&self, __arg_0: &mut Formatter) -> Result