Struct leaf::layers::utility::reshape::Reshape

pub struct Reshape {
    // some fields omitted
}

Reshape Utility Layer

Methods

impl Reshape

fn from_config(config: &ReshapeConfig) -> Reshape

Create a Reshape layer from a ReshapeConfig.

Trait Implementations

impl<B: IBackend> ILayer<B> for Reshape

fn compute_in_place(&self) -> bool

fn auto_output_blobs(&self) -> bool

fn reshape(&mut self, backend: Rc<B>, input_data: &mut Vec<ArcLock<SharedTensor<f32>>>, input_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>, weights_data: &mut Vec<ArcLock<SharedTensor<f32>>>, weights_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>, output_data: &mut Vec<ArcLock<SharedTensor<f32>>>, output_gradient: &mut Vec<ArcLock<SharedTensor<f32>>>)

fn init(&mut self, backend: Rc<B>)

fn resize_shared_workspace(&mut self, backend: Rc<B>, workspace: Option<ArcLock<SharedTensor<u8>>>) -> Option<ArcLock<SharedTensor<u8>>>

fn forward(&self, backend: &B, input_data: &[ArcLock<SharedTensor<f32>>], weights_data: &[ArcLock<SharedTensor<f32>>], output_data: &mut [ArcLock<SharedTensor<f32>>])

fn backward_input(&self, backend: &B, weights_data: &[ArcLock<SharedTensor<f32>>], output_data: &[ArcLock<SharedTensor<f32>>], output_gradients: &[ArcLock<SharedTensor<f32>>], input_data: &[ArcLock<SharedTensor<f32>>], input_gradients: &mut [ArcLock<SharedTensor<f32>>])

fn backward_parameters(&self, backend: &B, output_data: &[ArcLock<SharedTensor<f32>>], output_gradients: &[ArcLock<SharedTensor<f32>>], input_data: &[ArcLock<SharedTensor<f32>>], weights_gradients: &mut [ArcLock<SharedTensor<f32>>])

fn sync(&self, backend: &B, input_data: &mut [ArcLock<SharedTensor<f32>>], input_gradients: &mut [ArcLock<SharedTensor<f32>>], weights_data: &mut [ArcLock<SharedTensor<f32>>], weights_gradients: &mut [ArcLock<SharedTensor<f32>>], output_data: &mut Vec<ArcLock<SharedTensor<f32>>>, output_gradients: &mut Vec<ArcLock<SharedTensor<f32>>>)

fn min_output_blobs(&self) -> usize

fn exact_num_output_blobs(&self) -> Option<usize>

fn auto_weight_blobs(&self) -> bool

fn exact_num_input_blobs(&self) -> Option<usize>

fn allow_force_backward(&self, input_id: usize) -> bool

fn sync_native(&self) -> bool

fn is_container(&self) -> bool

fn loss_weight(&self, output_id: usize) -> Option<f32>

fn inputs_data(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>

fn inputs_gradients(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>

fn outputs_data(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>

fn outputs_gradients(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>

fn learnable_weights(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>

fn learnable_weights_gradients(&self) -> Option<Vec<ArcLock<SharedTensor<f32>>>>

fn learnable_weights_names(&self) -> Option<Vec<String>>

fn learnable_weights_lr(&self) -> Option<Vec<Option<f32>>>

impl<B: IBackend> ComputeOutput<f32, B> for Reshape

fn compute_output(&self, backend: &B, _weights: &[&SharedTensor<f32>], input_data: &[&SharedTensor<f32>], output_data: &mut [&mut SharedTensor<f32>])

impl<B: IBackend> ComputeInputGradient<f32, B> for Reshape

fn compute_input_gradient(&self, backend: &B, weights_data: &[&SharedTensor<f32>], output_data: &[&SharedTensor<f32>], output_gradients: &[&SharedTensor<f32>], input_data: &[&SharedTensor<f32>], input_gradients: &mut [&mut SharedTensor<f32>])

impl<B: IBackend> ComputeParametersGradient<f32, B> for Reshape

fn compute_parameters_gradient(&self, backend: &B, output_data: &[&SharedTensor<f32>], output_gradients: &[&SharedTensor<f32>], input_data: &[&SharedTensor<f32>], parameters_gradients: &mut [&mut SharedTensor<f32>])

Derived Implementations

impl Clone for Reshape

fn clone(&self) -> Reshape

1.0.0 · fn clone_from(&mut self, source: &Self)

impl Debug for Reshape

fn fmt(&self, __arg_0: &mut Formatter) -> Result