Cytnx v0.9.3
cytnx::Tensor Class Reference

A tensor (multi-dimensional array).

#include <Tensor.hpp>

Public Member Functions

void Save (const std::string &fname) const
 Save current Tensor to file.
 
void Save (const char *fname) const
 
void Tofile (const std::string &fname) const
 Save current Tensor to the binary file.
 
void Tofile (const char *fname) const
 
void Tofile (std::fstream &f) const
 
void Init (const std::vector< cytnx_uint64 > &shape, const unsigned int &dtype=Type.Double, const int &device=-1, const bool &init_zero=true)
 initialize a Tensor
 
 Tensor (const std::vector< cytnx_uint64 > &shape, const unsigned int &dtype=Type.Double, const int &device=-1, const bool &init_zero=true)
 Construct a new Tensor object.
 
unsigned int dtype () const
 the dtype-id of the Tensor
 
int device () const
 the device-id of the Tensor
 
std::string dtype_str () const
 the dtype (in string) of the Tensor
 
std::string device_str () const
 the device (in string) of the Tensor
 
const std::vector< cytnx_uint64 > & shape () const
 the shape of the Tensor
 
cytnx_uint64 rank () const
 the rank of the Tensor
 
Tensor clone () const
 return a clone of the current Tensor.
 
Tensor to (const int &device) const
 copy a tensor to a new device
 
void to_ (const int &device)
 move the current Tensor to the device.
 
const bool & is_contiguous () const
 return whether the Tensor is contiguous or not.
 
Tensor permute_ (const std::vector< cytnx_uint64 > &rnks)
 
Tensor permute (const std::vector< cytnx_uint64 > &rnks) const
 perform tensor permute on the cytnx::Tensor and return a new instance.
 
Tensor contiguous () const
 Make the Tensor contiguous by coalescing the memory (storage).
 
Tensor contiguous_ ()
 Make the Tensor contiguous by coalescing the memory (storage), in-place.
 
void reshape_ (const std::vector< cytnx_int64 > &new_shape)
 reshape the Tensor in-place
 
Tensor reshape (const std::vector< cytnx_int64 > &new_shape) const
 return a new Tensor that is reshaped.
 
Tensor reshape (const std::vector< cytnx_uint64 > &new_shape) const
 
Tensor reshape (const std::initializer_list< cytnx_int64 > &new_shape) const
 
Tensor astype (const int &new_type) const
 return a new Tensor cast to a different dtype.
 
template<class T >
T & at (const std::vector< cytnx_uint64 > &locator)
 Get an element at a specific location.
 
template<class T >
const T & at (const std::vector< cytnx_uint64 > &locator) const
 
template<class T >
T & item ()
 get the element from a rank-0 Tensor.
 
Tensor get (const std::vector< cytnx::Accessor > &accessors) const
 get elements using Accessor (C++ API) / slices (python API)
 
void set (const std::vector< cytnx::Accessor > &accessors, const Tensor &rhs)
 set elements with the input Tensor using Accessor (C++ API) / slices (python API)
 
template<class T >
void set (const std::vector< cytnx::Accessor > &accessors, const T &rc)
 set elements with the input constant using Accessor (C++ API) / slices (python API)
 
Storage & storage () const
 return the storage of the current Tensor.
 
template<class T >
void fill (const T &val)
 fill all the elements of the current Tensor with the given value.
 
bool equivshape (const Tensor &rhs)
 compare the shape of two tensors.
 
Tensor real ()
 return the real part of the tensor.
 
Tensor imag ()
 return the imaginary part of the tensor.
 
template<class T >
Tensor & operator+= (const T &rc)
 addition assignment operator with a Tensor or a scalar.
 
template<class T >
Tensor & operator-= (const T &rc)
 subtraction assignment operator with a Tensor or a scalar.
 
template<class T >
Tensor & operator*= (const T &rc)
 multiplication assignment operator with a Tensor or a scalar.
 
template<class T >
Tensor & operator/= (const T &rc)
 division assignment operator with a Tensor or a scalar.
 
template<class T >
Tensor Add (const T &rhs)
 Addition function with a Tensor or a scalar. Same as cytnx::operator+(const Tensor &self, const T &rhs).
 
template<class T >
Tensor & Add_ (const T &rhs)
 Addition function with a Tensor or a scalar, in-place. Same as operator+=(const T &rhs).
 
template<class T >
Tensor Sub (const T &rhs)
 Subtraction function with a Tensor or a scalar. Same as cytnx::operator-(const Tensor &self, const T &rhs).
 
template<class T >
Tensor & Sub_ (const T &rhs)
 Subtraction function with a Tensor or a scalar, in-place. Same as operator-=(const T &rhs).
 
template<class T >
Tensor Mul (const T &rhs)
 Multiplication function with a Tensor or a scalar. Same as cytnx::operator*(const Tensor &self, const T &rhs).
 
template<class T >
Tensor & Mul_ (const T &rhs)
 Multiplication function with a Tensor or a scalar, in-place. Same as operator*=(const T &rhs).
 
template<class T >
Tensor Div (const T &rhs)
 Division function with a Tensor or a scalar. Same as cytnx::operator/(const Tensor &self, const T &rhs).
 
template<class T >
Tensor & Div_ (const T &rhs)
 Division function with a Tensor or a scalar, in-place. Same as operator/=(const T &rhs).
 
template<class T >
Tensor Cpr (const T &rhs)
 The comparison function.
 
template<class T >
Tensor Mod (const T &rhs)
 
Tensor operator- ()
 The negation function.
 
Tensor flatten () const
 The flatten function.
 
void flatten_ ()
 The flatten function, in-place.
 
void append (const Tensor &rhs)
 the append function.
 
void append (const Storage &srhs)
 the append function of the Storage.
 
template<class T >
void append (const T &rhs)
 the append function of the scalar.
 
bool same_data (const Tensor &rhs) const
 Check whether two tensors share the same internal memory.
 
std::vector< Tensor > Svd (const bool &is_UvT=true) const
 the SVD member function. Same as cytnx::linalg::Svd(const Tensor &Tin, const bool &is_UvT) , where Tin is the current Tensor.
 
std::vector< Tensor > Eigh (const bool &is_V=true, const bool &row_v=false) const
 the Eigh member function. Same as cytnx::linalg::Eigh(const Tensor &Tin, const bool &is_V, const bool &row_v) , where Tin is the current Tensor.
 
Tensor & InvM_ ()
 the InvM_ member function. Same as cytnx::linalg::InvM_(Tensor &Tin), where Tin is the current Tensor.
 
Tensor InvM () const
 the InvM member function. Same as cytnx::linalg::InvM(const Tensor &Tin), where Tin is the current Tensor.
 
Tensor & Inv_ (const double &clip)
 the Inv_ member function. Same as cytnx::linalg::Inv_(Tensor &Tin, const double &clip)
 
Tensor Inv (const double &clip) const
 the Inv member function. Same as cytnx::linalg::Inv(const Tensor &Tin, const double &clip)
 
Tensor & Conj_ ()
 the Conj_ member function. Same as cytnx::linalg::Conj_(Tensor &Tin), where Tin is the current Tensor.
 
Tensor Conj () const
 the Conj member function. Same as cytnx::linalg::Conj(const Tensor &Tin), where Tin is the current Tensor.
 
Tensor & Exp_ ()
 the Exp_ member function. Same as linalg::Exp_(Tensor &Tin), where Tin is the current Tensor.
 
Tensor Exp () const
 the Exp member function. Same as linalg::Exp(const Tensor &Tin), where Tin is the current Tensor.
 
Tensor Norm () const
 the Norm member function. Same as linalg::Norm(const Tensor &Tin), where Tin is the current Tensor.
 
Tensor Pow (const cytnx_double &p) const
 the Pow member function. Same as linalg::Pow(const Tensor &Tin, const cytnx_double &p), where Tin is the current Tensor.
 
Tensor & Pow_ (const cytnx_double &p)
 the Pow_ member function. Same as linalg::Pow_(Tensor &Tin, const cytnx_double &p), where Tin is the current Tensor.
 
Tensor Trace (const cytnx_uint64 &a=0, const cytnx_uint64 &b=1) const
 the Trace member function. Same as linalg::Trace(const Tensor &Tin, const cytnx_uint64 &a, const cytnx_uint64 &b), where Tin is the current Tensor.
 
Tensor Abs () const
 the Abs member function. Same as linalg::Abs(const Tensor &Tin), where Tin is the current Tensor.
 
Tensor & Abs_ ()
 the Abs_ member function. Same as linalg::Abs_(Tensor &Tin), where Tin is the current Tensor.
 
Tensor Max () const
 the Max member function. Same as linalg::Max(const Tensor &Tin), where Tin is the current Tensor.
 
Tensor Min () const
 the Min member function. Same as linalg::Min(const Tensor &Tin), where Tin is the current Tensor.
 

Static Public Member Functions

static Tensor Load (const std::string &fname)
 Load current Tensor from file.
 
static Tensor Load (const char *fname)
 
static Tensor Fromfile (const std::string &fname, const unsigned int &dtype, const cytnx_int64 &count=-1)
 Load current Tensor from the binary file.
 
static Tensor Fromfile (const char *fname, const unsigned int &dtype, const cytnx_int64 &count=-1)
 
static Tensor from_storage (const Storage &in)
 Convert a Storage to Tensor.
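
A minimal round-trip sketch for the Save/Load pair above (assuming using namespace cytnx, as in the examples on this page; the file name is illustrative, and the exact on-disk extension that Save produces is an assumption to verify):

#include "cytnx.hpp"
using namespace cytnx;
int main() {
  Tensor A = arange(12).reshape({3, 4});
  A.Save("my_tensor");  // illustrative file name
  // assumption: Save appends its own extension on disk; point Load at the actual file
  Tensor B = Tensor::Load("my_tensor.cytn");
  return 0;
}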
 

Detailed Description

A tensor (multi-dimensional array).

Constructor & Destructor Documentation

◆ Tensor()

cytnx::Tensor::Tensor ( const std::vector< cytnx_uint64 > & shape,
const unsigned int & dtype = Type.Double,
const int & device = -1,
const bool & init_zero = true
)
inline

Construct a new Tensor object.

This is the constructor of Tensor. It will call cytnx::Tensor::Init() to initialize the Tensor.

Parameters
[in] shape: the shape of the tensor.
[in] dtype: the dtype of the tensor. This can be any type defined in cytnx::Type.
[in] device: the device on which the tensor is created. This can be cytnx::Device.cpu or cytnx::Device.cuda+<gpuid>; see cytnx::Device for more detail.
[in] init_zero: if true, the content of the Tensor is initialized to zero; if false, the content is left uninitialized.
See also
cytnx::Tensor::Init
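
Example (a minimal sketch; Type.ComplexDouble and Device.cpu follow the conventions used elsewhere on this page):

#include "cytnx.hpp"
using namespace cytnx;
int main() {
  // rank-3 tensor of doubles on the CPU, zero-initialized (all defaults)
  Tensor A({3, 4, 5});
  // same shape, complex dtype, explicitly on the CPU
  Tensor B({3, 4, 5}, Type.ComplexDouble, Device.cpu);
  // skip the zero initialization; the contents are then undefined
  Tensor C({3, 4, 5}, Type.Double, Device.cpu, false);
  return 0;
}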

Member Function Documentation

◆ Abs()

Tensor cytnx::Tensor::Abs ( ) const

the Abs member function. Same as linalg::Abs(const Tensor &Tin), where Tin is the current Tensor.
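
A brief sketch (assuming using namespace cytnx and the element-wise Tensor-scalar arithmetic shown elsewhere on this page):

Tensor A = arange(5) - 2.0;  // elements -2,-1,0,1,2
Tensor B = A.Abs();          // elements 2,1,0,1,2; A itself is unchanged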

◆ Abs_()

Tensor & cytnx::Tensor::Abs_ ( )

the Abs_ member function. Same as linalg::Abs_(Tensor &Tin), where Tin is the current Tensor.

◆ Add()

template<class T >
Tensor cytnx::Tensor::Add ( const T & rhs )
inline

Addition function with a Tensor or a scalar. Same as cytnx::operator+(const Tensor &self, const T &rhs).

Parameters
[in] rhs: the added Tensor or scalar.
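
A brief sketch (assuming using namespace cytnx):

Tensor A = arange(4);
Tensor B = A.Add(1.0);  // element-wise A + 1; equivalent to A + 1.0
Tensor C = A.Add(B);    // element-wise A + B; equivalent to A + B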

◆ Add_()

template<class T >
Tensor & cytnx::Tensor::Add_ ( const T & rhs )
inline

Addition function with a Tensor or a scalar, in-place. Same as operator+=(const T &rhs).

Parameters
[in] rhs: the added Tensor or scalar.

◆ append() [1/3]

void cytnx::Tensor::append ( const Storage & srhs )
inline

the append function of the Storage.

This function appends the srhs Storage to the current tensor as a new row. The current tensor must be rank-2, and the srhs Storage must have the same size as the second dimension of the current tensor. For example, if the current tensor is \(A\) with size \(M \times N\) and the srhs Storage is \(B\) with size \(N\), then the output tensor is \(C\) with size \((M+1) \times N\), where

\[ C(i,j) = \begin{cases} A(i,j) & \text{if } i \neq M \\ B(j) & \text{if } i = M \end{cases} \]

Here indices \(i\) and \(j\) start from 0.

Parameters
[in] srhs: the appended Storage.
Returns
Nothing; the current tensor is extended in-place (the function returns void).
Precondition
  1. The srhs Storage and the current tensor cannot be empty.
  2. The current tensor must be rank-2.
  3. The srhs Storage must have the same size as the second dimension of the current tensor. Namely, srhs.size() == this->shape()[1].
Note
If the dtype of the srhs is different from that of the current tensor, the srhs will be cast to the dtype of the current tensor.
See also
append(const Tensor &rhs)
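
A minimal sketch under the preconditions above (ones() is assumed to exist alongside zeros(); the new row is taken from another tensor's storage):

Tensor A = zeros({3, 4});  // M = 3, N = 4
Tensor r = ones(4);        // rank-1 tensor holding the new row of length N
A.append(r.storage());     // A becomes 4 x 4; the last row equals r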

◆ append() [2/3]

template<class T >
void cytnx::Tensor::append ( const T & rhs )
inline

the append function of the scalar.

This function is the append function for a scalar. It can only append a scalar to a rank-1 Tensor.

Parameters
[in] rhs: the appended scalar.
Returns
Nothing; the scalar is appended in-place (the function returns void).
Precondition
  1. The current Tensor must be rank-1. (1D array)
  2. The current Tensor must be contiguous.
  3. rhs must be a scalar.
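
A minimal sketch under the preconditions above (assuming using namespace cytnx):

Tensor v = arange(3);  // rank-1, contiguous: 0,1,2
v.append(3.0);         // v becomes 0,1,2,3 (length 4)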

◆ append() [3/3]

void cytnx::Tensor::append ( const Tensor & rhs )
inline

the append function.

This function appends the rhs tensor to the current tensor along the first dimension. The rhs tensor must have the same shape as the current tensor with the first dimension removed. For example, if the current tensor is \(A(i,j,k)\) and the rhs tensor is \(B(j,k)\), then the output tensor is \(C(i,j,k)\) where

\[ C(i,j,k) = \begin{cases} A(i,j,k) & \text{if } i \neq N \\ B(j,k) & \text{if } i = N \end{cases} \]

where \(N\) is the number of the first dimension of the current tensor. Here indices \(i\), \(j\) and \(k\) start from 0.

Parameters
[in] rhs: the appended tensor.
Returns
Nothing; the tensor is appended in-place (the function returns void).
Precondition
  1. The rhs tensor and the current tensor cannot be empty.
  2. The rhs tensor must have the same shape as the current tensor, except the first dimension. Namely, rhs.shape()[i] == this->shape()[i+1] and rhs.shape().size() == this->shape().size()-1.
Note
If the dtype of the rhs is different from that of the current tensor, the rhs will be cast to the dtype of the current tensor.
See also
append(const Storage &rhs)
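
A minimal sketch under the preconditions above (ones() is assumed to exist alongside zeros()):

Tensor A = zeros({2, 3, 4});  // first dimension N = 2
Tensor B = ones({3, 4});      // A's shape with the first dimension removed
A.append(B);                  // A becomes 3 x 3 x 4; the new slice A(2,:,:) equals B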

◆ astype()

Tensor cytnx::Tensor::astype ( const int & new_type ) const
inline

return a new Tensor cast to a different dtype.

Parameters
[in] new_type: the new dtype. It can be any type defined in cytnx::Type.
Returns
[Tensor]
Note
If new_type is the same as the dtype of the current Tensor, this returns self.
Attention
This function cannot convert a complex type to a real type; use Tensor::real() or Tensor::imag() to get the real or imaginary part of a complex Tensor instead.

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main() {
Tensor A = zeros({3, 4, 5}, Type.Double);
cout << A;
Tensor B = A.astype(Type.Uint64);
cout << B;
// the new type is the same as current dtype, return self.
Tensor C = A.astype(Type.Double);
cout << is(C, A) << endl; // this should be true.
return 0;
}

output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]
 [[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]
 [[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]]


Total elem: 60
type  : Uint64
cytnx device: CPU
Shape : (3,4,5)
[[[                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]]
 [[                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]]
 [[                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]]]

1

python API:

import sys
from pathlib import Path
home = str(Path.home())
sys.path.append(home + '/Cytnx_lib')
from cytnx import *
A = zeros([3,4,5],dtype=Type.Double)
print(A)
B = A.astype(Type.Uint64)
print(B)
C = A.astype(Type.Double)
print(C is A)

output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]
 [[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]
 [[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]]




Total elem: 60
type  : Uint64
cytnx device: CPU
Shape : (3,4,5)
[[[                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]]
 [[                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]]
 [[                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]]]



True

◆ at() [1/2]

template<class T >
T & cytnx::Tensor::at ( const std::vector< cytnx_uint64 > &  locator)
inline

Get an element at a specific location.

This function is used to get an element at a specific location. If the template type is not given, the return will be a Scalar.

Parameters
[in] locator: the location of the element.
Returns
[ref]
Note
  1. This is for the C++ API only!
  2. Template instantiation is needed to resolve the type, which should be consistent with the dtype of the Tensor. An error will be issued if the template type is inconsistent with the current dtype of the Tensor.
  3. For the python API, use [] directly to get an element.

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main() {
Tensor A = arange(30, Type.Float).reshape(2, 3, 5);
cout << A << endl;
// note that the type resolver should be consistent with the dtype
cout << A.at<cytnx_float>(0, 0, 2) << endl;
// the return is a reference, and can be modified directly.
A.at<cytnx_float>(0, 0, 2) = 999;
cout << A.at<cytnx_float>(0, 0, 2) << endl;
// [Note] there are two ways to give the indices:
// Method 1: more like the 'c++' way:
// (alternatively, you can also simply give a std::vector)
A.at<cytnx_float>({0, 0, 2}); // note the braces {}
// Method 2: more like the 'python' way:
A.at<cytnx_float>(0, 0, 2);
return 0;
}

output>

 

◆ at() [2/2]

template<class T >
const T & cytnx::Tensor::at ( const std::vector< cytnx_uint64 > &  locator) const
inline

◆ clone()

Tensor cytnx::Tensor::clone ( ) const
inline

return a clone of the current Tensor.

Returns
[Tensor]

In the C++ API, the assignment operator is designed to have the same behavior as in Python:
assignment shares the underlying object. To obtain an independent copy of the current tensor, call clone().

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main() {
Tensor A({3, 4, 5});
Tensor B = A; // B shares the same object with A
Tensor C = A.clone(); // C is a copy of A
// use is() to check if two variables share the same object
cout << is(B, A) << endl;
cout << is(C, A) << endl;
return 0;
}

output>

1
0

python API:

import sys
from pathlib import Path
home = str(Path.home())
sys.path.append(home + '/Cytnx_lib')
from cytnx import *
A = Tensor([3,4,5])
B = A
C = A.clone()
print(B is A)
print(C is A)

output>

True
False

◆ Conj()

Tensor cytnx::Tensor::Conj ( ) const

the Conj member function. Same as cytnx::linalg::Conj(const Tensor &Tin), where Tin is the current Tensor.

◆ Conj_()

Tensor & cytnx::Tensor::Conj_ ( )

the Conj_ member function. Same as cytnx::linalg::Conj_(Tensor &Tin), where Tin is the current Tensor.

◆ contiguous()

Tensor cytnx::Tensor::contiguous ( ) const
inline

Make the Tensor contiguous by coalescing the memory (storage).

Returns
[Tensor] a new Tensor with contiguous memory (storage).
See also
Tensor::contiguous_()

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main() {
Tensor A({3, 4, 5});
cout << A.shape() << endl;
Tensor B = A.permute({0, 2, 1});
cout << B.shape() << endl;
//[Note] permute does not actually move the internal memory (storage) layout.
// this is called the non-contiguous status.
// the memory layout only moves when Tensor.contiguous() is called.
Tensor C = B.contiguous(); // actual moving the memory
cout << B.is_contiguous() << endl; // false.
cout << C.is_contiguous() << endl; // true.
cout << C.shape() << endl;
cout << C.same_data(B) << endl; // false
cout << B.same_data(A) << endl; // true
return 0;
}

output>

Vector Print:
Total Elements:3
[3, 4, 5]

Vector Print:
Total Elements:3
[3, 5, 4]

0
1
Vector Print:
Total Elements:3
[3, 5, 4]

0
1

python API:

import sys
from pathlib import Path
home = str(Path.home())
sys.path.append(home + '/Cytnx_lib')
from cytnx import *
A = Tensor([3,4,5])
print(A.shape())
B = A.permute(0,2,1)
print(B.shape())
C = B.contiguous()
print(B.is_contiguous()) #false
print(C.is_contiguous()) #true
print(C.shape())

output>

[3, 4, 5]
[3, 5, 4]
False
True
[3, 5, 4]

◆ contiguous_()

Tensor cytnx::Tensor::contiguous_ ( )
inline

Make the Tensor contiguous by coalescing the memory (storage), in-place.

See also
Tensor::contiguous()

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main() {
Tensor A({3, 4, 5});
cout << A.shape() << endl;
Tensor B = A.permute({0, 2, 1});
cout << B.shape() << endl;
//[Note] permute does not actually move the internal memory (storage) layout.
// this is called the non-contiguous status.
// the memory layout only moves when Tensor.contiguous() is called.
cout << B.is_contiguous() << endl; // false.
B.contiguous_(); // actual moving the memory
cout << B.is_contiguous() << endl; // true.
return 0;
}

output>

Vector Print:
Total Elements:3
[3, 4, 5]

Vector Print:
Total Elements:3
[3, 5, 4]

0
1

python API:

import sys
from pathlib import Path
home = str(Path.home())
sys.path.append(home + '/Cytnx_lib')
from cytnx import *
A = Tensor([3,4,5])
print(A.shape())
B = A.permute(0,2,1)
print(B.shape())
print(B.is_contiguous()) #false
B.contiguous_()
print(B.is_contiguous()) #true

output>

[3, 4, 5]
[3, 5, 4]
False
True

◆ Cpr()

template<class T >
Tensor cytnx::Tensor::Cpr ( const T & rhs )
inline

The comparison function.

This is the element-wise comparison function. Same as cytnx::operator==(const Tensor &self, const T &rhs).

Parameters
[in] rhs: the compared object.
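
A brief sketch (the element-wise boolean result is an assumption consistent with the Tensor return type):

Tensor A = arange(4);   // 0,1,2,3
Tensor m = A.Cpr(2.0);  // element-wise A == 2; same as (A == 2.0)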

◆ device()

int cytnx::Tensor::device ( ) const
inline

the device-id of the Tensor

See also
cytnx::Device
Returns
[int] the device_id of the Tensor

◆ device_str()

std::string cytnx::Tensor::device_str ( ) const
inline

the device (in string) of the Tensor

See also
cytnx::Device, device() const
Returns
[std::string] the device of the Tensor

◆ Div()

template<class T >
Tensor cytnx::Tensor::Div ( const T & rhs )
inline

Division function with a Tensor or a scalar. Same as cytnx::operator/(const Tensor &self, const T &rhs).

Parameters
[in] rhs: the divisor Tensor or scalar.
Attention
rhs cannot be zero.
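
A brief sketch (assuming using namespace cytnx):

Tensor A = arange(4) + 1.0;  // 1,2,3,4
Tensor B = A.Div(2.0);       // element-wise A / 2; equivalent to A / 2.0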

◆ Div_()

template<class T >
Tensor & cytnx::Tensor::Div_ ( const T & rhs )
inline

Division function with a Tensor or a scalar, in-place. Same as operator/=(const T &rhs).

Parameters
[in] rhs: the divisor Tensor or scalar.
Attention
rhs cannot be zero.

◆ dtype()

unsigned int cytnx::Tensor::dtype ( ) const
inline

the dtype-id of the Tensor

See also
cytnx::Type
Returns
[unsigned int] the dtype_id of the Tensor

◆ dtype_str()

std::string cytnx::Tensor::dtype_str ( ) const
inline

the dtype (in string) of the Tensor

See also
cytnx::Type, dtype() const
Returns
[std::string] the dtype of the Tensor

◆ Eigh()

std::vector< Tensor > cytnx::Tensor::Eigh ( const bool & is_V = true,
const bool & row_v = false
) const

the Eigh member function. Same as cytnx::linalg::Eigh(const Tensor &Tin, const bool &is_V, const bool &row_v) , where Tin is the current Tensor.
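
A minimal sketch (the symmetrization step is illustrative, since Eigh expects a Hermitian/symmetric input; the ordering of the returned vector, eigenvalues then eigenvectors when is_V is true, is an assumption to verify against cytnx::linalg::Eigh):

Tensor A = arange(9).reshape({3, 3});
Tensor S = A + A.permute({1, 0}).contiguous();  // make the matrix symmetric
std::vector<Tensor> res = S.Eigh();  // res[0]: eigenvalues, res[1]: eigenvectors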

◆ equivshape()

bool cytnx::Tensor::equivshape ( const Tensor & rhs )
inline

compare the shape of two tensors.

Parameters
[in] rhs: the tensor to be compared.
Returns
true if the two tensors have the same shape, false otherwise.
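
A brief sketch (ones() is assumed to exist alongside zeros()):

Tensor A = zeros({3, 4});
Tensor B = ones({3, 4});
bool same = A.equivshape(B);  // true: only the shapes are compared, not the values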

◆ Exp()

Tensor cytnx::Tensor::Exp ( ) const

the Exp member function. Same as linalg::Exp(const Tensor &Tin), where Tin is the current Tensor.
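
A brief sketch (element-wise exponential, per the linalg::Exp correspondence above):

Tensor A = zeros({2, 2});
Tensor E = A.Exp();  // element-wise exp; exp(0) = 1, so E is all ones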

◆ Exp_()

Tensor & cytnx::Tensor::Exp_ ( )

the Exp_ member function. Same as linalg::Exp_(Tensor &Tin), where Tin is the current Tensor.

◆ fill()

template<class T >
void cytnx::Tensor::fill ( const T & val )
inline

fill all the elements of the current Tensor with the given value.

Parameters
[in] val: the assigned value.

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main() {
Tensor A = arange(60).reshape({3, 4, 5});
cout << A << endl;
A.fill(999);
cout << A << endl;
return 0;
}

output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 ]
  [4.50000e+01 4.60000e+01 4.70000e+01 4.80000e+01 4.90000e+01 ]
  [5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 ]
  [5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]]]



Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]]
 [[9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]]
 [[9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]]]

python API:

import sys
from pathlib import Path
home = str(Path.home())
sys.path.append(home + '/Cytnx_lib')
from cytnx import *
A = arange(60).reshape(3,4,5)
print(A)
A.fill(999)
print(A)

output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 ]
  [4.50000e+01 4.60000e+01 4.70000e+01 4.80000e+01 4.90000e+01 ]
  [5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 ]
  [5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]]]




Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]]
 [[9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]]
 [[9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]]]

◆ flatten()

Tensor cytnx::Tensor::flatten ( ) const
inline

The flatten function.

This function clones (deep copies) the current tensor, makes it contiguous, and reshapes it into a rank-1 Tensor.

Note
Compared to the flatten_() function, this function returns a new tensor; the current tensor is not changed.
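
For instance, a minimal sketch (using arange() and reshape() as in the other examples on this page):

Tensor A = arange(6).reshape({2, 3});
Tensor B = A.flatten(); // a new, contiguous rank-1 Tensor with shape (6)
// A keeps its shape (2,3) and its own data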

◆ flatten_()

void cytnx::Tensor::flatten_ ( )
inline

The flatten function, in place.

This is the in-place version of flatten(): it makes the current tensor contiguous and reshapes it into a rank-1 Tensor.

Note
Compared to the flatten() function, this is an in-place function; the current tensor is changed.

◆ from_storage()

static Tensor cytnx::Tensor::from_storage ( const Storage &in )
inlinestatic

Convert a Storage to Tensor.

Parameters
[in] in: the Storage to be converted
Returns
[Tensor] a Tensor with the same dtype and device as the input Storage
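
A minimal sketch (illustrative only; whether the resulting Tensor shares memory with the input Storage is not specified here):

Tensor A = arange(4);
Storage s = A.storage(); // the underlying storage of A, see storage()
Tensor B = Tensor::from_storage(s); // a Tensor with the same dtype/device as s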

◆ Fromfile() [1/2]

static Tensor cytnx::Tensor::Fromfile ( const char *fname,
const unsigned int &dtype,
const cytnx_int64 &count = -1
)
static

◆ Fromfile() [2/2]

static Tensor cytnx::Tensor::Fromfile ( const std::string &fname,
const unsigned int &dtype,
const cytnx_int64 &count = -1
)
static

Load current Tensor from the binary file.

This function loads a Tensor from a binary file saved by cytnx::Tensor::Tofile. Given the file name fname, the data type dtype, and the number of elements count, it loads the first count elements of the binary file fname with data type dtype.

Parameters
[in] fname: the file name of the binary file.
[in] dtype: the data type of the binary file. This can be any of the types defined in cytnx::Type.
[in] count: the number of elements to be loaded from the binary file. If set to -1, all elements in the binary file will be loaded.
Returns
Tensor
Precondition
  1. The dtype cannot be Type.Void.
  2. The dtype must be the same as the data type of the binary file.
  3. count cannot be 0.
  4. count cannot be larger than the number of elements in the binary file.
  5. The file name fname must be valid.
See also
cytnx::Tensor::Tofile
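
A minimal round-trip sketch (the file name "A.bin" is illustrative):

Tensor A = arange(10); // Type.Double by default
A.Tofile("A.bin"); // raw binary dump, see cytnx::Tensor::Tofile
Tensor B = Tensor::Fromfile("A.bin", Type.Double, -1); // dtype must match the file; -1 loads all elements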

◆ get()

Tensor cytnx::Tensor::get ( const std::vector< cytnx::Accessor > &  accessors) const
inline

get elements using Accessor (C++ API) / slices (python API)

Parameters
[in] accessors: the Accessor (C++ API) / slices (python API) used to get the elements.
Returns
[Tensor]
See also
Accessor, for how to specify coordinates with an Accessor in the C++ API.
Note
  1. the return will be a new Tensor instance, which does not share memory with the current Tensor.

Equivalently:

One can also use a more intuitive way to get a slice, using the [] operator.

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main() {
typedef Accessor ac;
/*
In C++ API, generally, there are two ways you can access Tensor.
1. Using operator():
This is more of a 'python' way.
2. Using the low-level API or operator[]:
This is more of a 'c++' way.
*/
Tensor A = arange(60).reshape({3, 4, 5});
cout << A << endl;
// Method 1, Using operator():
//-----------------------------------------
Tensor B = A(2, ":", "2:5:1");
cout << B << endl;
/* [Note]
This is equivalent as:
> Tensor B = A(2,ac::all(),ac::range(2,5,1));
See also cytnx::Accessor.
*/
// Method 2, Using operator[] or low-level API get():
//----------------------------------------
Tensor B2 = A[{ac(2), ac::all(), ac::range(2, 5, 1)}]; // remember the {} brackets
cout << B2 << endl;
/* [Note]
You can also use the low-level API get() as
> Tensor B2 = A.get({ac(2),ac::all(),ac::range(2,5,1)});
*/
return 0;
}

output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 ]
  [4.50000e+01 4.60000e+01 4.70000e+01 4.80000e+01 4.90000e+01 ]
  [5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 ]
  [5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]]]



Total elem: 12
type  : Double (Float64)
cytnx device: CPU
Shape : (4,3)
[[4.20000e+01 4.30000e+01 4.40000e+01 ]
 [4.70000e+01 4.80000e+01 4.90000e+01 ]
 [5.20000e+01 5.30000e+01 5.40000e+01 ]
 [5.70000e+01 5.80000e+01 5.90000e+01 ]]



Total elem: 12
type  : Double (Float64)
cytnx device: CPU
Shape : (4,3)
[[4.20000e+01 4.30000e+01 4.40000e+01 ]
 [4.70000e+01 4.80000e+01 4.90000e+01 ]
 [5.20000e+01 5.30000e+01 5.40000e+01 ]
 [5.70000e+01 5.80000e+01 5.90000e+01 ]]

python API:

import sys
from pathlib import Path
home = str(Path.home())
sys.path.append(home + '/Cytnx_lib')
from cytnx import *
A = arange(60).reshape(3,4,5)
print(A)
B = A[2,:,2:5:1]
print(B)

output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 ]
  [4.50000e+01 4.60000e+01 4.70000e+01 4.80000e+01 4.90000e+01 ]
  [5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 ]
  [5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]]]




Total elem: 12
type  : Double (Float64)
cytnx device: CPU
Shape : (4,3)
[[4.20000e+01 4.30000e+01 4.40000e+01 ]
 [4.70000e+01 4.80000e+01 4.90000e+01 ]
 [5.20000e+01 5.30000e+01 5.40000e+01 ]
 [5.70000e+01 5.80000e+01 5.90000e+01 ]]

◆ imag()

Tensor cytnx::Tensor::imag ( )

return the imaginary part of the tensor.

Returns
[Tensor] the imaginary part of the tensor.
Precondition
the tensor must be complex type (Type.ComplexDouble or Type.ComplexFloat).
See also
cytnx::Type
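
For example, a minimal sketch with a complex-typed Tensor created by zeros():

Tensor A = zeros({2, 2}, Type.ComplexDouble);
Tensor re = A.real(); // the real part
Tensor im = A.imag(); // the imaginary part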

◆ Init()

void cytnx::Tensor::Init ( const std::vector< cytnx_uint64 > &shape,
const unsigned int &dtype = Type.Double,
const int &device = -1,
const bool &init_zero = true
)
inline

initialize a Tensor

Parameters
[in] shape: the shape of the tensor.
[in] dtype: the dtype of the tensor. This can be any of the types defined in cytnx::Type.
[in] device: the device on which the tensor is created. This can be cytnx::Device.cpu or cytnx::Device.cuda+<gpuid>; see cytnx::Device for more detail.
[in] init_zero: if true, the content of the Tensor will be initialized to zero; if false, the content of the Tensor will be uninitialized.
Note
If init_zero is false, the content of the created Tensor is uninitialized! See zeros(), ones() or arange() for generating an initialized Tensor.

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main() {
/*
1. Create a Tensor with
shape (3,4,5),
dtype =Type.Double [default],
device=Device.cpu [default]
*/
Tensor A({3, 4, 5});
cout << A << endl;
/*
2. Create a Tensor with
shape (3,4,5),
dtype =Type.Uint64,
device=Device.cpu [default],
[Note] the dtype can be any one of the supported type.
*/
Tensor B({3, 4, 5}, Type.Uint64);
cout << B << endl;
/*
3. Initialize a Tensor with
shape (3,4,5),
dtype =Type.Double,
device=Device.cuda+0, (on gpu with gpu-id=0)
[Note] the gpu device can be set with Device.cuda+<gpu-id>
*/
Tensor C({3, 4, 5}, Type.Double, Device.cuda + 0);
cout << C << endl;
// 4. Create an empty Tensor, and init later
Tensor D;
D.Init({3, 4, 5}, Type.Double, Device.cpu);
return 0;
}

output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]
 [[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]
 [[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]]



Total elem: 60
type  : Uint64
cytnx device: CPU
Shape : (3,4,5)
[[[                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]]
 [[                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]]
 [[                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]]]

python API:

import sys
from pathlib import Path
home = str(Path.home())
sys.path.append(home + '/Cytnx_lib')
from cytnx import *
#1. Create a Tensor with
# shape (3,4,5),
# dtype =Type.Double [default],
# device=Device.cpu [default]
A = Tensor([3,4,5])
print(A)
#2. Create a Tensor with
# shape (3,4,5),
# dtype =Type.Uint64,
# device=Device.cpu [default],
# [Note] the dtype can be any one of the supported type.
B = Tensor([3,4,5],dtype=Type.Uint64)
print(B)
#3. Initialize a Tensor with
# shape (3,4,5),
# dtype =Type.Double,
# device=Device.cuda+0, (on gpu with gpu-id=0)
# [Note] the gpu device can be set with Device.cuda+<gpu-id>
C = Tensor([3,4,5],dtype=Type.Double,device=Device.cuda+0);
print(C)
#4. Create an empty Tensor, and init later
D = Tensor()
D.Init([3,4,5],dtype=Type.Double,device=Device.cpu);

output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]
 [[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]
 [[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]]




Total elem: 60
type  : Uint64
cytnx device: CPU
Shape : (3,4,5)
[[[                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]]
 [[                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]]
 [[                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]]]




Total elem: 60
type  : Double (Float64)
cytnx device: CUDA/GPU-id:0
Shape : (3,4,5)
[[[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]
 [[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]
 [[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]]

◆ Inv()

Tensor cytnx::Tensor::Inv ( const double &clip ) const

the Inv member function. Same as cytnx::linalg::Inv(const Tensor &Tin, const double &clip), where Tin is the current Tensor.

◆ Inv_()

Tensor & cytnx::Tensor::Inv_ ( const double &clip )

the Inv_ member function. Same as cytnx::linalg::Inv_(Tensor &Tin, const double &clip)

◆ InvM()

Tensor cytnx::Tensor::InvM ( ) const

the InvM member function. Same as cytnx::linalg::InvM(const Tensor &Tin), where Tin is the current Tensor.
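
A minimal sketch (assuming, as the name suggests, a square and invertible matrix is required):

Tensor A = zeros({2, 2});
A.at<double>({0, 0}) = 2.0; // build an invertible diagonal matrix by hand
A.at<double>({1, 1}) = 4.0;
Tensor B = A.InvM(); // matrix inverse: diag(0.5, 0.25); A itself is unchanged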

◆ InvM_()

Tensor & cytnx::Tensor::InvM_ ( )

the InvM_ member function. Same as cytnx::linalg::InvM_(Tensor &Tin), where Tin is the current Tensor.

◆ is_contiguous()

const bool & cytnx::Tensor::is_contiguous ( ) const
inline

return whether the Tensor is contiguous or not.

Returns
[bool] true if the Tensor is contiguous, false otherwise.

◆ item()

template<class T >
T & cytnx::Tensor::item ( )
inline

get the element from a rank-0 Tensor.

This function is used to get the element from a rank-0 Tensor. If the template type is not given, the return will be a Scalar.

Returns
[T]
Note
  1. This can only be called on a rank-0 Tensor (scalar). For the C++ API, a template instantiation of the type is needed to resolve it, and it should be consistent with the dtype of the Tensor. An error will be issued if the template type is inconsistent with the current dtype of the Tensor.
  2. Although the return is by reference in the C++ part, the return in python is not.
  3. From 2., we recommend users use at<T> (C++ API) and [] (python API) to modify the value of the element, for consistent syntax across the two languages.

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main() {
Tensor A = ones(1, Type.Uint64);
cout << A << endl;
// note that the type resolver should be consistent with the dtype
cout << A.item<cytnx_uint64>() << endl;
return 0;
}

output>

Total elem: 1
type  : Uint64
cytnx device: CPU
Shape : (1)
[                  1 ]


1

python API:

import sys
from pathlib import Path
home = str(Path.home())
sys.path.append(home + '/Cytnx_lib')
from cytnx import *
A = ones(1,Type.Uint64)
print(A)
print(A.item())

output>

Total elem: 1
type  : Uint64
cytnx device: CPU
Shape : (1)
[                  1 ]



1

◆ Load() [1/2]

static Tensor cytnx::Tensor::Load ( const char *fname )
static

◆ Load() [2/2]

static Tensor cytnx::Tensor::Load ( const std::string &  fname)
static

Load current Tensor from file.

Parameters
[in] fname: file name

Load the Tensor from the file path specified by the input parameter 'fname'.

Precondition
the file must be a Tensor object which is saved by cytnx::Tensor::Save.
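
A minimal round-trip sketch (the ".cytn" extension passed to Load is an assumption here; pass whatever file name Save actually produced):

Tensor A = arange(6).reshape({2, 3});
A.Save("A"); // see cytnx::Tensor::Save
Tensor B = Tensor::Load("A.cytn"); // [assumption] Save appends a ".cytn" extension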

◆ Max()

Tensor cytnx::Tensor::Max ( ) const

the Max member function. Same as linalg::Max(const Tensor &Tin), where Tin is the current Tensor.

◆ Min()

Tensor cytnx::Tensor::Min ( ) const

the Min member function. Same as linalg::Min(const Tensor &Tin), where Tin is the current Tensor.

◆ Mod()

template<class T >
Tensor cytnx::Tensor::Mod ( const T &rhs)
inline

the Mod member function: element-wise modulo with a Tensor or a scalar.

◆ Mul()

template<class T >
Tensor cytnx::Tensor::Mul ( const T &rhs)
inline

Multiplication function with a Tensor or a scalar. Same as cytnx::operator*(const Tensor &self, const T &rhs).

Parameters
[in] rhs: the multiplied Tensor or scalar.

◆ Mul_()

template<class T >
Tensor & cytnx::Tensor::Mul_ ( const T &rhs)
inline

Multiplication function with a Tensor or a scalar, in place. Same as operator*=(const T &rhs).

Parameters
[in] rhs: the multiplied Tensor or scalar.

◆ Norm()

Tensor cytnx::Tensor::Norm ( ) const

the Norm member function. Same as linalg::Norm(const Tensor &Tin), where Tin is the current Tensor.
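
A minimal sketch covering Max(), Min() and Norm() together (assuming Norm is the 2-norm over all elements):

Tensor A = arange(6); // [0, 1, 2, 3, 4, 5]
Tensor mx = A.Max(); // a Tensor holding 5
Tensor mn = A.Min(); // a Tensor holding 0
Tensor nm = A.Norm(); // a Tensor holding sqrt(0^2 + ... + 5^2) = sqrt(55) ~ 7.416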

◆ operator*=()

template<class T >
Tensor & cytnx::Tensor::operator*= ( const T &rc)

multiplication assignment operator with a Tensor or a scalar.

This function multiplies the right-hand side into the current tensor, in place. The right-hand side can be either a scalar or a tensor. If it is a scalar, the scalar is multiplied into every element of the current tensor. If it is a tensor, its shape must be the same as that of the current tensor. The supported types are Tensor, Scalar, or any scalar type (see cytnx_complex128, cytnx_complex64, cytnx_double, cytnx_float, cytnx_int64, cytnx_int32, cytnx_int16, cytnx_uint64, cytnx_uint32, cytnx_uint16, cytnx_bool).

Parameters
[in] rc: the multiplied Tensor or scalar.
Precondition
If the template type is Tensor, then the shape of the template tensor must be the same as the current tensor.
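
For instance, a minimal sketch of both the scalar and the tensor case:

Tensor A = arange(4); // [0, 1, 2, 3]
A *= 2; // scalar: every element is multiplied -> [0, 2, 4, 6]
Tensor B = arange(4);
A *= B; // tensor: element-wise, the shapes must match -> [0, 2, 8, 18]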

◆ operator+=()

template<class T >
Tensor & cytnx::Tensor::operator+= ( const T &rc)

addition assignment operator with a Tensor or a scalar.

This function adds the right-hand side to the current tensor, in place. The right-hand side can be either a scalar or a tensor. If it is a scalar, the scalar is added to every element of the current tensor. If it is a tensor, its shape must be the same as that of the current tensor. The supported types are Tensor, Scalar, or any scalar type (see cytnx_complex128, cytnx_complex64, cytnx_double, cytnx_float, cytnx_int64, cytnx_int32, cytnx_int16, cytnx_uint64, cytnx_uint32, cytnx_uint16, cytnx_bool).

Parameters
[in] rc: the added Tensor or scalar.
Precondition
If the template type is Tensor, then the shape of the template tensor must be the same as the current tensor.

◆ operator-()

Tensor cytnx::Tensor::operator- ( ) const

The negation function.

This function is the negation function. Namely, if the current tensor is A, then the output tensor is -A.

Returns
The negation of the current tensor.

◆ operator-=()

template<class T >
Tensor & cytnx::Tensor::operator-= ( const T &rc)

subtraction assignment operator with a Tensor or a scalar.

This function subtracts the right-hand side from the current tensor, in place. The right-hand side can be either a scalar or a tensor. If it is a scalar, the scalar is subtracted from every element of the current tensor. If it is a tensor, its shape must be the same as that of the current tensor. The supported types are Tensor, Scalar, or any scalar type (see cytnx_complex128, cytnx_complex64, cytnx_double, cytnx_float, cytnx_int64, cytnx_int32, cytnx_int16, cytnx_uint64, cytnx_uint32, cytnx_uint16, cytnx_bool).

Parameters
[in] rc: the subtracted Tensor or scalar.
Precondition
If the template type is Tensor, then the shape of the template tensor must be the same as the current tensor.

◆ operator/=()

template<class T >
Tensor & cytnx::Tensor::operator/= ( const T &rc)

division assignment operator with a Tensor or a scalar.

This function divides the current tensor by the right-hand side, in place. The right-hand side can be either a scalar or a tensor. If it is a scalar, every element of the current tensor is divided by the scalar. If it is a tensor, its shape must be the same as that of the current tensor. The supported types are Tensor, Scalar, or any scalar type (see cytnx_complex128, cytnx_complex64, cytnx_double, cytnx_float, cytnx_int64, cytnx_int32, cytnx_int16, cytnx_uint64, cytnx_uint32, cytnx_uint16, cytnx_bool).

Parameters
[in] rc: the Tensor or scalar divisor.
Precondition
  1. If the template type is Tensor, then the shape of the template tensor must be the same as the current tensor.
  2. rc cannot be zero.
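
For instance, a minimal sketch (note precondition 2 on rc):

Tensor A = ones(4); // [1, 1, 1, 1]
A /= 2.0; // [0.5, 0.5, 0.5, 0.5]
// A /= 0.0; // would violate precondition 2: rc cannot be zero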

◆ permute()

Tensor cytnx::Tensor::permute ( const std::vector< cytnx_uint64 > &  rnks) const
inline

perform tensor permute on the cytnx::Tensor and return a new instance.

Parameters
[in] rnks: the permutation indices; the number of elements should equal the rank of the tensor.
Returns
[Tensor] a permuted new Tensor
Precondition
  1. The size of input and output Tensor should be the same.
  2. rnks cannot contain duplicated elements.

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main() {
Tensor A({3, 4, 5});
cout << A.shape() << endl;
Tensor B = A.permute({0, 2, 1});
cout << B.shape() << endl;
cout << is(B, A) << endl; // this should be false, different object.
cout << B.same_data(A) << endl; // this should be true, since no new pointer/memory is created.
return 0;
}

output>

Vector Print:
Total Elements:3
[3, 4, 5]

Vector Print:
Total Elements:3
[3, 5, 4]

0
1

python API:

import sys
from pathlib import Path
home = str(Path.home())
sys.path.append(home + '/Cytnx_lib')
from cytnx import *
A = Tensor([3,4,5])
print(A.shape())
B = A.permute(0,2,1)
print(B.shape())
print(B is A) #False
print(B.same_data(A)) #True

output>

[3, 4, 5]
[3, 5, 4]
False
True

◆ permute_()

Tensor cytnx::Tensor::permute_ ( const std::vector< cytnx_uint64 > &rnks)
inline

perform tensor permute on the cytnx::Tensor, in place; cf. permute().

◆ Pow()

Tensor cytnx::Tensor::Pow ( const cytnx_double &p ) const

the Pow member function. Same as linalg::Pow(const Tensor &Tin, const cytnx_double &p), where Tin is the current Tensor.
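
For instance, a minimal sketch (element-wise power, following linalg::Pow):

Tensor A = arange(4); // [0, 1, 2, 3]
Tensor B = A.Pow(2.0); // [0, 1, 4, 9]; A itself is unchanged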

◆ Pow_()

Tensor & cytnx::Tensor::Pow_ ( const cytnx_double &p )

the Pow_ member function. Same as linalg::Pow_(Tensor &Tin, const cytnx_double &p), where Tin is the current Tensor.

◆ rank()

cytnx_uint64 cytnx::Tensor::rank ( ) const
inline

the rank of the Tensor

Returns
[cytnx_uint64] the rank of the Tensor

◆ real()

Tensor cytnx::Tensor::real ( )

return the real part of the tensor.

Returns
[Tensor] the real part of the tensor.
Precondition
the tensor must be complex type (Type.ComplexDouble or Type.ComplexFloat).
See also
cytnx::Type

◆ reshape() [1/6]

Tensor cytnx::Tensor::reshape ( const std::initializer_list< cytnx_int64 > &  new_shape) const
inline

◆ reshape() [2/6]

Tensor cytnx::Tensor::reshape ( const std::initializer_list< cytnx_int64 > &  new_shape) const
inline

◆ reshape() [3/6]

Tensor cytnx::Tensor::reshape ( const std::vector< cytnx_int64 > &  new_shape) const
inline

return a new Tensor that is reshaped.

Parameters
[in] new_shape: the new shape of the Tensor.
Returns
[Tensor]
Precondition
  1. The size of input and output Tensor should be the same.
  2. new_shape cannot be empty.
Note
  1. This function will not change the original Tensor.
  2. You can use Tensor::reshape_() to reshape the Tensor inplacely.
  3. One entry of new_shape can be set to -1; that dimension is then determined automatically from the size of the Tensor. The behavior is the same as numpy.reshape().
See also
Tensor::reshape_()
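
A small sketch of note 3 (one -1 entry is inferred):

Tensor A = arange(60);
Tensor B = A.reshape({5, -1}); // -1 is inferred as 12, giving shape (5,12)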

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main() {
Tensor A = arange(60);
// there are two ways you can pass arguments to reshape:
// Method 1: more like the 'C++' way:
Tensor B = A.reshape({5, 12}); // note the {} brackets
cout << A << endl;
cout << B << endl;
// Method 2: more like 'python' way:
Tensor B2 = A.reshape(5, 12);
return 0;
}

output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (60)
[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 4.50000e+01 4.60000e+01 4.70000e+01 4.80000e+01 4.90000e+01 5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]



Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (5,12)
[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 1.00000e+01 1.10000e+01 ]
 [1.20000e+01 1.30000e+01 1.40000e+01 1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 ]
 [2.40000e+01 2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 3.50000e+01 ]
 [3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 4.50000e+01 4.60000e+01 4.70000e+01 ]
 [4.80000e+01 4.90000e+01 5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]]

python API:

import sys
from pathlib import Path
home = str(Path.home())
sys.path.append(home + '/Cytnx_lib')
from cytnx import *
A = arange(60)
B = A.reshape(5,12)
print(A)
print(B)

output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (60)
[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 4.50000e+01 4.60000e+01 4.70000e+01 4.80000e+01 4.90000e+01 5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]




Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (5,12)
[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 1.00000e+01 1.10000e+01 ]
 [1.20000e+01 1.30000e+01 1.40000e+01 1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 ]
 [2.40000e+01 2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 3.50000e+01 ]
 [3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 4.50000e+01 4.60000e+01 4.70000e+01 ]
 [4.80000e+01 4.90000e+01 5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]]

◆ reshape() [4/6]

Tensor cytnx::Tensor::reshape ( const std::vector< cytnx_int64 > &  new_shape) const
inline

return a new Tensor that is reshaped.

Parameters
[in] new_shape   the new shape of the Tensor.
Returns
[Tensor]
Precondition
  1. The total number of elements must stay the same (the sizes of the input and output Tensor must match).
  2. new_shape cannot be empty.
Note
  1. This function does not change the original Tensor.
  2. Use Tensor::reshape_() to reshape the Tensor in place.
  3. One entry of new_shape can be set to -1; that dimension is then inferred automatically from the size of the Tensor. The behavior is the same as numpy.reshape(); see the sketch below.
See also
Tensor::reshape_()
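
A minimal sketch of Note 3 (the -1 inference); the shapes in the comments are the expected results:

Tensor A = arange(60);
Tensor B = A.reshape({5, -1}); // -1 is inferred as 12 -> shape (5,12)
Tensor C = A.reshape({-1, 4, 3}); // -1 is inferred as 5 -> shape (5,4,3)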

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main() {
Tensor A = arange(60);
// there are two ways you can give argument to reshape:
// Method 1: more like 'C++' way:
Tensor B = A.reshape({5, 12}); // note the braces {}
cout << A << endl;
cout << B << endl;
// Method 2: more like 'python' way:
Tensor B2 = A.reshape(5, 12);
return 0;
}

output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (60)
[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 4.50000e+01 4.60000e+01 4.70000e+01 4.80000e+01 4.90000e+01 5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]



Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (5,12)
[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 1.00000e+01 1.10000e+01 ]
 [1.20000e+01 1.30000e+01 1.40000e+01 1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 ]
 [2.40000e+01 2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 3.50000e+01 ]
 [3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 4.50000e+01 4.60000e+01 4.70000e+01 ]
 [4.80000e+01 4.90000e+01 5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]]

python API:

import sys
from pathlib import Path
home = str(Path.home())
sys.path.append(home + '/Cytnx_lib')
from cytnx import *
A = arange(60)
B = A.reshape(5,12)
print(A)
print(B)

output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (60)
[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 4.50000e+01 4.60000e+01 4.70000e+01 4.80000e+01 4.90000e+01 5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]




Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (5,12)
[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 1.00000e+01 1.10000e+01 ]
 [1.20000e+01 1.30000e+01 1.40000e+01 1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 ]
 [2.40000e+01 2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 3.50000e+01 ]
 [3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 4.50000e+01 4.60000e+01 4.70000e+01 ]
 [4.80000e+01 4.90000e+01 5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]]

◆ reshape() [5/6]

Tensor cytnx::Tensor::reshape ( const std::vector< cytnx_uint64 > &  new_shape) const
inline

◆ reshape_()

void cytnx::Tensor::reshape_ ( const std::vector< cytnx_int64 > &  new_shape)
inline

reshape the Tensor, in place

Parameters
[in] new_shape   the new shape of the Tensor.
Precondition
  1. The total number of elements must stay the same (the sizes of the input and output Tensor must match).
  2. new_shape cannot be empty.
See also
Tensor::reshape()
Note
Compared to reshape(), this function does not create a new Tensor; it reshapes the current Tensor in place.

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main() {
Tensor A = arange(60);
cout << A << endl;
// there are two ways you can assign the argument:
// Method 1: more like 'c++' way:
A.reshape_({5, 12}); // note the braces {}
cout << A << endl;
// Method 2: more like 'python' way:
A.reshape_(5, 4, 3);
cout << A << endl;
return 0;
}

output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (60)
[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 4.50000e+01 4.60000e+01 4.70000e+01 4.80000e+01 4.90000e+01 5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]



Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (5,12)
[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 1.00000e+01 1.10000e+01 ]
 [1.20000e+01 1.30000e+01 1.40000e+01 1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 ]
 [2.40000e+01 2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 3.50000e+01 ]
 [3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 4.50000e+01 4.60000e+01 4.70000e+01 ]
 [4.80000e+01 4.90000e+01 5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]]



Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (5,4,3)
[[[0.00000e+00 1.00000e+00 2.00000e+00 ]
  [3.00000e+00 4.00000e+00 5.00000e+00 ]
  [6.00000e+00 7.00000e+00 8.00000e+00 ]
  [9.00000e+00 1.00000e+01 1.10000e+01 ]]
 [[1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 ]
  [1.80000e+01 1.90000e+01 2.00000e+01 ]
  [2.10000e+01 2.20000e+01 2.30000e+01 ]]
 [[2.40000e+01 2.50000e+01 2.60000e+01 ]
  [2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 ]
  [3.30000e+01 3.40000e+01 3.50000e+01 ]]
 [[3.60000e+01 3.70000e+01 3.80000e+01 ]
  [3.90000e+01 4.00000e+01 4.10000e+01 ]
  [4.20000e+01 4.30000e+01 4.40000e+01 ]
  [4.50000e+01 4.60000e+01 4.70000e+01 ]]
 [[4.80000e+01 4.90000e+01 5.00000e+01 ]
  [5.10000e+01 5.20000e+01 5.30000e+01 ]
  [5.40000e+01 5.50000e+01 5.60000e+01 ]
  [5.70000e+01 5.80000e+01 5.90000e+01 ]]]

python API:

import sys
from pathlib import Path
home = str(Path.home())
sys.path.append(home + '/Cytnx_lib')
from cytnx import *
A = arange(60)
print(A)
A.reshape_(5,12)
print(A)

output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (60)
[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 4.50000e+01 4.60000e+01 4.70000e+01 4.80000e+01 4.90000e+01 5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]




Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (5,12)
[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 1.00000e+01 1.10000e+01 ]
 [1.20000e+01 1.30000e+01 1.40000e+01 1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 ]
 [2.40000e+01 2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 3.50000e+01 ]
 [3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 4.50000e+01 4.60000e+01 4.70000e+01 ]
 [4.80000e+01 4.90000e+01 5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]]

◆ same_data()

bool cytnx::Tensor::same_data ( const Tensor &  rhs) const

Check whether two tensors share the same internal memory.

Returns true if the two tensors share the same internal memory (storage), and false otherwise. See the user guide for more details.

Parameters
[in] rhs   the tensor to be compared.
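
A small sketch of the expected behavior, assuming (as elsewhere in this reference) that permute() only changes meta-data while clone() copies the storage:

Tensor A = arange(6).reshape({2, 3});
Tensor B = A.permute({1, 0});
cout << A.same_data(B) << endl; // expected: true (B is a view on the same storage)
cout << A.same_data(A.clone()) << endl; // expected: false (clone has its own storage)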

◆ Save() [1/2]

void cytnx::Tensor::Save ( const char *  fname) const

◆ Save() [2/2]

void cytnx::Tensor::Save ( const std::string &  fname) const

Save current Tensor to file.

Parameters
[in] fname   file name (without file extension)

Save the Tensor to the file path specified by the input parameter fname; the postfix ".cytn" is appended automatically.

See also
Load(const std::string &fname)
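
A minimal round-trip sketch; the static Tensor::Load used here is assumed to be the counterpart referenced above:

Tensor A = arange(12).reshape({3, 4});
A.Save("myten"); // writes "myten.cytn"
Tensor B = Tensor::Load("myten.cytn"); // assumed static loader, see Load()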

◆ set() [1/2]

template<class T >
void cytnx::Tensor::set ( const std::vector< cytnx::Accessor > &  accessors,
const T rc 
)
inline

set elements with the input constant using Accessor (C++ API) / slices (python API)

Parameters
[in] accessors   the list (vector) of accessors.
[in] rc   the constant to assign.
See also
Tensor::fill for filling all elements with assigned constant.

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main() {
typedef Accessor ac;
// Let's define two Tensor.
Tensor A = arange(60).reshape({3, 4, 5});
cout << A << endl;
Tensor B = zeros({4, 3});
cout << B << endl;
// I. To set part of A with Tensor B, or a constant:
// [Method 1] Using direct assignment
//-------------------------------------
A(2, ":", "2:5:1") = B;
cout << A << endl;
A(2, ":", "2:5:1") = 999;
cout << A << endl;
// note this is the same as
// A(ac(2),ac::all(),ac::range(2,5,1)) = B;
// A[{ac(2),ac::all(),ac::range(2,5,1)}] = B; // note the braces {}
// [Method 2] Using low-level API set():
//--------------------------------------
A.set({ac(2), ac::all(), ac::range(2, 5, 1)}, B);
cout << A << endl;
A.set({ac(2), ac::all(), ac::range(0, 2, 1)}, 999);
cout << A << endl;
return 0;
}

output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 ]
  [4.50000e+01 4.60000e+01 4.70000e+01 4.80000e+01 4.90000e+01 ]
  [5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 ]
  [5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]]]



Total elem: 12
type  : Double (Float64)
cytnx device: CPU
Shape : (4,3)
[[0.00000e+00 0.00000e+00 0.00000e+00 ]
 [0.00000e+00 0.00000e+00 0.00000e+00 ]
 [0.00000e+00 0.00000e+00 0.00000e+00 ]
 [0.00000e+00 0.00000e+00 0.00000e+00 ]]



Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [4.50000e+01 4.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.00000e+01 5.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.50000e+01 5.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]]]



Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [4.50000e+01 4.60000e+01 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [5.00000e+01 5.10000e+01 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [5.50000e+01 5.60000e+01 9.99000e+02 9.99000e+02 9.99000e+02 ]]]



Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [4.50000e+01 4.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.00000e+01 5.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.50000e+01 5.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]]]



Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[9.99000e+02 9.99000e+02 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [9.99000e+02 9.99000e+02 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [9.99000e+02 9.99000e+02 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [9.99000e+02 9.99000e+02 0.00000e+00 0.00000e+00 0.00000e+00 ]]]

python API:

import sys
from pathlib import Path
home = str(Path.home())
sys.path.append(home + '/Cytnx_lib')
from cytnx import *
A = arange(60).reshape(3,4,5)
print(A)
B = zeros([4,3])
print(B)
A[2,:,2:5:1] = B
print(A)
A[0,:,0:2:1] = 999
print(A)

output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 ]
  [4.50000e+01 4.60000e+01 4.70000e+01 4.80000e+01 4.90000e+01 ]
  [5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 ]
  [5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]]]




Total elem: 12
type  : Double (Float64)
cytnx device: CPU
Shape : (4,3)
[[0.00000e+00 0.00000e+00 0.00000e+00 ]
 [0.00000e+00 0.00000e+00 0.00000e+00 ]
 [0.00000e+00 0.00000e+00 0.00000e+00 ]
 [0.00000e+00 0.00000e+00 0.00000e+00 ]]




Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [4.50000e+01 4.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.00000e+01 5.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.50000e+01 5.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]]]




Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[9.99000e+02 9.99000e+02 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [9.99000e+02 9.99000e+02 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [9.99000e+02 9.99000e+02 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [9.99000e+02 9.99000e+02 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [4.50000e+01 4.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.00000e+01 5.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.50000e+01 5.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]]]

◆ set() [2/2]

void cytnx::Tensor::set ( const std::vector< cytnx::Accessor > &  accessors,
const Tensor rhs 
)
inline

set elements with the input Tensor using Accessor (C++ API) / slices (python API)

Parameters
[in] accessors   the list (vector) of accessors.
[in] rhs   the Tensor to assign.
Note
The shape of the input Tensor must match the shape selected by the accessors. The memory is not shared with the input Tensor; the values are copied, as the sketch below illustrates.
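
A short sketch making the note concrete; the ones() generator is assumed alongside zeros(), and the values in the comments are the expected ones:

typedef Accessor ac;
Tensor A = zeros({3, 4, 5});
Tensor B = ones({4, 3});
A.set({ac(2), ac::all(), ac::range(2, 5, 1)}, B); // copies the values of B into A
B.fill(7); // A is unchanged: the affected slice of A still holds 1's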

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main() {
typedef Accessor ac;
// Let's define two Tensor.
Tensor A = arange(60).reshape({3, 4, 5});
cout << A << endl;
Tensor B = zeros({4, 3});
cout << B << endl;
// I. To set part of A with Tensor B, or a constant:
// [Method 1] Using direct assignment
//-------------------------------------
A(2, ":", "2:5:1") = B;
cout << A << endl;
A(2, ":", "2:5:1") = 999;
cout << A << endl;
// note this is the same as
// A(ac(2),ac::all(),ac::range(2,5,1)) = B;
// A[{ac(2),ac::all(),ac::range(2,5,1)}] = B; // note the braces {}
// [Method 2] Using low-level API set():
//--------------------------------------
A.set({ac(2), ac::all(), ac::range(2, 5, 1)}, B);
cout << A << endl;
A.set({ac(2), ac::all(), ac::range(0, 2, 1)}, 999);
cout << A << endl;
return 0;
}

output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 ]
  [4.50000e+01 4.60000e+01 4.70000e+01 4.80000e+01 4.90000e+01 ]
  [5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 ]
  [5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]]]



Total elem: 12
type  : Double (Float64)
cytnx device: CPU
Shape : (4,3)
[[0.00000e+00 0.00000e+00 0.00000e+00 ]
 [0.00000e+00 0.00000e+00 0.00000e+00 ]
 [0.00000e+00 0.00000e+00 0.00000e+00 ]
 [0.00000e+00 0.00000e+00 0.00000e+00 ]]



Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [4.50000e+01 4.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.00000e+01 5.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.50000e+01 5.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]]]



Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [4.50000e+01 4.60000e+01 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [5.00000e+01 5.10000e+01 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [5.50000e+01 5.60000e+01 9.99000e+02 9.99000e+02 9.99000e+02 ]]]



Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [4.50000e+01 4.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.00000e+01 5.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.50000e+01 5.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]]]



Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[9.99000e+02 9.99000e+02 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [9.99000e+02 9.99000e+02 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [9.99000e+02 9.99000e+02 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [9.99000e+02 9.99000e+02 0.00000e+00 0.00000e+00 0.00000e+00 ]]]

python API:

import sys
from pathlib import Path
home = str(Path.home())
sys.path.append(home + '/Cytnx_lib')
from cytnx import *
A = arange(60).reshape(3,4,5)
print(A)
B = zeros([4,3])
print(B)
A[2,:,2:5:1] = B
print(A)
A[0,:,0:2:1] = 999
print(A)

output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 ]
  [4.50000e+01 4.60000e+01 4.70000e+01 4.80000e+01 4.90000e+01 ]
  [5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 ]
  [5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]]]




Total elem: 12
type  : Double (Float64)
cytnx device: CPU
Shape : (4,3)
[[0.00000e+00 0.00000e+00 0.00000e+00 ]
 [0.00000e+00 0.00000e+00 0.00000e+00 ]
 [0.00000e+00 0.00000e+00 0.00000e+00 ]
 [0.00000e+00 0.00000e+00 0.00000e+00 ]]




Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [4.50000e+01 4.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.00000e+01 5.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.50000e+01 5.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]]]




Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[9.99000e+02 9.99000e+02 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [9.99000e+02 9.99000e+02 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [9.99000e+02 9.99000e+02 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [9.99000e+02 9.99000e+02 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [4.50000e+01 4.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.00000e+01 5.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.50000e+01 5.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]]]

◆ shape()

const std::vector< cytnx_uint64 > & cytnx::Tensor::shape ( ) const
inline

the shape of the Tensor

Returns
[std::vector<cytnx_uint64>] the shape of the Tensor

◆ storage()

Storage & cytnx::Tensor::storage ( ) const
inline

return the storage of current Tensor.

Returns
[Storage]
Note
The returned Storage shares the same instance as the storage of the current Tensor. Use Storage.clone() to create an independent copy of the returned Storage; see the sketch below.
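
A short sketch of the sharing described in the note:

Tensor A = zeros({2, 2});
Storage &S = A.storage(); // shares the same instance as A's storage
Storage S2 = S.clone(); // independent copy, no longer shared with A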

◆ Sub()

template<class T >
Tensor cytnx::Tensor::Sub ( const T &  rhs)
inline

Subtraction function with a Tensor or a scalar. Same as cytnx::operator-(const Tensor &self, const T &rhs).

Parameters
[in] rhs   the Tensor or scalar to subtract.

◆ Sub_()

template<class T >
Tensor & cytnx::Tensor::Sub_ ( const T &  rhs)
inline

Subtraction function with a Tensor or a scalar, in place. Same as operator-=(const T &rhs).

Parameters
[in] rhs   the Tensor or scalar to subtract.
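
Both the member-function and the operator forms in one sketch (the equivalences are the ones stated above):

Tensor A = arange(4);
Tensor C = A.Sub(1.0); // same as: Tensor C = A - 1.0;
A.Sub_(1.0); // same as: A -= 1.0; (in place, returns a reference to A)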

◆ Svd()

std::vector< Tensor > cytnx::Tensor::Svd ( const bool &  is_UvT = true) const

The SVD member function. Same as cytnx::linalg::Svd(const Tensor &Tin, const bool &is_UvT), where Tin is the current Tensor.
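
A minimal sketch; the return order {S, U, vT} is an assumption based on the usual convention of cytnx::linalg::Svd:

Tensor A = arange(12).reshape({3, 4});
std::vector<Tensor> out = A.Svd(); // is_UvT = true by default
Tensor S = out[0], U = out[1], vT = out[2]; // assumed ordering {S, U, vT}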

◆ to()

Tensor cytnx::Tensor::to ( const int &  device) const
inline

copy a tensor to new device

Parameters
[in] device   the device-id to move to. It can be any device defined in cytnx::Device.
Returns
[Tensor]

description:
If the device-id is the same as the current Tensor's device, the Tensor itself is returned;
otherwise, a copy of the instance located on the target device is returned.
See also: Tensor.to_

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main() {
Tensor A({3, 4, 5});
// move the tensor to different device by creating a clone object
Tensor B = A.to(Device.cuda + 0);
cout << B.device_str() << endl;
cout << A.device_str() << endl;
return 0;
}

output>

 

python API:

import sys
from pathlib import Path
home = str(Path.home())
sys.path.append(home + '/Cytnx_lib')
from cytnx import *
A = Tensor([3,4,5])
B = A.to(Device.cuda+0)
print(B.device_str())
print(A.device_str())

output>

cytnx device: CUDA/GPU-id:0
cytnx device: CPU

◆ to_()

void cytnx::Tensor::to_ ( const int &  device)
inline

move the current Tensor to the device.

Parameters
[in] device   the device-id to move to. It can be any device defined in cytnx::Device.

See also: Tensor.to

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main() {
Tensor A({3, 4, 5});
// move the instance tensor to different device
A.to_(Device.cuda + 0);
cout << A.device_str() << endl;
return 0;
}

output>

 

python API:

import sys
from pathlib import Path
home = str(Path.home())
sys.path.append(home + '/Cytnx_lib')
from cytnx import *
A = Tensor([3,4,5])
A.to_(Device.cuda+0)
print(A.device_str())

output>

cytnx device: CUDA/GPU-id:0

◆ Tofile() [1/3]

void cytnx::Tensor::Tofile ( const char *  fname) const

◆ Tofile() [2/3]

void cytnx::Tensor::Tofile ( const std::string &  fname) const

Save current Tensor to the binary file.

This function will save the Tensor to the binary file with file name fname .

Parameters
[in] fname   the file name of the binary file.
Precondition
The file name fname must be valid.
See also
cytnx::Tensor::Fromfile
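
A round-trip sketch; the Fromfile signature used here (fname, dtype, count, with count = -1 reading all elements) is an assumption based on the cross-reference above:

Tensor A = arange(10);
A.Tofile("data.bin"); // binary dump of the elements
Tensor B = Tensor::Fromfile("data.bin", Type.Double, -1); // assumed signature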

◆ Tofile() [3/3]

void cytnx::Tensor::Tofile ( std::fstream &  f) const

◆ Trace()

Tensor cytnx::Tensor::Trace ( const cytnx_uint64 &  a = 0,
const cytnx_uint64 &  b = 1 
) const

The Trace member function. Same as linalg::Trace(const Tensor &Tin, const cytnx_uint64 &a, const cytnx_uint64 &b), where Tin is the current Tensor.
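
A small sketch; the value in the comment is worked out from the diagonal:

Tensor A = arange(9).reshape({3, 3});
Tensor t = A.Trace(); // trace over axes a=0, b=1: 0 + 4 + 8 = 12
cout << t << endl; // expected: a single element with value 12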


The documentation for this class was generated from the following files: