Cytnx v0.7.3
Loading...
Searching...
No Matches
Public Member Functions | Static Public Member Functions | List of all members
cytnx::Tensor Class Reference

a tensor (multi-dimensional array) More...

#include <Tensor.hpp>

Public Member Functions

void Save (const std::string &fname) const
 Save current Tensor to file.
 
void Save (const char *fname) const
 
void Tofile (const std::string &fname) const
 
void Tofile (const char *fname) const
 
void Tofile (std::fstream &f) const
 
void Init (const std::vector< cytnx_uint64 > &shape, const unsigned int &dtype=Type.Double, const int &device=-1)
 initialize a Tensor
 
 Tensor (const std::vector< cytnx_uint64 > &shape, const unsigned int &dtype=Type.Double, const int &device=-1)
 
unsigned int dtype () const
 the dtype-id of the Tensor
 
int device () const
 the device-id of the Tensor
 
std::string dtype_str () const
 the dtype (in string) of the Tensor
 
std::string device_str () const
 the device (in string) of the Tensor
 
const std::vector< cytnx_uint64 > & shape () const
 the shape of the Tensor
 
cytnx_uint64 rank () const
 the rank of the Tensor
 
Tensor clone () const
 return a clone of the current Tensor.
 
Tensor to (const int &device) const
 copy a tensor to new device
 
void to_ (const int &device)
 move the current Tensor to the device.
 
const bool & is_contiguous () const
 
Tensor permute_ (const std::vector< cytnx_uint64 > &rnks)
 
Tensor permute (const std::vector< cytnx_uint64 > &rnks) const
 perform tensor permute on the cytnx::Tensor and return a new instance.
 
Tensor contiguous () const
 Make the Tensor contiguous by coalescing the memory (storage).
 
Tensor contiguous_ ()
 Make the Tensor contiguous by coalescing the memory (storage), inplacely.
 
void reshape_ (const std::vector< cytnx_int64 > &new_shape)
 reshape the Tensor, inplacely
 
Tensor reshape (const std::vector< cytnx_int64 > &new_shape) const
 return a new Tensor that is reshaped.
 
Tensor astype (const int &new_type) const
 return a new Tensor that cast to different dtype.
 
template<class T >
T & at (const std::vector< cytnx_uint64 > &locator)
 [C++ only] get an element at specific location.
 
template<class T >
const T & at (const std::vector< cytnx_uint64 > &locator) const
 
template<class T >
T & item ()
 get an element from a rank-0 Tensor
 
Tensor get (const std::vector< cytnx::Accessor > &accessors) const
 get elements using Accessor (C++ API) / slices (python API)
 
void set (const std::vector< cytnx::Accessor > &accessors, const Tensor &rhs)
 set elements with the input Tensor using Accessor (C++ API) / slices (python API)
 
template<class T >
void set (const std::vector< cytnx::Accessor > &accessors, const T &rc)
 set elements with the input constant using Accessor (C++ API) / slices (python API)
 
Storage & storage () const
 return the storage of current Tensor.
 
template<class T >
void fill (const T &val)
 fill all the element of current Tensor with the value.
 
bool equiv (const Tensor &rhs)
 
Tensor real ()
 
Tensor imag ()
 
template<class T >
Tensor & operator+= (const T &rc)
 
template<class T >
Tensor & operator-= (const T &rc)
 
template<class T >
Tensor & operator*= (const T &rc)
 
template<class T >
Tensor & operator/= (const T &rc)
 
template<class T >
Tensor Add (const T &rhs)
 
template<class T >
Tensor & Add_ (const T &rhs)
 
template<class T >
Tensor Sub (const T &rhs)
 
template<class T >
Tensor & Sub_ (const T &rhs)
 
template<class T >
Tensor Mul (const T &rhs)
 
template<class T >
Tensor & Mul_ (const T &rhs)
 
template<class T >
Tensor Div (const T &rhs)
 
template<class T >
Tensor & Div_ (const T &rhs)
 
template<class T >
Tensor Cpr (const T &rhs)
 
template<class T >
Tensor Mod (const T &rhs)
 
Tensor operator- ()
 
Tensor flatten () const
 
void flatten_ ()
 
void append (const Tensor &rhs)
 
void append (const Storage &srhs)
 
template<class T >
void append (const T &rhs)
 
bool same_data (const Tensor &rhs) const
 
std::vector< Tensor > Svd (const bool &is_U=true, const bool &is_vT=true) const
 
std::vector< Tensor > Eigh (const bool &is_V=true, const bool &row_v=false) const
 
Tensor & InvM_ ()
 
Tensor InvM () const
 
Tensor & Inv_ (const double &clip)
 
Tensor Inv (const double &clip) const
 
Tensor & Conj_ ()
 
Tensor Conj () const
 
Tensor & Exp_ ()
 
Tensor Exp () const
 
Tensor Norm () const
 
Tensor Pow (const cytnx_double &p) const
 
Tensor & Pow_ (const cytnx_double &p)
 
Tensor Trace (const cytnx_uint64 &a, const cytnx_uint64 &b) const
 
Tensor Abs () const
 
Tensor & Abs_ ()
 
Tensor Max () const
 
Tensor Min () const
 
template<>
Tensor & operator+= (const Tensor &rc)
 
template<>
Tensor & operator+= (const cytnx_complex128 &rc)
 
template<>
Tensor & operator+= (const cytnx_complex64 &rc)
 
template<>
Tensor & operator+= (const cytnx_double &rc)
 
template<>
Tensor & operator+= (const cytnx_float &rc)
 
template<>
Tensor & operator+= (const cytnx_int64 &rc)
 
template<>
Tensor & operator+= (const cytnx_uint64 &rc)
 
template<>
Tensor & operator+= (const cytnx_int32 &rc)
 
template<>
Tensor & operator+= (const cytnx_uint32 &rc)
 
template<>
Tensor & operator+= (const cytnx_int16 &rc)
 
template<>
Tensor & operator+= (const cytnx_uint16 &rc)
 
template<>
Tensor & operator+= (const cytnx_bool &rc)
 
template<>
Tensor & operator+= (const Scalar &rc)
 
template<>
Tensor & operator-= (const Tensor &rc)
 
template<>
Tensor & operator-= (const cytnx_complex128 &rc)
 
template<>
Tensor & operator-= (const cytnx_complex64 &rc)
 
template<>
Tensor & operator-= (const cytnx_double &rc)
 
template<>
Tensor & operator-= (const cytnx_float &rc)
 
template<>
Tensor & operator-= (const cytnx_int64 &rc)
 
template<>
Tensor & operator-= (const cytnx_uint64 &rc)
 
template<>
Tensor & operator-= (const cytnx_int32 &rc)
 
template<>
Tensor & operator-= (const cytnx_uint32 &rc)
 
template<>
Tensor & operator-= (const cytnx_int16 &rc)
 
template<>
Tensor & operator-= (const cytnx_uint16 &rc)
 
template<>
Tensor & operator-= (const cytnx_bool &rc)
 
template<>
Tensor & operator-= (const Scalar &rc)
 
template<>
Tensor & operator*= (const Tensor &rc)
 
template<>
Tensor & operator*= (const cytnx_complex128 &rc)
 
template<>
Tensor & operator*= (const cytnx_complex64 &rc)
 
template<>
Tensor & operator*= (const cytnx_double &rc)
 
template<>
Tensor & operator*= (const cytnx_float &rc)
 
template<>
Tensor & operator*= (const cytnx_int64 &rc)
 
template<>
Tensor & operator*= (const cytnx_uint64 &rc)
 
template<>
Tensor & operator*= (const cytnx_int32 &rc)
 
template<>
Tensor & operator*= (const cytnx_uint32 &rc)
 
template<>
Tensor & operator*= (const cytnx_int16 &rc)
 
template<>
Tensor & operator*= (const cytnx_uint16 &rc)
 
template<>
Tensor & operator*= (const cytnx_bool &rc)
 
template<>
Tensor & operator*= (const Scalar &rc)
 
template<>
Tensor & operator/= (const Tensor &rc)
 
template<>
Tensor & operator/= (const cytnx_complex128 &rc)
 
template<>
Tensor & operator/= (const cytnx_complex64 &rc)
 
template<>
Tensor & operator/= (const cytnx_double &rc)
 
template<>
Tensor & operator/= (const cytnx_float &rc)
 
template<>
Tensor & operator/= (const cytnx_int64 &rc)
 
template<>
Tensor & operator/= (const cytnx_uint64 &rc)
 
template<>
Tensor & operator/= (const cytnx_int32 &rc)
 
template<>
Tensor & operator/= (const cytnx_uint32 &rc)
 
template<>
Tensor & operator/= (const cytnx_int16 &rc)
 
template<>
Tensor & operator/= (const cytnx_uint16 &rc)
 
template<>
Tensor & operator/= (const cytnx_bool &rc)
 
template<>
Tensor & operator/= (const Scalar &rc)
 

Static Public Member Functions

static Tensor Load (const std::string &fname)
 Load a Tensor from file.
 
static Tensor Load (const char *fname)
 
static Tensor Fromfile (const std::string &fname, const unsigned int &dtype, const cytnx_int64 &count=-1)
 
static Tensor Fromfile (const char *fname, const unsigned int &dtype, const cytnx_int64 &count=-1)
 
static Tensor from_storage (const Storage &in)
 

Detailed Description

a tensor (multi-dimensional array)

Constructor & Destructor Documentation

◆ Tensor()

cytnx::Tensor::Tensor ( const std::vector< cytnx_uint64 > &  shape,
const unsigned int dtype = Type.Double,
const int device = -1 
)
inline

Member Function Documentation

◆ Abs()

Tensor cytnx::Tensor::Abs ( ) const

◆ Abs_()

Tensor & cytnx::Tensor::Abs_ ( )

◆ Add()

template<class T >
Tensor cytnx::Tensor::Add ( const T rhs)
inline

◆ Add_()

template<class T >
Tensor & cytnx::Tensor::Add_ ( const T rhs)
inline

◆ append() [1/3]

void cytnx::Tensor::append ( const Storage srhs)
inline

◆ append() [2/3]

template<class T >
void cytnx::Tensor::append ( const T rhs)
inline

◆ append() [3/3]

void cytnx::Tensor::append ( const Tensor rhs)
inline

◆ astype()

Tensor cytnx::Tensor::astype ( const int new_type) const
inline

return a new Tensor that cast to different dtype.

Parameters
new_type  the new dtype. It can be any type defined in cytnx::Type
Returns
[Tensor]

Note:

If the new_type is the same as dtype of the current Tensor, return self.

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main(){
Tensor A = zeros({3,4,5},Type.Double);
cout << A;
Tensor B = A.astype(Type.Uint64);
cout << B;
// the new type is the same as current dtype, return self.
Tensor C = A.astype(Type.Double);
cout << is(C,A) << endl; // this should be true.
return 0;
}
a tensor (multi-dimensional array)
Definition Tensor.hpp:289
Tensor astype(const int &new_type) const
return a new Tensor that cast to different dtype.
Definition Tensor.hpp:944
int device() const
the device-id of the Tensor
Definition Tensor.hpp:668
Definition Accessor.hpp:12
Tensor zeros(const cytnx_uint64 &Nelem, const unsigned int &dtype=Type.Double, const int &device=Device.cpu)
create a rank-1 Tensor with all the elements initialized to zero.
Definition Generator.cpp:10
Type_class Type
Definition Type.cpp:137

output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]
 [[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]
 [[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]]


Total elem: 60
type  : Uint64
cytnx device: CPU
Shape : (3,4,5)
[[[                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]]
 [[                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]]
 [[                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]]]

1

python API:

from cytnx import *
A = zeros([3,4,5],dtype=Type.Double)
print(A)
B = A.astype(Type.Uint64)
print(B)
C = A.astype(Type.Double)
print(C is A)


output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]
 [[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]
 [[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]]




Total elem: 60
type  : Uint64
cytnx device: CPU
Shape : (3,4,5)
[[[                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]]
 [[                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]]
 [[                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]]]



True

◆ at() [1/2]

template<class T >
T & cytnx::Tensor::at ( const std::vector< cytnx_uint64 > &  locator)
inline

[C++ only] get an element at specific location.

Parameters
locator  the location of the element
Returns
[ref]

Note:

  1. This is for C++ API only!
  2. need template instantiation to resolve the type, which should be consistent with the dtype of the Tensor. An error will be issued if the template type is inconsistent with the current dtype of the Tensor.
  3. For python API, use [] directly to get element.

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main(){
Tensor A = arange(30,Type.Float).reshape(2,3,5);
cout << A << endl;
// note that the type resolver should be consistent with the dtype
cout << A.at<cytnx_float>(0,0,2) << endl;
// the return is a ref., can be modify directly.
A.at<cytnx_float>(0,0,2) = 999;
cout << A.at<cytnx_float>(0,0,2) << endl;
// [Note] there are two way to give argument:
// Method 1: more like 'c++' way:
// (alternatively, you can also simply give a std::vector)
A.at<cytnx_float>({0,0,2}); // note the braket{}
// Method 2: more like 'python' way:
A.at<cytnx_float>(0,0,2);
return 0;
}
T & at(const std::vector< cytnx_uint64 > &locator)
[C++ only] get an element at specific location.
Definition Tensor.hpp:976
Tensor reshape(const std::vector< cytnx_int64 > &new_shape) const
return a new Tensor that is reshaped.
Definition Tensor.hpp:913
float cytnx_float
Definition Type.hpp:21
Tensor arange(const cytnx_int64 &Nelem)
create a rank-1 Tensor with incremental unsigned integer elements in the range [0,Nelem)
Definition Generator.cpp:68

output>

Total elem: 30
type  : Float32
cytnx device: CPU
Shape : (2,3,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]]
 [[1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]
  [2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]]]


2
999

◆ at() [2/2]

template<class T >
const T & cytnx::Tensor::at ( const std::vector< cytnx_uint64 > &  locator) const
inline

◆ clone()

Tensor cytnx::Tensor::clone ( ) const
inline

return a clone of the current Tensor.

Returns
[Tensor]

description:
In C++ API, the behavior of assignment operator is designed to have same behavior as python,
to have a copy of the current tensor, we call clone to return a copy.

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main(){
Tensor A({3,4,5});
Tensor B = A; // B shares same object with A
Tensor C = A.clone(); // C is a copy of A
// use is() to check if two variable shares same object
cout << is(B,A) << endl;
cout << is(C,A) << endl;
return 0;
}
Tensor clone() const
return a clone of the current Tensor.
Definition Tensor.hpp:719

output>

1
0

python API:

from cytnx import *
A = Tensor([3,4,5])
B = A
C = A.clone()
print(B is A)
print(C is A)


output>

True
False

◆ Conj()

Tensor cytnx::Tensor::Conj ( ) const

◆ Conj_()

Tensor & cytnx::Tensor::Conj_ ( )

◆ contiguous()

Tensor cytnx::Tensor::contiguous ( ) const
inline

Make the Tensor contiguous by coalescing the memory (storage).

Returns
[Tensor] a new Tensor that is with contiguous memory (storage).

See also Tensor::contiguous_()

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main(){
Tensor A({3,4,5});
cout << A.shape() << endl;
Tensor B = A.permute({0,2,1});
cout << B.shape() << endl;
//[Note] permute will not actually move the internal memory (storage) layout.
// this is called non-contiguous status.
// the memory layout will only move when Tensor.contiguous() is called.
Tensor C = B.contiguous(); //actual moving the memory
cout << B.is_contiguous() << endl; // false.
cout << C.is_contiguous() << endl; // true.
cout << C.shape() << endl;
cout << C.same_data(B) << endl; //false
cout << B.same_data(A) << endl; //true
return 0;
}
Tensor contiguous() const
Make the Tensor contiguous by coalescing the memory (storage).
Definition Tensor.hpp:834
const bool & is_contiguous() const
Definition Tensor.hpp:772
Tensor permute(const std::vector< cytnx_uint64 > &rnks) const
perform tensor permute on the cytnx::Tensor and return a new instance.
Definition Tensor.hpp:804
const std::vector< cytnx_uint64 > & shape() const
the shape of the Tensor
Definition Tensor.hpp:689

output>

Vector Print:
Total Elements:3
[3, 4, 5]

Vector Print:
Total Elements:3
[3, 5, 4]

0
1
Vector Print:
Total Elements:3
[3, 5, 4]

0
1

python API:

from cytnx import *
A = Tensor([3,4,5])
print(A.shape())
B = A.permute(0,2,1)
print(B.shape())
C = B.contiguous()
print(B.is_contiguous()) #false
print(C.is_contiguous()) #true
print(C.shape())


output>

[3, 4, 5]
[3, 5, 4]
False
True
[3, 5, 4]

◆ contiguous_()

Tensor cytnx::Tensor::contiguous_ ( )
inline

Make the Tensor contiguous by coalescing the memory (storage), inplacely.

See also Tensor::contiguous()

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main(){
Tensor A({3,4,5});
cout << A.shape() << endl;
Tensor B = A.permute({0,2,1});
cout << B.shape() << endl;
//[Note] permute will not actually move the internal memory (storage) layout.
// this is called non-contiguous status.
// the memory layout will only move when Tensor.contiguous() is called.
cout << B.is_contiguous() << endl; // false.
B.contiguous_(); //actual moving the memory
cout << B.is_contiguous() << endl; // true.
return 0;
}

output>

Vector Print:
Total Elements:3
[3, 4, 5]

Vector Print:
Total Elements:3
[3, 5, 4]

0
1

python API:

from cytnx import *
A = Tensor([3,4,5])
print(A.shape())
B = A.permute(0,2,1)
print(B.shape())
print(B.is_contiguous()) #false
B.contiguous_()
print(B.is_contiguous()) #true


output>

[3, 4, 5]
[3, 5, 4]
False
True

◆ Cpr()

template<class T >
Tensor cytnx::Tensor::Cpr ( const T rhs)
inline

◆ device()

int cytnx::Tensor::device ( ) const
inline

the device-id of the Tensor

Returns
[cytnx_int64] the device_id of the Tensor

◆ device_str()

std::string cytnx::Tensor::device_str ( ) const
inline

the device (in string) of the Tensor

Returns
[std::string] the device of the Tensor

◆ Div()

template<class T >
Tensor cytnx::Tensor::Div ( const T rhs)
inline

◆ Div_()

template<class T >
Tensor & cytnx::Tensor::Div_ ( const T rhs)
inline

◆ dtype()

unsigned int cytnx::Tensor::dtype ( ) const
inline

the dtype-id of the Tensor

Returns
[cytnx_uint64] the dtype_id of the Tensor

◆ dtype_str()

std::string cytnx::Tensor::dtype_str ( ) const
inline

the dtype (in string) of the Tensor

Returns
[std::string] the dtype of the Tensor

◆ Eigh()

std::vector< Tensor > cytnx::Tensor::Eigh ( const bool is_V = true,
const bool row_v = false 
) const

◆ equiv()

bool cytnx::Tensor::equiv ( const Tensor rhs)
inline

◆ Exp()

Tensor cytnx::Tensor::Exp ( ) const

◆ Exp_()

Tensor & cytnx::Tensor::Exp_ ( )

◆ fill()

template<class T >
void cytnx::Tensor::fill ( const T val)
inline

fill all the element of current Tensor with the value.

Parameters
val  the assigned value

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main(){
Tensor A = arange(60).reshape({3,4,5});
cout << A << endl;
A.fill(999);
cout << A << endl;
return 0;
}
void fill(const T &val)
fill all the element of current Tensor with the value.
Definition Tensor.hpp:1159

output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 ]
  [4.50000e+01 4.60000e+01 4.70000e+01 4.80000e+01 4.90000e+01 ]
  [5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 ]
  [5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]]]



Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]]
 [[9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]]
 [[9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]]]


python API

from cytnx import *
A = arange(60).reshape(3,4,5)
print(A)
A.fill(999)
print(A)


output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 ]
  [4.50000e+01 4.60000e+01 4.70000e+01 4.80000e+01 4.90000e+01 ]
  [5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 ]
  [5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]]]




Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]]
 [[9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]]
 [[9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 9.99000e+02 ]]]



◆ flatten()

Tensor cytnx::Tensor::flatten ( ) const
inline

◆ flatten_()

void cytnx::Tensor::flatten_ ( )
inline

◆ from_storage()

static Tensor cytnx::Tensor::from_storage ( const Storage in)
inlinestatic

◆ Fromfile() [1/2]

Tensor cytnx::Tensor::Fromfile ( const char fname,
const unsigned int dtype,
const cytnx_int64 count = -1 
)
static

◆ Fromfile() [2/2]

Tensor cytnx::Tensor::Fromfile ( const std::string &  fname,
const unsigned int dtype,
const cytnx_int64 count = -1 
)
static

◆ get()

Tensor cytnx::Tensor::get ( const std::vector< cytnx::Accessor > &  accessors) const
inline

get elements using Accessor (C++ API) / slices (python API)

Returns
[Tensor]

See also Accessor for how to specify coordinates with Accessor in the C++ API.

Note:

  1. the return will be a new Tensor instance, which does not share memory with the current Tensor.

Equivalently:

One can also use a more intuitive way to get the slice, using the [] operator.

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main(){
typedef Accessor ac;
/*
In C++ API, generally, there are two ways you can access Tensor.
1. Using operator():
This is more of 'python' way.
2. Using low-level API or operator[]:
This is more of 'c++' way.
*/
Tensor A = arange(60).reshape({3,4,5});
cout << A << endl;
//Method 1, Using operator():
//-----------------------------------------
Tensor B = A(2,":","2:5:1");
cout << B << endl;
/* [Note]
This is equivalent as:
> Tensor B = A(2,ac::all(),ac::range(2,5,1));
See also cytnx::Accessor.
*/
//Method 2, Using operator[] or low-level API get():
//----------------------------------------
Tensor B2 = A[{ac(2),ac::all(),ac::range(2,5,1)}]; // remember the {}braket
cout << B2 << endl;
/* [Note]
You can also use the low-level API get() as
> Tensor B2 = A.get({ac(2),ac::all(),ac::range(2,5,1)});
*/
return 0;
}
object that mimic the python slice to access elements in C++ [this is for c++ API only].
Definition Accessor.hpp:16
static Accessor all()
access the whole rank, this is similar to [:] in python
Definition Accessor.hpp:125
static Accessor range(const cytnx_int64 &min, const cytnx_int64 &max, const cytnx_int64 &step=1)
access the range at assigned rank, this is similar to min:max:step in python
Definition Accessor.hpp:147
Accessor ac
Definition SparseUniTensor.cpp:11

output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 ]
  [4.50000e+01 4.60000e+01 4.70000e+01 4.80000e+01 4.90000e+01 ]
  [5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 ]
  [5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]]]



Total elem: 12
type  : Double (Float64)
cytnx device: CPU
Shape : (4,3)
[[4.20000e+01 4.30000e+01 4.40000e+01 ]
 [4.70000e+01 4.80000e+01 4.90000e+01 ]
 [5.20000e+01 5.30000e+01 5.40000e+01 ]
 [5.70000e+01 5.80000e+01 5.90000e+01 ]]



Total elem: 12
type  : Double (Float64)
cytnx device: CPU
Shape : (4,3)
[[4.20000e+01 4.30000e+01 4.40000e+01 ]
 [4.70000e+01 4.80000e+01 4.90000e+01 ]
 [5.20000e+01 5.30000e+01 5.40000e+01 ]
 [5.70000e+01 5.80000e+01 5.90000e+01 ]]


python API:

from cytnx import *
A = arange(60).reshape(3,4,5)
print(A)
B = A[2,:,2:5:1]
print(B)


output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 ]
  [4.50000e+01 4.60000e+01 4.70000e+01 4.80000e+01 4.90000e+01 ]
  [5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 ]
  [5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]]]




Total elem: 12
type  : Double (Float64)
cytnx device: CPU
Shape : (4,3)
[[4.20000e+01 4.30000e+01 4.40000e+01 ]
 [4.70000e+01 4.80000e+01 4.90000e+01 ]
 [5.20000e+01 5.30000e+01 5.40000e+01 ]
 [5.70000e+01 5.80000e+01 5.90000e+01 ]]



◆ imag()

Tensor cytnx::Tensor::imag ( )

◆ Init()

void cytnx::Tensor::Init ( const std::vector< cytnx_uint64 > &  shape,
const unsigned int dtype = Type.Double,
const int device = -1 
)
inline

initialize a Tensor

Parameters
shape   the shape of the tensor.
dtype   the dtype of the tensor. This can be any of the types defined in cytnx::Type
device  the device on which the tensor is to be created. This can be cytnx::Device.cpu or cytnx::Device.cuda+<gpuid>

[Note]

  1. the content of the Tensor created will be uninitialized! See zeros(), ones() or arange() for generating an initialized Tensor.

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main(){
/*
1. Create a Tensor with
shape (3,4,5),
dtype =Type.Double [default],
device=Device.cpu [default]
*/
Tensor A({3,4,5});
cout << A << endl;
/*
2. Create a Tensor with
shape (3,4,5),
dtype =Type.Uint64,
device=Device.cpu [default],
[Note] the dtype can be any one of the supported type.
*/
Tensor B({3,4,5},Type.Uint64);
cout << B << endl;
/*
3. Initialize a Tensor with
shape (3,4,5),
dtype =Type.Double,
device=Device.cuda+0, (on gpu with gpu-id=0)
[Note] the gpu device can be set with Device.cuda+<gpu-id>
*/
Tensor C({3,4,5},Type.Double,Device.cuda+0);
cout << C << endl;
//4. Create an empty Tensor, and init later
Tensor D;
D.Init({3,4,5},Type.Double,Device.cpu);
return 0;
}
void Init(const std::vector< cytnx_uint64 > &shape, const unsigned int &dtype=Type.Double, const int &device=-1)
initialize a Tensor
Definition Tensor.hpp:637
Device_class Device
Definition Device.cpp:105

output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]
 [[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]
 [[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]]



Total elem: 60
type  : Uint64
cytnx device: CPU
Shape : (3,4,5)
[[[                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]]
 [[                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]]
 [[                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]]]



Total elem: 60
type  : Double (Float64)
cytnx device: CUDA/GPU-id:0
Shape : (3,4,5)
[[[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]
 [[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]
 [[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]]


python API:

from cytnx import *
#1. Create a Tensor with
# shape (3,4,5),
# dtype =Type.Double [default],
# device=Device.cpu [default]
A = Tensor([3,4,5])
print(A)
#2. Create a Tensor with
# shape (3,4,5),
# dtype =Type.Uint64,
# device=Device.cpu [default],
# [Note] the dtype can be any one of the supported type.
B = Tensor([3,4,5],dtype=Type.Uint64)
print(B)
#3. Initialize a Tensor with
# shape (3,4,5),
# dtype =Type.Double,
# device=Device.cuda+0, (on gpu with gpu-id=0)
# [Note] the gpu device can be set with Device.cuda+<gpu-id>
C = Tensor([3,4,5],dtype=Type.Double,device=Device.cuda+0);
print(C)
#4. Create an empty Tensor, and init later
D = Tensor()
D.Init([3,4,5],dtype=Type.Double,device=Device.cpu);


output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]
 [[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]
 [[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]]




Total elem: 60
type  : Uint64
cytnx device: CPU
Shape : (3,4,5)
[[[                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]]
 [[                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]]
 [[                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]
  [                  0                   0                   0                   0                   0 ]]]




Total elem: 60
type  : Double (Float64)
cytnx device: CUDA/GPU-id:0
Shape : (3,4,5)
[[[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]
 [[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]
 [[0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 0.00000e+00 ]]]



◆ Inv()

Tensor cytnx::Tensor::Inv ( const double clip) const

◆ Inv_()

Tensor & cytnx::Tensor::Inv_ ( const double clip)

◆ InvM()

Tensor cytnx::Tensor::InvM ( ) const

◆ InvM_()

Tensor & cytnx::Tensor::InvM_ ( )

◆ is_contiguous()

const bool & cytnx::Tensor::is_contiguous ( ) const
inline

◆ item()

template<class T >
T & cytnx::Tensor::item ( )
inline

get an element from a rank-0 Tensor

Returns
[T]

Note:

  1. This can only be called on a rank-0 Tensor (scalar). For the C++ API, a template instantiation of the type is needed to resolve the type, which should be consistent with the dtype of the Tensor. An error will be issued if the template type is inconsistent with the current dtype of the Tensor.
  2. Although the return is by reference in C++ part, the return in python is not.
  3. From 2., we recommend users to use at<T> (C++ API) and [] (python API) to modify the value of the element, in order to have consistent syntax across the two languages.

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main(){
Tensor A = ones(1,Type.Uint64);
cout << A << endl;
// note that the type resolver should be consistent with the dtype
cout << A.item<cytnx_uint64>() << endl;
return 0;
}
Tensor ones(const cytnx_uint64 &Nelem, const unsigned int &dtype=Type.Double, const int &device=Device.cpu)
create an rank-1 Tensor with all the elements are initialized with one.
Definition Generator.cpp:23
uint64_t cytnx_uint64
Definition Type.hpp:22

output>

Total elem: 1
type  : Uint64
cytnx device: CPU
Shape : (1)
[                  1 ]


1

python API:

from cytnx import *
A = ones(1,Type.Uint64)
print(A)
print(A.item())


output>

Total elem: 1
type  : Uint64
cytnx device: CPU
Shape : (1)
[                  1 ]



1

◆ Load() [1/2]

Tensor cytnx::Tensor::Load ( const char fname)
static

◆ Load() [2/2]

Tensor cytnx::Tensor::Load ( const std::string &  fname)
static

Load a Tensor from a file.

Parameters
fnamefile name

description: load the Tensor from the file at the path specified by the input param 'fname'

◆ Max()

Tensor cytnx::Tensor::Max ( ) const

◆ Min()

Tensor cytnx::Tensor::Min ( ) const

◆ Mod()

template<class T >
Tensor cytnx::Tensor::Mod ( const T rhs)
inline

◆ Mul()

template<class T >
Tensor cytnx::Tensor::Mul ( const T rhs)
inline

◆ Mul_()

template<class T >
Tensor & cytnx::Tensor::Mul_ ( const T rhs)
inline

◆ Norm()

Tensor cytnx::Tensor::Norm ( ) const

◆ operator*=() [1/14]

template<>
Tensor & cytnx::Tensor::operator*= ( const cytnx_bool rc)

◆ operator*=() [2/14]

◆ operator*=() [3/14]

◆ operator*=() [4/14]

◆ operator*=() [5/14]

template<>
Tensor & cytnx::Tensor::operator*= ( const cytnx_float rc)

◆ operator*=() [6/14]

template<>
Tensor & cytnx::Tensor::operator*= ( const cytnx_int16 rc)

◆ operator*=() [7/14]

template<>
Tensor & cytnx::Tensor::operator*= ( const cytnx_int32 rc)

◆ operator*=() [8/14]

template<>
Tensor & cytnx::Tensor::operator*= ( const cytnx_int64 rc)

◆ operator*=() [9/14]

◆ operator*=() [10/14]

◆ operator*=() [11/14]

◆ operator*=() [12/14]

template<>
Tensor & cytnx::Tensor::operator*= ( const Scalar rc)

◆ operator*=() [13/14]

template<class T >
Tensor & cytnx::Tensor::operator*= ( const T rc)

◆ operator*=() [14/14]

template<>
Tensor & cytnx::Tensor::operator*= ( const Tensor rc)

◆ operator+=() [1/14]

template<>
Tensor & cytnx::Tensor::operator+= ( const cytnx_bool rc)

◆ operator+=() [2/14]

◆ operator+=() [3/14]

◆ operator+=() [4/14]

◆ operator+=() [5/14]

template<>
Tensor & cytnx::Tensor::operator+= ( const cytnx_float rc)

◆ operator+=() [6/14]

template<>
Tensor & cytnx::Tensor::operator+= ( const cytnx_int16 rc)

◆ operator+=() [7/14]

template<>
Tensor & cytnx::Tensor::operator+= ( const cytnx_int32 rc)

◆ operator+=() [8/14]

template<>
Tensor & cytnx::Tensor::operator+= ( const cytnx_int64 rc)

◆ operator+=() [9/14]

◆ operator+=() [10/14]

◆ operator+=() [11/14]

◆ operator+=() [12/14]

template<>
Tensor & cytnx::Tensor::operator+= ( const Scalar rc)

◆ operator+=() [13/14]

template<class T >
Tensor & cytnx::Tensor::operator+= ( const T rc)

◆ operator+=() [14/14]

template<>
Tensor & cytnx::Tensor::operator+= ( const Tensor rc)

◆ operator-()

◆ operator-=() [1/14]

template<>
Tensor & cytnx::Tensor::operator-= ( const cytnx_bool rc)

◆ operator-=() [2/14]

◆ operator-=() [3/14]

◆ operator-=() [4/14]

◆ operator-=() [5/14]

template<>
Tensor & cytnx::Tensor::operator-= ( const cytnx_float rc)

◆ operator-=() [6/14]

template<>
Tensor & cytnx::Tensor::operator-= ( const cytnx_int16 rc)

◆ operator-=() [7/14]

template<>
Tensor & cytnx::Tensor::operator-= ( const cytnx_int32 rc)

◆ operator-=() [8/14]

template<>
Tensor & cytnx::Tensor::operator-= ( const cytnx_int64 rc)

◆ operator-=() [9/14]

◆ operator-=() [10/14]

◆ operator-=() [11/14]

◆ operator-=() [12/14]

template<>
Tensor & cytnx::Tensor::operator-= ( const Scalar rc)

◆ operator-=() [13/14]

template<class T >
Tensor & cytnx::Tensor::operator-= ( const T rc)

◆ operator-=() [14/14]

template<>
Tensor & cytnx::Tensor::operator-= ( const Tensor rc)

◆ operator/=() [1/14]

template<>
Tensor & cytnx::Tensor::operator/= ( const cytnx_bool rc)

◆ operator/=() [2/14]

◆ operator/=() [3/14]

◆ operator/=() [4/14]

◆ operator/=() [5/14]

template<>
Tensor & cytnx::Tensor::operator/= ( const cytnx_float rc)

◆ operator/=() [6/14]

template<>
Tensor & cytnx::Tensor::operator/= ( const cytnx_int16 rc)

◆ operator/=() [7/14]

template<>
Tensor & cytnx::Tensor::operator/= ( const cytnx_int32 rc)

◆ operator/=() [8/14]

template<>
Tensor & cytnx::Tensor::operator/= ( const cytnx_int64 rc)

◆ operator/=() [9/14]

◆ operator/=() [10/14]

◆ operator/=() [11/14]

◆ operator/=() [12/14]

template<>
Tensor & cytnx::Tensor::operator/= ( const Scalar rc)

◆ operator/=() [13/14]

template<class T >
Tensor & cytnx::Tensor::operator/= ( const T rc)

◆ operator/=() [14/14]

template<>
Tensor & cytnx::Tensor::operator/= ( const Tensor rc)

◆ permute()

Tensor cytnx::Tensor::permute ( const std::vector< cytnx_uint64 > &  rnks) const
inline

perform tensor permute on the cytnx::Tensor and return a new instance.

Parameters
rnksthe permute indices, should have No. of elements equal to the rank of tensor.
Returns
[Tensor] a permuted new Tensor

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main(){
Tensor A({3,4,5});
cout << A.shape() << endl;
Tensor B = A.permute({0,2,1});
cout << B.shape() << endl;
cout << is(B,A) << endl; // this should be false, different object.
cout << B.same_data(A) << endl; // this should be true, since no new pointer/memory is created.
return 0;
}

output>

Vector Print:
Total Elements:3
[3, 4, 5]

Vector Print:
Total Elements:3
[3, 5, 4]

0
1

python API:

from cytnx import *
A = Tensor([3,4,5])
print(A.shape())
B = A.permute(0,2,1)
print(B.shape())
print(B is A) #False
print(B.same_data(A)) #True


output>

[3, 4, 5]
[3, 5, 4]
False
True

◆ permute_()

Tensor cytnx::Tensor::permute_ ( const std::vector< cytnx_uint64 > &  rnks)
inline

◆ Pow()

Tensor cytnx::Tensor::Pow ( const cytnx_double p) const

◆ Pow_()

Tensor & cytnx::Tensor::Pow_ ( const cytnx_double p)

◆ rank()

cytnx_uint64 cytnx::Tensor::rank ( ) const
inline

the rank of the Tensor

Returns
[cytnx_uint64] the rank of the Tensor

◆ real()

Tensor cytnx::Tensor::real ( )

◆ reshape()

Tensor cytnx::Tensor::reshape ( const std::vector< cytnx_int64 > &  new_shape) const
inline

return a new Tensor that is reshaped.

Parameters
new_shapethe new shape of the Tensor.
Returns
[Tensor]

See also Tensor::reshape_()

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main(){
Tensor A = arange(60);
// there are two ways you can give argument to reshape:
// Method 1: more like 'C++' way:
Tensor B = A.reshape({5,12}); // note the brackets {}
cout << A << endl;
cout << B << endl;
// Method 2: more like 'python' way:
Tensor B2 = A.reshape(5,12);
return 0;
}

output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (5,12)
[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 1.00000e+01 1.10000e+01 ]
 [1.20000e+01 1.30000e+01 1.40000e+01 1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 ]
 [2.40000e+01 2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 3.50000e+01 ]
 [3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 4.50000e+01 4.60000e+01 4.70000e+01 ]
 [4.80000e+01 4.90000e+01 5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]]



Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (5,12)
[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 1.00000e+01 1.10000e+01 ]
 [1.20000e+01 1.30000e+01 1.40000e+01 1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 ]
 [2.40000e+01 2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 3.50000e+01 ]
 [3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 4.50000e+01 4.60000e+01 4.70000e+01 ]
 [4.80000e+01 4.90000e+01 5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]]


python API:

from cytnx import *
A = arange(60)
B = A.reshape(5,12)
print(A)
print(B)


output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (5,12)
[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 1.00000e+01 1.10000e+01 ]
 [1.20000e+01 1.30000e+01 1.40000e+01 1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 ]
 [2.40000e+01 2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 3.50000e+01 ]
 [3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 4.50000e+01 4.60000e+01 4.70000e+01 ]
 [4.80000e+01 4.90000e+01 5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]]




Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (5,12)
[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 1.00000e+01 1.10000e+01 ]
 [1.20000e+01 1.30000e+01 1.40000e+01 1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 ]
 [2.40000e+01 2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 3.50000e+01 ]
 [3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 4.50000e+01 4.60000e+01 4.70000e+01 ]
 [4.80000e+01 4.90000e+01 5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]]



◆ reshape_()

void cytnx::Tensor::reshape_ ( const std::vector< cytnx_int64 > &  new_shape)
inline

reshape the Tensor, inplacely

Parameters
new_shapethe new shape of the Tensor.

See also Tensor::reshape()

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main(){
Tensor A = arange(60);
cout << A << endl;
//there are two ways you can assign the argument:
// Method 1: more like 'c++' way:
A.reshape_({5,12}); //note the brackets {}
cout << A << endl;
// Method 2: more like 'python' way:
A.reshape_(5,4,3);
cout << A << endl;
return 0;
}

output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (60)
[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 4.50000e+01 4.60000e+01 4.70000e+01 4.80000e+01 4.90000e+01 5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]



Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (5,12)
[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 1.00000e+01 1.10000e+01 ]
 [1.20000e+01 1.30000e+01 1.40000e+01 1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 ]
 [2.40000e+01 2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 3.50000e+01 ]
 [3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 4.50000e+01 4.60000e+01 4.70000e+01 ]
 [4.80000e+01 4.90000e+01 5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]]



Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (5,4,3)
[[[0.00000e+00 1.00000e+00 2.00000e+00 ]
  [3.00000e+00 4.00000e+00 5.00000e+00 ]
  [6.00000e+00 7.00000e+00 8.00000e+00 ]
  [9.00000e+00 1.00000e+01 1.10000e+01 ]]
 [[1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 ]
  [1.80000e+01 1.90000e+01 2.00000e+01 ]
  [2.10000e+01 2.20000e+01 2.30000e+01 ]]
 [[2.40000e+01 2.50000e+01 2.60000e+01 ]
  [2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 ]
  [3.30000e+01 3.40000e+01 3.50000e+01 ]]
 [[3.60000e+01 3.70000e+01 3.80000e+01 ]
  [3.90000e+01 4.00000e+01 4.10000e+01 ]
  [4.20000e+01 4.30000e+01 4.40000e+01 ]
  [4.50000e+01 4.60000e+01 4.70000e+01 ]]
 [[4.80000e+01 4.90000e+01 5.00000e+01 ]
  [5.10000e+01 5.20000e+01 5.30000e+01 ]
  [5.40000e+01 5.50000e+01 5.60000e+01 ]
  [5.70000e+01 5.80000e+01 5.90000e+01 ]]]


python API:

from cytnx import *
A = arange(60)
print(A)
A.reshape_(5,12)
print(A)


output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (60)
[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 4.50000e+01 4.60000e+01 4.70000e+01 4.80000e+01 4.90000e+01 5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]




Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (5,12)
[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 1.00000e+01 1.10000e+01 ]
 [1.20000e+01 1.30000e+01 1.40000e+01 1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 ]
 [2.40000e+01 2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 3.50000e+01 ]
 [3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 4.50000e+01 4.60000e+01 4.70000e+01 ]
 [4.80000e+01 4.90000e+01 5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]]



◆ same_data()

bool cytnx::Tensor::same_data ( const Tensor rhs) const

◆ Save() [1/2]

void cytnx::Tensor::Save ( const char fname) const

◆ Save() [2/2]

void cytnx::Tensor::Save ( const std::string &  fname) const

Save current Tensor to file.

Parameters
fnamefile name

description: save the Tensor to the file at the path specified by the input param 'fname', with postfix ".cytn"

◆ set() [1/2]

template<class T >
void cytnx::Tensor::set ( const std::vector< cytnx::Accessor > &  accessors,
const T rc 
)
inline

set elements with the input constant using Accessor (C++ API) / slices (python API)

Parameters
accessorsthe list(vector) of accessors.
rc[Const]

See also Tensor::fill for filling all elements with assigned constant.

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main(){
typedef Accessor ac;
// Let's define two Tensor.
Tensor A = arange(60).reshape({3,4,5});
cout << A << endl;
Tensor B = zeros({4,3});
cout << B << endl;
// I. To set part of A with Tensor B, or a constant:
// [Method 1] Using direct assignment
//-------------------------------------
A(2,":","2:5:1") = B;
cout << A << endl;
A(2,":","2:5:1") = 999;
cout << A << endl;
// note this is the same as
// A(ac(2),ac::all(),ac::range(2,5,1)) = B;
// A[{ac(2),ac::all(),ac::range(2,5,1)}] = B; // note the brackets {}
// [Method 2] Using low-level API set():
//--------------------------------------
A.set({ac(2),ac::all(),ac::range(2,5,1)},B);
cout << A << endl;
A.set({ac(2),ac::all(),ac::range(0,2,1)},999);
cout << A << endl;
return 0;
}
void set(const std::vector< cytnx::Accessor > &accessors, const Tensor &rhs)
set elements with the input Tensor using Accessor (C++ API) / slices (python API)
Definition Tensor.hpp:1096

output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 ]
  [4.50000e+01 4.60000e+01 4.70000e+01 4.80000e+01 4.90000e+01 ]
  [5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 ]
  [5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]]]



Total elem: 12
type  : Double (Float64)
cytnx device: CPU
Shape : (4,3)
[[0.00000e+00 0.00000e+00 0.00000e+00 ]
 [0.00000e+00 0.00000e+00 0.00000e+00 ]
 [0.00000e+00 0.00000e+00 0.00000e+00 ]
 [0.00000e+00 0.00000e+00 0.00000e+00 ]]



Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [4.50000e+01 4.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.00000e+01 5.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.50000e+01 5.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]]]



Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [4.50000e+01 4.60000e+01 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [5.00000e+01 5.10000e+01 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [5.50000e+01 5.60000e+01 9.99000e+02 9.99000e+02 9.99000e+02 ]]]



Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [4.50000e+01 4.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.00000e+01 5.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.50000e+01 5.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]]]



Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[9.99000e+02 9.99000e+02 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [9.99000e+02 9.99000e+02 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [9.99000e+02 9.99000e+02 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [9.99000e+02 9.99000e+02 0.00000e+00 0.00000e+00 0.00000e+00 ]]]


python API:

from cytnx import *
A = arange(60).reshape(3,4,5)
print(A)
B = zeros([4,3])
print(B)
A[2,:,2:5:1] = B
print(A)
A[0,:,0:2:1] = 999
print(A)


output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 ]
  [4.50000e+01 4.60000e+01 4.70000e+01 4.80000e+01 4.90000e+01 ]
  [5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 ]
  [5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]]]




Total elem: 12
type  : Double (Float64)
cytnx device: CPU
Shape : (4,3)
[[0.00000e+00 0.00000e+00 0.00000e+00 ]
 [0.00000e+00 0.00000e+00 0.00000e+00 ]
 [0.00000e+00 0.00000e+00 0.00000e+00 ]
 [0.00000e+00 0.00000e+00 0.00000e+00 ]]




Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [4.50000e+01 4.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.00000e+01 5.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.50000e+01 5.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]]]




Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[9.99000e+02 9.99000e+02 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [9.99000e+02 9.99000e+02 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [9.99000e+02 9.99000e+02 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [9.99000e+02 9.99000e+02 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [4.50000e+01 4.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.00000e+01 5.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.50000e+01 5.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]]]



◆ set() [2/2]

void cytnx::Tensor::set ( const std::vector< cytnx::Accessor > &  accessors,
const Tensor rhs 
)
inline

set elements with the input Tensor using Accessor (C++ API) / slices (python API)

Parameters
accessors	the list (vector) of accessors.
rhs	[Tensor] the Tensor whose elements are assigned into the selected region.

Note:

the shape of the input Tensor should be the same as the shape indicated by the Accessors. The memory is not shared with the input Tensor.

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main(){
typedef Accessor ac;
// Let's define two Tensor.
Tensor A = arange(60).reshape({3,4,5});
cout << A << endl;
Tensor B = zeros({4,3});
cout << B << endl;
// I. To set part of A with Tensor B, or constant:
// [Method 1] Using direct assignment
//-------------------------------------
A(2,":","2:5:1") = B;
cout << A << endl;
A(2,":","2:5:1") = 999;
cout << A << endl;
// note this is the same as
// A(ac(2),ac::all(),ac::range(2,5,1)) = B;
// A[{ac(2),ac::all(),ac::range(2,5,1)}] = B; // note that braket{}
// [Method 2] Using low-level API set():
//--------------------------------------
A.set({ac(2),ac::all(),ac::range(2,5,1)},B);
cout << A << endl;
A.set({ac(2),ac::all(),ac::range(0,2,1)},999);
cout << A << endl;
return 0;
}

output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 ]
  [4.50000e+01 4.60000e+01 4.70000e+01 4.80000e+01 4.90000e+01 ]
  [5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 ]
  [5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]]]



Total elem: 12
type  : Double (Float64)
cytnx device: CPU
Shape : (4,3)
[[0.00000e+00 0.00000e+00 0.00000e+00 ]
 [0.00000e+00 0.00000e+00 0.00000e+00 ]
 [0.00000e+00 0.00000e+00 0.00000e+00 ]
 [0.00000e+00 0.00000e+00 0.00000e+00 ]]



Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [4.50000e+01 4.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.00000e+01 5.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.50000e+01 5.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]]]



Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [4.50000e+01 4.60000e+01 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [5.00000e+01 5.10000e+01 9.99000e+02 9.99000e+02 9.99000e+02 ]
  [5.50000e+01 5.60000e+01 9.99000e+02 9.99000e+02 9.99000e+02 ]]]



Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [4.50000e+01 4.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.00000e+01 5.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.50000e+01 5.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]]]



Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[9.99000e+02 9.99000e+02 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [9.99000e+02 9.99000e+02 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [9.99000e+02 9.99000e+02 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [9.99000e+02 9.99000e+02 0.00000e+00 0.00000e+00 0.00000e+00 ]]]


python API:

from cytnx import *
A = arange(60).reshape(3,4,5)
print(A)
B = zeros([4,3])
print(B)
A[2,:,2:5:1] = B
print(A)
A[0,:,0:2:1] = 999
print(A)


output>

Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 4.20000e+01 4.30000e+01 4.40000e+01 ]
  [4.50000e+01 4.60000e+01 4.70000e+01 4.80000e+01 4.90000e+01 ]
  [5.00000e+01 5.10000e+01 5.20000e+01 5.30000e+01 5.40000e+01 ]
  [5.50000e+01 5.60000e+01 5.70000e+01 5.80000e+01 5.90000e+01 ]]]




Total elem: 12
type  : Double (Float64)
cytnx device: CPU
Shape : (4,3)
[[0.00000e+00 0.00000e+00 0.00000e+00 ]
 [0.00000e+00 0.00000e+00 0.00000e+00 ]
 [0.00000e+00 0.00000e+00 0.00000e+00 ]
 [0.00000e+00 0.00000e+00 0.00000e+00 ]]




Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[0.00000e+00 1.00000e+00 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [5.00000e+00 6.00000e+00 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [1.00000e+01 1.10000e+01 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [1.50000e+01 1.60000e+01 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [4.50000e+01 4.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.00000e+01 5.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.50000e+01 5.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]]]




Total elem: 60
type  : Double (Float64)
cytnx device: CPU
Shape : (3,4,5)
[[[9.99000e+02 9.99000e+02 2.00000e+00 3.00000e+00 4.00000e+00 ]
  [9.99000e+02 9.99000e+02 7.00000e+00 8.00000e+00 9.00000e+00 ]
  [9.99000e+02 9.99000e+02 1.20000e+01 1.30000e+01 1.40000e+01 ]
  [9.99000e+02 9.99000e+02 1.70000e+01 1.80000e+01 1.90000e+01 ]]
 [[2.00000e+01 2.10000e+01 2.20000e+01 2.30000e+01 2.40000e+01 ]
  [2.50000e+01 2.60000e+01 2.70000e+01 2.80000e+01 2.90000e+01 ]
  [3.00000e+01 3.10000e+01 3.20000e+01 3.30000e+01 3.40000e+01 ]
  [3.50000e+01 3.60000e+01 3.70000e+01 3.80000e+01 3.90000e+01 ]]
 [[4.00000e+01 4.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [4.50000e+01 4.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.00000e+01 5.10000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]
  [5.50000e+01 5.60000e+01 0.00000e+00 0.00000e+00 0.00000e+00 ]]]



◆ shape()

const std::vector< cytnx_uint64 > & cytnx::Tensor::shape ( ) const
inline

the shape of the Tensor

Returns
[std::vector<cytnx_uint64>] the shape of the Tensor

◆ storage()

Storage & cytnx::Tensor::storage ( ) const
inline

return the storage of current Tensor.

Returns
[Storage]

Note:

  1. The return storage shares the same instance of the storage of current Tensor. Use Storage.clone() to create a new instance of the returned Storage.

◆ Sub()

template<class T >
Tensor cytnx::Tensor::Sub ( const T rhs)
inline

◆ Sub_()

template<class T >
Tensor & cytnx::Tensor::Sub_ ( const T rhs)
inline

◆ Svd()

std::vector< Tensor > cytnx::Tensor::Svd ( const bool is_U = true,
const bool is_vT = true 
) const

◆ to()

Tensor cytnx::Tensor::to ( const int device) const
inline

copy a tensor to new device

Parameters
device	the device-id to copy to; it can be any device defined in cytnx::Device
Returns
[Tensor]

description:
if the device-id is the same as current Tensor's device, then return self.
otherwise, return a copy of instance that located on the target device.
see also: Tensor.to_

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main(){
Tensor A({3,4,5});
//move the tensor to different device by creating a clone object
Tensor B = A.to(Device.cuda+0);
cout << B.device_str() << endl;
cout << A.device_str() << endl;
return 0;
}
std::string device_str() const
the device (in string) of the Tensor
Definition Tensor.hpp:682
Tensor to(const int &device) const
copy a tensor to new device
Definition Tensor.hpp:745

output>

cytnx device: CUDA/GPU-id:0
cytnx device: CPU

python API:

from cytnx import *
A = Tensor([3,4,5])
B = A.to(Device.cuda+0)
print(B.device_str())
print(A.device_str())


output>

cytnx device: CUDA/GPU-id:0
cytnx device: CPU

◆ to_()

void cytnx::Tensor::to_ ( const int device)
inline

move the current Tensor to the device.

Parameters
device	the device-id to move to; it can be any device defined in cytnx::Device

description:
see also: Tensor.to

Example:

c++ API:

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;
using namespace std;
int main(){
Tensor A({3,4,5});
// move the instance tensor to different device
A.to_(Device.cuda+0);
cout << A.device_str() << endl;
return 0;
}
void to_(const int &device)
move the current Tensor to the device.
Definition Tensor.hpp:768

output>

cytnx device: CUDA/GPU-id:0

python API:

from cytnx import *
A = Tensor([3,4,5])
A.to_(Device.cuda+0)
print(A.device_str())


output>

cytnx device: CUDA/GPU-id:0

◆ Tofile() [1/3]

void cytnx::Tensor::Tofile ( const char fname) const

◆ Tofile() [2/3]

void cytnx::Tensor::Tofile ( const std::string &  fname) const

◆ Tofile() [3/3]

void cytnx::Tensor::Tofile ( std::fstream &  f) const

◆ Trace()

Tensor cytnx::Tensor::Trace ( const cytnx_uint64 a,
const cytnx_uint64 b 
) const

The documentation for this class was generated from the following files: