Cytnx v1.0.0
Tensor.hpp
Go to the documentation of this file.
1#ifndef CYTNX_TENSOR_H_
2#define CYTNX_TENSOR_H_
3
4#include "Type.hpp"
5#include "cytnx_error.hpp"
6#include "Device.hpp"
8#include <iostream>
9#include <fstream>
10#include "utils/dynamic_arg_resolver.hpp"
11#include "Accessor.hpp"
12#include <utility>
13#include <vector>
14#include <initializer_list>
15#include <string>
16
17#ifdef BACKEND_TORCH
18#else
19
20 #include "backend/Scalar.hpp"
21 #include "backend/Storage.hpp"
22 #include "backend/Tensor_impl.hpp"
23
24namespace cytnx {
25
26 class Tensor;
27
29 // [Note] these are fwd from linalg.hpp
30 template <class T>
31 Tensor operator+(const Tensor &lhs, const T &rc);
32 template <class T>
33 Tensor operator-(const Tensor &lhs, const T &rhs);
34 template <class T>
35 Tensor operator*(const Tensor &lhs, const T &rhs);
36 template <class T>
37 Tensor operator/(const Tensor &lhs, const T &rhs);
39
41 class Tensor {
42 private:
43 public:
45 // this is a proxy class that allows getting/setting elements using [] as in Python!
46 struct Tproxy {
47 boost::intrusive_ptr<Tensor_impl> _insimpl;
48 std::vector<cytnx::Accessor> _accs;
49 Tproxy(boost::intrusive_ptr<Tensor_impl> _ptr, const std::vector<cytnx::Accessor> &accs)
50 : _insimpl(std::move(_ptr)), _accs(accs) {}
51
52 // when used to set elems:
53 const Tensor &operator=(const Tensor &rhs) {
54 this->_insimpl->set(_accs, rhs._impl);
55 return rhs;
56 }
57
58 template <class T>
59 const T &operator=(const T &rc) {
60 this->_insimpl->set(_accs, rc);
61 return rc;
62 }
63 const Tproxy &operator=(const Tproxy &rc) {
64 Tensor tmp(rc);
65 this->_insimpl->set(_accs, tmp._impl);
66 return rc;
67 }
68
69 template <class T>
70 Tensor operator+=(const T &rc) {
71 Tensor self;
72 self._impl = _insimpl->get(_accs);
73 self += rc;
74 _insimpl->set(_accs, self._impl);
75 self._impl = this->_insimpl;
76 return self;
77 }
78 Tensor operator+=(const Tproxy &rc);
79
80 template <class T>
81 Tensor operator-=(const T &rc) {
82 Tensor self;
83 self._impl = _insimpl->get(_accs);
84 self -= rc;
85 _insimpl->set(_accs, self._impl);
86 self._impl = this->_insimpl;
87 return self;
88 }
89 Tensor operator-=(const Tproxy &rc);
90
91 template <class T>
92 Tensor operator/=(const T &rc) {
93 Tensor self;
94 self._impl = _insimpl->get(_accs);
95 self /= rc;
96 _insimpl->set(_accs, self._impl);
97 self._impl = this->_insimpl;
98 return self;
99 }
100 Tensor operator/=(const Tproxy &rc);
101
102 template <class T>
103 Tensor operator*=(const T &rc) {
104 Tensor self;
105 self._impl = _insimpl->get(_accs);
106 self *= rc;
107 _insimpl->set(_accs, self._impl);
108 self._impl = this->_insimpl;
109 return self;
110 }
111 Tensor operator*=(const Tproxy &rc);
112
113 // alias to resolve conflict with op ovld for rc=Tensor
114 /*
115 template<class T>
116 Tensor _operatorADD(const T &rc) const{
117 Tensor out;
118 out._impl = _insimpl->get(_accs);
119 return out.Add(rc);
120 }
121 */
122 Tensor operator+(const cytnx_complex128 &rc) const; //{return this->_operatorADD(rc);};
123 Tensor operator+(const cytnx_complex64 &rc) const; //{return this->_operatorADD(rc);};
124 Tensor operator+(const cytnx_double &rc) const; //{return this->_operatorADD(rc);};
125 Tensor operator+(const cytnx_float &rc) const; //{return this->_operatorADD(rc);};
126 Tensor operator+(const cytnx_uint64 &rc) const; //{return this->_operatorADD(rc);};
127 Tensor operator+(const cytnx_int64 &rc) const; //{return this->_operatorADD(rc);};
128 Tensor operator+(const cytnx_uint32 &rc) const; //{return this->_operatorADD(rc);};
129 Tensor operator+(const cytnx_int32 &rc) const; //{return this->_operatorADD(rc);};
130 Tensor operator+(const cytnx_uint16 &rc) const; //{return this->_operatorADD(rc);};
131 Tensor operator+(const cytnx_int16 &rc) const; //{return this->_operatorADD(rc);};
132 Tensor operator+(const cytnx_bool &rc) const; //{return this->_operatorADD(rc);};
133 Tensor operator+(const Tproxy &rc) const;
134
135 /*
136 template<class T>
137 Tensor _operatorSUB(const T &rc) const{
138 Tensor out;
139 out._impl = _insimpl->get(_accs);
140 return out.Sub(rc);
141 }
142 */
143 Tensor operator-(const cytnx_complex128 &rc) const; //{return this->_operatorSUB(rc);};
144 Tensor operator-(const cytnx_complex64 &rc) const; //{return this->_operatorSUB(rc);};
145 Tensor operator-(const cytnx_double &rc) const; //{return this->_operatorSUB(rc);};
146 Tensor operator-(const cytnx_float &rc) const; //{return this->_operatorSUB(rc);};
147 Tensor operator-(const cytnx_uint64 &rc) const; //{return this->_operatorSUB(rc);};
148 Tensor operator-(const cytnx_int64 &rc) const; //{return this->_operatorSUB(rc);};
149 Tensor operator-(const cytnx_uint32 &rc) const; //{return this->_operatorSUB(rc);};
150 Tensor operator-(const cytnx_int32 &rc) const; //{return this->_operatorSUB(rc);};
151 Tensor operator-(const cytnx_uint16 &rc) const; //{return this->_operatorSUB(rc);};
152 Tensor operator-(const cytnx_int16 &rc) const; //{return this->_operatorSUB(rc);};
153 Tensor operator-(const cytnx_bool &rc) const; //{return this->_operatorSUB(rc);};
154 Tensor operator-(const Tproxy &rc) const;
155
156 Tensor operator-() const;
157
158 /*
159 template<class T>
160 Tensor _operatorMUL(const T &rc) const{
161 Tensor out;
162 out._impl = _insimpl->get(_accs);
163 return out.Mul(rc);
164 }
165 */
166 Tensor operator*(const cytnx_complex128 &rc) const; //{return this->_operatorMUL(rc);};
167 Tensor operator*(const cytnx_complex64 &rc) const; //{return this->_operatorMUL(rc);};
168 Tensor operator*(const cytnx_double &rc) const; //{return this->_operatorMUL(rc);};
169 Tensor operator*(const cytnx_float &rc) const; //{return this->_operatorMUL(rc);};
170 Tensor operator*(const cytnx_uint64 &rc) const; //{return this->_operatorMUL(rc);};
171 Tensor operator*(const cytnx_int64 &rc) const; //{return this->_operatorMUL(rc);};
172 Tensor operator*(const cytnx_uint32 &rc) const; //{return this->_operatorMUL(rc);};
173 Tensor operator*(const cytnx_int32 &rc) const; //{return this->_operatorMUL(rc);};
174 Tensor operator*(const cytnx_uint16 &rc) const; //{return this->_operatorMUL(rc);};
175 Tensor operator*(const cytnx_int16 &rc) const; //{return this->_operatorMUL(rc);};
176 Tensor operator*(const cytnx_bool &rc) const; //{return this->_operatorMUL(rc);};
177 Tensor operator*(const Tproxy &rc) const;
178
179 /*
180 template<class T>
181 Tensor _operatorDIV(const T &rc) const{
182 Tensor out;
183 out._impl = _insimpl->get(_accs);
184 return out.Div(rc);
185 }
186 */
187 Tensor operator/(const cytnx_complex128 &rc) const; //{return this->_operatorDIV(rc);};
188 Tensor operator/(const cytnx_complex64 &rc) const; //{return this->_operatorDIV(rc);};
189 Tensor operator/(const cytnx_double &rc) const; //{return this->_operatorDIV(rc);};
190 Tensor operator/(const cytnx_float &rc) const; //{return this->_operatorDIV(rc);};
191 Tensor operator/(const cytnx_uint64 &rc) const; //{return this->_operatorDIV(rc);};
192 Tensor operator/(const cytnx_int64 &rc) const; //{return this->_operatorDIV(rc);};
193 Tensor operator/(const cytnx_uint32 &rc) const; //{return this->_operatorDIV(rc);};
194 Tensor operator/(const cytnx_int32 &rc) const; //{return this->_operatorDIV(rc);};
195 Tensor operator/(const cytnx_uint16 &rc) const; //{return this->_operatorDIV(rc);};
196 Tensor operator/(const cytnx_int16 &rc) const; //{return this->_operatorDIV(rc);};
197 Tensor operator/(const cytnx_bool &rc) const; //{return this->_operatorDIV(rc);};
198 Tensor operator/(const Tproxy &rc) const;
199
200 template <class T>
201 T item() const {
202 Tensor out;
203 out._impl = _insimpl->get(_accs);
204 return out.item<T>();
205 }
206
207 Scalar::Sproxy item() const {
208 Tensor out;
209 out._impl = _insimpl->get(_accs);
210 return out.item();
211 }
212
213 // when used to get elems:
214 operator Tensor() const {
215 Tensor out;
216 out._impl = _insimpl->get(_accs);
217 return out;
218 }
219
220 Storage storage() const {
221 Tensor out;
222 out._impl = _insimpl->get(_accs);
223 return out.storage();
224 }
225
226 }; // proxy class of Tensor.
227
229
231 // these two are using the python way!
232 //----------------------------------------
233 template <class... Ts>
234 Tproxy operator()(const std::string &e1, const Ts &...elems) {
235 // std::cout << e1 << std::endl;
236 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1, elems...);
237 return (*this)[tmp];
238 }
239 template <class... Ts>
240 Tproxy operator()(const cytnx_int64 &e1, const Ts &...elems) {
241 // std::cout << e1<< std::endl;
242 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1, elems...);
243 return (*this)[tmp];
244 }
245 template <class... Ts>
246 Tproxy operator()(const cytnx::Accessor &e1, const Ts &...elems) {
247 // std::cout << e1 << std::endl;
248 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1, elems...);
249 return (*this)[tmp];
250 }
251 template <class... Ts>
252 const Tproxy operator()(const std::string &e1, const Ts &...elems) const {
253 // std::cout << e1 << std::endl;
254 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1, elems...);
255 return (*this)[tmp];
256 }
257 template <class... Ts>
258 const Tproxy operator()(const cytnx_int64 &e1, const Ts &...elems) const {
259 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1, elems...);
260 return (*this)[tmp];
261 }
262 template <class... Ts>
263 const Tproxy operator()(const cytnx::Accessor &e1, const Ts &...elems) const {
264 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1, elems...);
265 return (*this)[tmp];
266 }
267
268 //-----------------------------------------
269
270 Tproxy operator[](const std::initializer_list<cytnx::Accessor> &accs) {
271 std::vector<cytnx::Accessor> tmp = accs;
272 return (*this)[tmp];
273 }
274 Tproxy operator[](const std::vector<cytnx::Accessor> &accs) {
275 return Tproxy(this->_impl, accs);
276 }
277
278 const Tproxy operator[](const std::vector<cytnx::Accessor> &accs) const {
279 return Tproxy(this->_impl, accs);
280 }
281 const Tproxy operator[](const std::initializer_list<cytnx::Accessor> &accs) const {
282 std::vector<cytnx::Accessor> tmp = accs;
283 return (*this)[tmp];
284 }
285
286 Tproxy operator[](const std::initializer_list<cytnx_int64> &accs) {
287 std::vector<cytnx_int64> tmp = accs;
288 return (*this)[tmp];
289 }
290 Tproxy operator[](const std::vector<cytnx_int64> &accs) {
291 std::vector<cytnx::Accessor> acc_in;
292 for (int i = 0; i < accs.size(); i++) {
293 acc_in.push_back(cytnx::Accessor(accs[i]));
294 }
295 return Tproxy(this->_impl, acc_in);
296 }
297 const Tproxy operator[](const std::initializer_list<cytnx_int64> &accs) const {
298 std::vector<cytnx_int64> tmp = accs;
299 return (*this)[tmp];
300 }
301 const Tproxy operator[](const std::vector<cytnx_uint64> &accs) const {
302 std::vector<cytnx::Accessor> acc_in;
303 for (int i = 0; i < accs.size(); i++) {
304 acc_in.push_back(cytnx::Accessor(accs[i]));
305 }
306 return Tproxy(this->_impl, acc_in);
307 }
308 const Tproxy operator[](const std::vector<cytnx_int64> &accs) const {
309 std::vector<cytnx::Accessor> acc_in;
310 for (int i = 0; i < accs.size(); i++) {
311 acc_in.push_back(cytnx::Accessor(accs[i]));
312 }
313 return Tproxy(this->_impl, acc_in);
314 }
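
The proxy-based indexing above mirrors Python-style element access. A minimal usage sketch (not part of this header), assuming the usual umbrella header cytnx.hpp and cytnx::Accessor::all() from Accessor.hpp:

  #include <iostream>
  #include "cytnx.hpp"
  using namespace cytnx;

  void proxy_access_demo() {
    Tensor A({3, 4});                       // rank-2 double tensor, zero-initialized
    A(0, 1) = 3.14;                         // Tproxy as l-value: set a single element
    double v = A(0, 1).item<double>();      // Tproxy as r-value: read it back
    Tensor row = A[{Accessor(0), Accessor::all()}];  // slice out row 0
    std::cout << v << "\n" << row << std::endl;
  }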
316 //-------------------------------------------
317
319 void _Save(std::fstream &f) const;
320 void _Load(std::fstream &f);
321
323
332 void Save(const std::string &fname) const;
336 void Save(const char *fname) const;
337
346 void Tofile(const std::string &fname) const;
347
351 void Tofile(const char *fname) const;
352
356 void Tofile(std::fstream &f) const;
357
366 static Tensor Load(const std::string &fname);
370 static Tensor Load(const char *fname);
371
392 static Tensor Fromfile(const std::string &fname, const unsigned int &dtype,
393 const cytnx_int64 &count = -1);
394 static Tensor Fromfile(const char *fname, const unsigned int &dtype,
395 const cytnx_int64 &count = -1);
396
397 // static Tensor Frombinary(const std::string &fname);
398
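
A short sketch of the file I/O members declared above (not part of the header); the file names are illustrative only, and the exact extension handling lives in the .cpp, not shown here:

  #include "cytnx.hpp"
  using namespace cytnx;

  void io_demo() {
    Tensor A({2, 3}, Type.Double);
    A.fill(1.5);
    A.Save("my_tensor");                                      // illustrative path
    Tensor B = Tensor::Load("my_tensor");                     // reload it
    A.Tofile("raw.bin");                                      // raw binary dump of the elements
    Tensor C = Tensor::Fromfile("raw.bin", Type.Double, 6);   // read 6 doubles back
  }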
400 boost::intrusive_ptr<Tensor_impl> _impl;
401 Tensor() : _impl(new Tensor_impl()){};
402 Tensor(const Tensor &rhs) { _impl = rhs._impl; }
403
404 /*
405 template<class Tp>
406 Tensor(const std::initializer_list<Tp> &rhs){
407 Storage stmp = std::vector<Tp>(rhs);
408 boost::intrusive_ptr<Tensor_impl> tmp(new Tensor_impl());
409 tmp->Init(stmp);
410 this->_impl = tmp;
411 }
412 */
413
414 Tensor &operator=(const Tensor &rhs) {
415 _impl = rhs._impl;
416 return *this;
417 }
418
419 void operator=(const Tproxy &rhsp) { // this is used to handle proxy assignment
420 this->_impl = rhsp._insimpl->get(rhsp._accs);
421 }
423
425 // default device==Device.cpu (-1)
450 void Init(const std::vector<cytnx_uint64> &shape, const unsigned int &dtype = Type.Double,
451 const int &device = -1, const bool &init_zero = true) {
452 boost::intrusive_ptr<Tensor_impl> tmp(new Tensor_impl());
453 this->_impl = tmp;
454 this->_impl->Init(shape, dtype, device, init_zero);
455 }
456 // void Init(const Storage& storage) {
457 // boost::intrusive_ptr<Tensor_impl> tmp(new Tensor_impl());
458 // this->_impl = tmp;
459 // this->_impl->Init(storage);
460 // }
461 // void Init(const Storage& storage, const std::vector<cytnx_uint64> &shape,
462 // const unsigned int &dtype = Type.Double, const int &device = -1) {
463 // boost::intrusive_ptr<Tensor_impl> tmp(new Tensor_impl());
464 // this->_impl = tmp;
465 // this->_impl->Init(storage, shape, dtype, device);
466 // }
467
480 Tensor(const std::vector<cytnx_uint64> &shape, const unsigned int &dtype = Type.Double,
481 const int &device = -1, const bool &init_zero = 1)
482 : _impl(new Tensor_impl()) {
483 this->Init(shape, dtype, device, init_zero);
484 }
485 // Tensor(const Storage& storage)
486 // : _impl(new Tensor_impl()) {
487 // this->Init(storage);
488 // }
489 // Tensor(const Storage& storage, const std::vector<cytnx_uint64> &shape,
490 // const unsigned int &dtype = Type.Double, const int &device = -1)
491 // : _impl(new Tensor_impl()) {
492 // this->Init(storage, shape, dtype, device);
493 // }
495
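
A minimal construction sketch (not part of the header) showing the two equivalent paths: the shape/dtype/device constructor, and default construction followed by Init():

  #include "cytnx.hpp"
  using namespace cytnx;

  void init_demo() {
    // one step: shape, dtype, device (-1 == Device.cpu), zero-initialize
    Tensor A({2, 3, 4}, Type.Double, -1, true);

    // two steps: default-construct, then Init with the same kind of arguments
    Tensor B;
    B.Init({2, 3, 4}, Type.Float);
  }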
496 // This mechanism is to remove the 'void' type from Type_list. Taking advantage of it
497 // appearing first ...
498
500 struct internal {
501 template <typename Variant>
502 struct exclude_first;
503
504 template <typename First, typename... Rest>
505 struct exclude_first<std::variant<First, Rest...>> {
506 using type = std::variant<Rest...>;
507 };
508 }; // internal
510
511 // std::variant of pointers to Type_list, without void ....
512 using pointer_types =
513 make_variant_from_transform_t<typename internal::exclude_first<Type_list>::type,
514 std::add_pointer>;
515
516 // convert this->_impl->_storage._impl->Mem to a typed variant of pointers, excluding void*
517 pointer_types ptr() const;
518
519 // convert this->_impl->_storage->Mem to the given pointer type.
520 // Throws an exception if T does not match this->dtype
521 template <typename T>
522 T *ptr_as() const {
523 cytnx_error_msg(this->dtype() != Type_class::cy_typeid_v<std::remove_cv_t<T>>,
524 "[ERROR] Attempt to convert dtype %d (%s) to pointer of type %s",
525 this->dtype(), Type_class::getname(this->dtype()).c_str(),
526 Type_class::getname(Type_class::cy_typeid_v<std::remove_cv_t<T>>).c_str());
527 return static_cast<T *>(this->_impl->_storage._impl->data());
528 }
529
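
A sketch of ptr_as<T>() usage (not part of the header); the requested type must match the tensor's dtype, otherwise cytnx_error_msg fires:

  #include "cytnx.hpp"
  using namespace cytnx;

  void raw_pointer_demo() {
    Tensor A({4}, Type.Double);
    double *p = A.ptr_as<double>();   // OK: dtype is Double
    p[0] = 2.5;                       // writes straight into the tensor's storage
    // A.ptr_as<float>();             // would throw: float does not match dtype Double
  }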
530 #ifdef UNI_GPU
531 // std::variant of pointers to Type_list_gpu, without void ....
532 using gpu_pointer_types =
534 std::add_pointer>;
535
536 // convert this->_impl->_storage->Mem to a typed variant of pointers, excluding void*
538
539 // convert this->_impl->_storage->Mem to the given pointer type.
540 // Throws an exception if T does not match this->dtype
541 template <typename T>
542 T *gpu_ptr_as() const {
543 cytnx_error_msg(
544 this->dtype() != Type_class::cy_typeid_gpu_v<std::remove_cv_t<T>>,
545 "[ERROR] Attempt to convert dtype %d (%s) to GPU pointer of type %s", this->dtype(),
546 Type_class::getname(this->dtype()).c_str(),
547 Type_class::getname(Type_class::cy_typeid_gpu_v<std::remove_cv_t<T>>).c_str());
548 return static_cast<T *>(this->_impl->_storage._impl->data());
549 }
550 #endif
551
557 static Tensor from_storage(const Storage &in) {
558 Tensor out;
559 boost::intrusive_ptr<Tensor_impl> tmp(new Tensor_impl());
560 out._impl = tmp;
561 out._impl->Init(in);
562 return out;
563 }
564
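
A sketch of from_storage() (not part of the header), building a Tensor out of an existing Storage:

  #include "cytnx.hpp"
  using namespace cytnx;

  void from_storage_demo() {
    Tensor A({2, 3}, Type.Double);
    Storage s = A.storage();              // the 6-element storage behind A
    Tensor B = Tensor::from_storage(s);   // a tensor initialized from that storage
  }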
570 unsigned int dtype() const { return this->_impl->dtype(); }
571
577 int device() const { return this->_impl->device(); }
578
584 std::string dtype_str() const { return this->_impl->dtype_str(); }
585
591 std::string device_str() const { return this->_impl->device_str(); }
592
597 const std::vector<cytnx_uint64> &shape() const { return this->_impl->shape(); }
598
603 cytnx_uint64 rank() const { return this->_impl->shape().size(); }
604
622 Tensor clone() const {
623 Tensor out;
624 out._impl = this->_impl->clone();
625 return out;
626 }
627
648 Tensor to(const int &device) const {
649 Tensor out;
650 out._impl = this->_impl->to(device);
651 return out;
652 }
653
671 void to_(const int &device) { this->_impl->to_(device); }
672
677 const bool &is_contiguous() const { return this->_impl->is_contiguous(); }
678
679 Tensor &permute_(const std::vector<cytnx_uint64> &rnks) {
680 this->_impl->permute_(rnks);
681 return *this;
682 }
684 template <class... Ts>
685 Tensor &permute_(const cytnx_uint64 &e1, const Ts &...elems) {
686 std::vector<cytnx_uint64> argv = dynamic_arg_uint64_resolver(e1, elems...);
687 this->_impl->permute_(argv);
688 return *this;
689 }
691
710 Tensor permute(const std::vector<cytnx_uint64> &rnks) const {
711 Tensor out;
712 out._impl = this->_impl->permute(rnks);
713 return out;
714 }
716 template <class... Ts>
717 Tensor permute(const cytnx_uint64 &e1, const Ts &...elems) const {
718 std::vector<cytnx_uint64> argv = dynamic_arg_uint64_resolver(e1, elems...);
719 return this->permute(argv);
720 }
722
738 Tensor contiguous() const {
739 Tensor out;
740 out._impl = this->_impl->contiguous();
741 return out;
742 }
743
758 Tensor contiguous_() {
759 this->_impl->contiguous_();
760 return *this;
761 }
762
784 Tensor &reshape_(const std::vector<cytnx_int64> &new_shape) {
785 this->_impl->reshape_(new_shape);
786 return *this;
787 }
789 Tensor &reshape_(const std::vector<cytnx_uint64> &new_shape) {
790 std::vector<cytnx_int64> shape(new_shape.begin(), new_shape.end());
791 this->_impl->reshape_(shape);
792 return *this;
793 }
794 Tensor &reshape_(const std::initializer_list<cytnx_int64> &new_shape) {
795 std::vector<cytnx_int64> shape = new_shape;
796 this->_impl->reshape_(shape);
797 return *this;
798 }
799 template <class... Ts>
800 Tensor &reshape_(const cytnx_int64 &e1, const Ts... elems) {
801 std::vector<cytnx_int64> shape = dynamic_arg_int64_resolver(e1, elems...);
802 // std::cout << shape << std::endl;
803 this->_impl->reshape_(shape);
804 return *this;
805 }
807
832 Tensor reshape(const std::vector<cytnx_int64> &new_shape) const {
833 Tensor out;
834 out._impl = this->_impl->reshape(new_shape);
835 return out;
836 }
837
841 Tensor reshape(const std::vector<cytnx_uint64> &new_shape) const {
842 std::vector<cytnx_int64> tmp(new_shape.size());
843 memcpy(&tmp[0], &new_shape[0], sizeof(cytnx_uint64) * new_shape.size());
844 Tensor out;
845 out._impl = this->_impl->reshape(tmp);
846 return out;
847 }
848
852 Tensor reshape(const std::initializer_list<cytnx_int64> &new_shape) const {
853 return this->reshape(std::vector<cytnx_int64>(new_shape));
854 }
855
857 template <class... Ts>
858 Tensor reshape(const cytnx_int64 &e1, const Ts &...elems) const {
859 std::vector<cytnx_int64> argv = dynamic_arg_int64_resolver(e1, elems...);
860 return this->reshape(argv);
861 }
863
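
A sketch (not part of the header) chaining the shape-manipulation members above:

  #include "cytnx.hpp"
  using namespace cytnx;

  void shape_ops_demo() {
    Tensor A({2, 3, 4}, Type.Double);
    Tensor B = A.permute(2, 0, 1);   // axes reordered; result may be non-contiguous
    B = B.contiguous();              // coalesce the memory to the new axis order
    Tensor C = B.reshape(4, 6);      // returns a new reshaped tensor
    C.reshape_({-1});                // in place; {-1} flattens, as flatten_() does below
  }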
885 Tensor astype(const int &new_type) const {
886 Tensor out;
887 out._impl = this->_impl->astype(new_type);
888 return out;
889 }
890
891 // Tensor diagonal(){
892 // for(unsigned int i=0;i<this->shape().size();i++){
893 // if(this->shape()[i] != this->shape()[0],"[ERROR] Tensor.diagonal() can only be called
894 // when the subject has equal dimension in each rank.%s","\n");
895 // }
896 //
897 // }
898
919 template <class T>
920 T &at(const std::vector<cytnx_uint64> &locator) {
921 return this->_impl->at<T>(locator);
922 }
923
927 template <class T>
928 const T &at(const std::vector<cytnx_uint64> &locator) const {
929 return this->_impl->at<T>(locator);
930 }
932 template <class T, class... Ts>
933 const T &at(const cytnx_uint64 &e1, const Ts &...elems) const {
934 std::vector<cytnx_uint64> argv = dynamic_arg_uint64_resolver(e1, elems...);
935 return this->at<T>(argv);
936 }
937 template <class T, class... Ts>
938 T &at(const cytnx_uint64 &e1, const Ts &...elems) {
939 std::vector<cytnx_uint64> argv = dynamic_arg_uint64_resolver(e1, elems...);
940 return this->at<T>(argv);
941 }
942
943 const Scalar::Sproxy at(const std::vector<cytnx_uint64> &locator) const {
944 return this->_impl->at(locator);
945 }
946
947 Scalar::Sproxy at(const std::vector<cytnx_uint64> &locator) { return this->_impl->at(locator); }
949
975 template <class T>
976 T &item() {
977 cytnx_error_msg(this->_impl->storage().size() != 1, "[ERROR][Tensor.item<T>]%s",
978 "item can only be called from a Tensor with only one element\n");
979 return this->_impl->storage().at<T>(0);
980 }
981
983 template <class T>
984 const T &item() const {
985 cytnx_error_msg(this->_impl->storage().size() != 1, "[ERROR][Tensor.item<T>]%s",
986 "item can only be called from a Tensor with only one element\n");
987 return this->_impl->storage().at<T>(0);
988 }
989
990 const Scalar::Sproxy item() const {
991 Scalar::Sproxy out(this->storage()._impl, 0);
992 return out;
993 }
994
995 Scalar::Sproxy item() {
996 Scalar::Sproxy out(this->storage()._impl, 0);
997 return out;
998 }
999
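
A sketch of typed element access with at<T>() and item<T>() (not part of the header):

  #include "cytnx.hpp"
  using namespace cytnx;

  void element_access_demo() {
    Tensor A({2, 2}, Type.Double);
    A.at<double>(0, 1) = 7.0;            // typed reference straight into the storage
    double v = A.at<double>({1, 0});     // vector-of-indices overload
    Tensor s = A(0, 1);                  // one-element slice via the proxy
    double w = s.item<double>();         // item() requires exactly one element
  }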
1001
1023 Tensor get(const std::vector<cytnx::Accessor> &accessors) const {
1024 Tensor out;
1025 out._impl = this->_impl->get(accessors);
1026 return out;
1027 }
1028
1029 /*
1030 Tensor get_v2(const std::vector<cytnx::Accessor> &accessors) const{
1031 Tensor out;
1032 out._impl = this->_impl->get_v2(accessors);
1033 return out;
1034 }
1035 */
1036
1055 void set(const std::vector<cytnx::Accessor> &accessors, const Tensor &rhs) {
1056 this->_impl->set(accessors, rhs._impl);
1057 }
1058
1077 template <class T>
1078 void set(const std::vector<cytnx::Accessor> &accessors, const T &rc) {
1079 this->_impl->set(accessors, rc);
1080 }
1082 template <class T>
1083 void set(const std::initializer_list<cytnx::Accessor> &accessors, const T &rc) {
1084 std::vector<cytnx::Accessor> args = accessors;
1085 this->set(args, rc);
1086 }
1088
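
A sketch of get()/set() with explicit Accessor lists (not part of the header); Accessor::all() is assumed from Accessor.hpp:

  #include "cytnx.hpp"
  using namespace cytnx;

  void slice_demo() {
    Tensor A({4, 4}, Type.Double);
    std::vector<Accessor> slc = {Accessor(0), Accessor::all()};  // row 0, every column
    Tensor row = A.get(slc);   // copy of the selected elements
    A.set(slc, 9.0);           // overwrite the same selection with a constant
  }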
1098 Storage &storage() const { return this->_impl->storage(); }
1099
1114 template <class T>
1115 void fill(const T &val) {
1116 this->_impl->fill(val);
1117 }
1118
1123 bool equivshape(const Tensor &rhs) {
1124 if (this->shape() != rhs.shape()) return false;
1125 return true;
1126 }
1127
1135 Tensor real();
1136
1144 Tensor imag();
1145
1146 // Arithmetic:
1163 template <class T>
1164 Tensor &operator+=(const T &rc);
1165
1182 template <class T>
1183 Tensor &operator-=(const T &rc);
1184
1201 template <class T>
1202 Tensor &operator*=(const T &rc);
1203
1221 template <class T>
1222 Tensor &operator/=(const T &rc);
1223
1224 // Tensor &operator+=(const Tproxy &rc);
1225 // Tensor &operator-=(const Tproxy &rc);
1226 // Tensor &operator*=(const Tproxy &rc);
1227 // Tensor &operator/=(const Tproxy &rc);
1228 /*
1229 Tensor operator+(const Tproxy &rc){
1230 return *this + Tensor(rc);
1231 }
1232 Tensor operator-(const Tproxy &rc){
1233 return *this - Tensor(rc);
1234 }
1235 Tensor operator*(const Tproxy &rc){
1236 return *this * Tensor(rc);
1237 }
1238 Tensor operator/(const Tproxy &rc){
1239 return *this / Tensor(rc);
1240 }
1241 */
1247 template <class T>
1248 Tensor Add(const T &rhs) {
1249 return *this + rhs;
1250 }
1251
1257 template <class T>
1258 Tensor &Add_(const T &rhs) {
1259 return *this += rhs;
1260 }
1261
1267 template <class T>
1268 Tensor Sub(const T &rhs) {
1269 return *this - rhs;
1270 }
1271
1277 template <class T>
1278 Tensor &Sub_(const T &rhs) {
1279 return *this -= rhs;
1280 }
1281
1287 template <class T>
1288 Tensor Mul(const T &rhs) {
1289 return *this * rhs;
1290 }
1291
1297 template <class T>
1298 Tensor &Mul_(const T &rhs) {
1299 return *this *= rhs;
1300 }
1301
1308 template <class T>
1309 Tensor Div(const T &rhs) {
1310 return *this / rhs;
1311 }
1312
1319 template <class T>
1320 Tensor &Div_(const T &rhs) {
1321 return *this /= rhs;
1322 }
1323
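
A sketch (not part of the header) of the member arithmetic helpers, which are thin wrappers over the corresponding operators:

  #include "cytnx.hpp"
  using namespace cytnx;

  void arithmetic_demo() {
    Tensor A({3}, Type.Double);
    Tensor B({3}, Type.Double);
    A.fill(2.0);
    B.fill(5.0);
    Tensor C = A.Add(B);           // same as A + B
    A.Mul_(3.0);                   // same as A *= 3.0, modifies A in place
    Tensor D = B.Sub(1.0).Div(A);  // member calls can be chained
  }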
1330 template <class T>
1331 Tensor Cpr(const T &rhs) {
1332 return *this == rhs;
1333 }
1334
1335 // /**
1336 // * @brief Compare each element of the current tensor with the input tensor.
1337 // * @details This function Compare each element of the current tensor with the input tensor.
1338 // * @param[in] rhs the compared tensor.
1339 // */
1340 // bool approx_eq(const Tensor &rhs, const cytnx_double tol = 0) {
1341 // if (this->device() != rhs.device()) {
1342 // if (User_debug)
1343 // std::cout << "[approx_eq] Tensor device " << this->device()
1344 // << "not equal to rhs tensor device " << rhs.device() << std::endl;
1345 // return false;
1346 // }
1347 // // if (this->dtype() != rhs.dtype()) {
1348 // // std::cout << "[approx_eq] Tensor dtype " << this->dtype()
1349 // // << "not equal to rhs tensor dtype " << rhs.dtype() << std::endl;
1350 // // return false;
1351 // // }
1352 // if (this->shape() != rhs.shape()) {
1353 // if (User_debug)
1354 // std::cout << "[approx_eq] Tensor shape " << this->shape()
1355 // << "not equal to rhs tensor shape " << rhs.shape() << std::endl;
1356 // return false;
1357 // }
1358 // if (this->is_contiguous() != rhs.is_contiguous()) {
1359 // if (User_debug)
1360 // std::cout << "[AreNearlyEqTensor] Tensor contiguous flag " << this->is_contiguous()
1361 // << "not equal to rhs tensor flag " << rhs.is_contiguous() << std::endl;
1362 // return false;
1363 // }
1364 // return this->_impl->_storage.approx_eq(rhs._impl->_storage._impl, tol);
1365 // }
1366
1367 // template<class T>
1368 // Tensor& Cpr_(const T &rhs){
1369 //
1370 // return *this == rhs;
1371 // }
1372
1373 template <class T>
1374 Tensor Mod(const T &rhs) {
1375 return *this % rhs;
1376 }
1377
1384 Tensor operator-() { return this->Mul(-1.); }
1385
1393 Tensor flatten() const {
1394 Tensor out = this->clone();
1395 out.contiguous_();
1396 out.reshape_({-1});
1397 return out;
1398 }
1399
1407 void flatten_() {
1408 this->contiguous_();
1409 this->reshape_({-1});
1410 }
1411
1438 void append(const Tensor &rhs) {
1439 // Tensor in;
1440 if (!this->is_contiguous()) this->contiguous_();
1441
1442 // check Tensor in shape:
1443 cytnx_error_msg(rhs.shape().size() == 0 || this->shape().size() == 0,
1444 "[ERROR] try to append a null Tensor.%s", "\n");
1445 cytnx_error_msg(rhs.shape().size() != (this->shape().size() - 1),
1446 "[ERROR] try to append a Tensor with rank not match.%s", "\n");
1447 cytnx_uint64 Nelem = 1;
1448 for (unsigned int i = 0; i < rhs.shape().size(); i++) {
1449 cytnx_error_msg(rhs.shape()[i] != this->shape()[i + 1],
1450 "[ERROR] dimension mismatch @ rhs.rank: [%d] this: [%d] rhs: [%d]\n", i,
1451 this->shape()[i + 1], rhs.shape()[i]);
1452 Nelem *= rhs.shape()[i];
1453 }
1454
1455 // check type:
1456 Tensor in;
1457 if (rhs.dtype() != this->dtype()) {
1458 in = rhs.astype(this->dtype());
1459 if (!in.is_contiguous()) in.contiguous_();
1460 } else {
1461 if (!rhs.is_contiguous())
1462 in = rhs.contiguous();
1463 else
1464 in = rhs;
1465 }
1466 this->_impl->_shape[0] += 1;
1467 cytnx_uint64 oldsize = this->_impl->_storage.size();
1468 this->_impl->_storage.resize(oldsize + Nelem);
1469 memcpy(((char *)this->_impl->_storage.data()) +
1470 oldsize * Type.typeSize(this->dtype()) / sizeof(char),
1471 in._impl->_storage.data(), Type.typeSize(in.dtype()) * Nelem);
1472 }
1500 void append(const Storage &srhs) {
1501 if (!this->is_contiguous()) this->contiguous_();
1502
1503 // check Tensor in shape:
1504 cytnx_error_msg(srhs.size() == 0 || this->shape().size() == 0,
1505 "[ERROR] try to append a null Tensor.%s", "\n");
1506 cytnx_error_msg((this->shape().size() - 1) != 1,
1507 "[ERROR] append a storage to Tensor can only accept rank-2 Tensor.%s", "\n");
1508 cytnx_error_msg(this->shape().back() != srhs.size(), "[ERROR] Tensor dimension mismatch!%s",
1509 "\n");
1510
1511 // check type:
1512 Storage in;
1513 if (srhs.dtype() != this->dtype()) {
1514 in = srhs.astype(this->dtype());
1515 } else {
1516 in = srhs;
1517 }
1518 this->_impl->_shape[0] += 1;
1519 cytnx_uint64 oldsize = this->_impl->_storage.size();
1520 this->_impl->_storage.resize(oldsize + in.size());
1521 memcpy(((char *)this->_impl->_storage.data()) +
1522 oldsize * Type.typeSize(this->dtype()) / sizeof(char),
1523 in._impl->data(), Type.typeSize(in.dtype()) * in.size());
1524 }
1525 /*
1526 void append(const Tensor &rhs){
1527 // convert to the same type.
1528 Tensor in;
1529 if(rhs.dtype() != this->dtype()){
1530 in = rhs.astype(this->dtype());
1531 }else{
1532 in = rhs;
1533 }
1534
1535 // 1) check rank
1536 if(this->shape().size()==1){
1537 // check if rhs is a scalar tensor (only one element)
1538 cytnx_error_msg(!(rhs.shape().size()==1 && rhs.shape()[0]==1),"[ERROR] trying to append
1539 a scalar into multidimentional Tensor is not allow.\n Only rank-1 Tensor can accept scalar
1540 append.%s","\n"); this->_impl->_shape[0]+=1; this->_impl->_storage.append(0);
1541
1542 }else{
1543 cytnx_error_msg(rhs.shape().size() != this->shape().size()-1,"[ERROR] try to append a
1544 Tensor with rank not match.%s","\n");
1545
1546 }
1547 cytnx_error_msg(!this->is_contiguous(),"[ERROR] append require the Tensor to be contiguous.
1548 suggestion: call contiguous() or contiguous_() first.","\n");
1549 }
1550 */
1562 template <class T>
1563 void append(const T &rhs) {
1564 cytnx_error_msg(this->shape().size() != 1,
1565 "[ERROR] trying to append a scalar into a multidimensional Tensor is not "
1566 "allowed.\n Only a rank-1 Tensor can accept scalar append.%s",
1567 "\n");
1568 cytnx_error_msg(!this->is_contiguous(),
1569 "[ERROR] append requires the Tensor to be contiguous. suggestion: call "
1570 "contiguous() or contiguous_() first.",
1571 "\n");
1572 this->_impl->_shape[0] += 1;
1573 this->_impl->_storage.append(rhs);
1574 }
1575
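
A sketch of the shape rules enforced by the three append() overloads (not part of the header):

  #include "cytnx.hpp"
  using namespace cytnx;

  void append_demo() {
    Tensor A({2, 3}, Type.Double);   // rank-2, rows of length 3
    Tensor row({3}, Type.Double);    // rank must be A's rank minus one, dims must match
    A.append(row);                   // A is now shape {3, 3}

    Tensor v({2}, Type.Double);      // only rank-1 tensors accept scalar append
    v.append(1.5);                   // v is now shape {3}
  }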
1584 bool same_data(const Tensor &rhs) const;
1585
1586 // linalg:
1592 std::vector<Tensor> Svd(const bool &is_UvT = true) const;
1593
1599 std::vector<Tensor> Eigh(const bool &is_V = true, const bool &row_v = false) const;
1600
1605 Tensor &InvM_();
1606
1611 Tensor InvM() const;
1612
1617 Tensor &Inv_(const double &clip);
1618
1623 Tensor Inv(const double &clip) const;
1624
1629 Tensor &Conj_();
1630
1635 Tensor Conj() const;
1636
1641 Tensor &Exp_();
1642
1647 Tensor Exp() const;
1648
1653 Tensor Norm() const;
1654
1659 Tensor Pow(const cytnx_double &p) const;
1660
1665 Tensor &Pow_(const cytnx_double &p);
1666
1671 Tensor Trace(const cytnx_uint64 &a = 0, const cytnx_uint64 &b = 1) const;
1672
1677 Tensor Abs() const;
1678
1683 Tensor &Abs_();
1684
1689 Tensor Max() const;
1690
1695 Tensor Min() const;
1696
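
A sketch of the linalg member shortcuts (not part of the header); each forwards to the corresponding cytnx::linalg routine:

  #include "cytnx.hpp"
  using namespace cytnx;

  void linalg_members_demo() {
    Tensor M({4, 4}, Type.Double);
    M.fill(1.0);
    std::vector<Tensor> svd = M.Svd();  // factors of M; see cytnx::linalg::Svd for the ordering
    Tensor n = M.Norm();                // one-element tensor holding the norm
    Tensor t = M.Trace();               // trace over axes 0 and 1 by default
  }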
1697 }; // class Tensor
1698
1699 Tensor operator+(const Tensor &lhs, const Tensor::Tproxy &rhs);
1700 Tensor operator-(const Tensor &lhs, const Tensor::Tproxy &rhs);
1701 Tensor operator*(const Tensor &lhs, const Tensor::Tproxy &rhs);
1702 Tensor operator/(const Tensor &lhs, const Tensor::Tproxy &rhs);
1703
1704 Tensor operator+(const Tensor &lhs, const Scalar::Sproxy &rhs);
1705 Tensor operator-(const Tensor &lhs, const Scalar::Sproxy &rhs);
1706 Tensor operator*(const Tensor &lhs, const Scalar::Sproxy &rhs);
1707 Tensor operator/(const Tensor &lhs, const Scalar::Sproxy &rhs);
1708
1710 std::ostream &operator<<(std::ostream &os, const Tensor &in);
1711 std::ostream &operator<<(std::ostream &os, const Tensor::Tproxy &in);
1713 //{ os << Tensor(in);};
1714} // namespace cytnx
1715
1716#endif // BACKEND_TORCH
1717
1718#endif // CYTNX_TENSOR_H_
constexpr Type_class Type
data type
Definition Type.hpp:426
object that mimic the python slice to access elements in C++ [this is for c++ API only].
Definition Accessor.hpp:17
a tensor (multi-dimensional array)
Definition Tensor.hpp:41
void append(const Storage &srhs)
the append function of the Storage.
Definition Tensor.hpp:1500
Tensor & operator*=(const T &rc)
multiplication assignment operator with a Tensor or a scalar.
Tensor & Inv_(const double &clip)
the Inv_ member function. Same as cytnx::linalg::Inv_(Tensor &Tin, const double &clip)
Tensor & operator/=(const T &rc)
division assignment operator with a Tensor or a scalar.
Tensor operator-()
The negation function.
Definition Tensor.hpp:1384
void fill(const T &val)
fill all the elements of the current Tensor with the value.
Definition Tensor.hpp:1115
Tensor InvM() const
the InvM member function. Same as cytnx::linalg::InvM(const Tensor &Tin), where Tin is the current Te...
bool same_data(const Tensor &rhs) const
Check whether two tensors share the same internal memory.
void to_(const int &device)
move the current Tensor to the device.
Definition Tensor.hpp:671
Tensor & permute_(const std::vector< cytnx_uint64 > &rnks)
Definition Tensor.hpp:679
Tensor reshape(const std::vector< cytnx_uint64 > &new_shape) const
Definition Tensor.hpp:841
Tensor(const std::vector< cytnx_uint64 > &shape, const unsigned int &dtype=Type.Double, const int &device=-1, const bool &init_zero=1)
Construct a new Tensor object.
Definition Tensor.hpp:480
void append(const T &rhs)
the append function of the scalar.
Definition Tensor.hpp:1563
Tensor & operator-=(const T &rc)
subtraction assignment operator with a Tensor or a scalar.
Tensor & Add_(const T &rhs)
Addition function with a Tensor or a scalar, inplacely. Same as operator+=(const T &rhs).
Definition Tensor.hpp:1258
Tensor Abs() const
the Abs member function. Same as linalg::Abs(const Tensor &Tin), where Tin is the current Tensor.
Tensor reshape(const std::initializer_list< cytnx_int64 > &new_shape) const
Definition Tensor.hpp:852
std::string device_str() const
the device (in string) of the Tensor
Definition Tensor.hpp:591
Tensor contiguous_()
Make the Tensor contiguous by coalescing the memory (storage), inplacely.
Definition Tensor.hpp:758
Tensor Mul(const T &rhs)
Multiplication function with a Tensor or a scalar. Same as cytnx::operator*(const Tensor &self,...
Definition Tensor.hpp:1288
unsigned int dtype() const
the dtype-id of the Tensor
Definition Tensor.hpp:570
Tensor Sub(const T &rhs)
Subtraction function with a Tensor or a scalar. Same as cytnx::operator-(const Tensor &self,...
Definition Tensor.hpp:1268
Tensor Inv(const double &clip) const
the Inv member function. Same as cytnx::linalg::Inv(const Tensor &Tin, const double &clip)
Tensor contiguous() const
Make the Tensor contiguous by coalescing the memory (storage).
Definition Tensor.hpp:738
void Tofile(const std::string &fname) const
Save current Tensor to the binary file.
T & at(const std::vector< cytnx_uint64 > &locator)
Get an element at specific location.
Definition Tensor.hpp:920
Tensor reshape(const std::vector< cytnx_int64 > &new_shape) const
return a new Tensor that is reshaped.
Definition Tensor.hpp:832
static Tensor Fromfile(const std::string &fname, const unsigned int &dtype, const cytnx_int64 &count=-1)
Load current Tensor from the binary file.
T & item()
get the element from a rank-0 Tensor.
Definition Tensor.hpp:976
Tensor clone() const
return a clone of the current Tensor.
Definition Tensor.hpp:622
std::vector< Tensor > Eigh(const bool &is_V=true, const bool &row_v=false) const
the Eigh member function. Same as cytnx::linalg::Eigh(const Tensor &Tin, const bool &is_V,...
void Tofile(const char *fname) const
void append(const Tensor &rhs)
the append function.
Definition Tensor.hpp:1438
static Tensor Load(const char *fname)
void Save(const char *fname) const
void set(const std::vector< cytnx::Accessor > &accessors, const Tensor &rhs)
set elements with the input Tensor using Accessor (C++ API) / slices (python API)
Definition Tensor.hpp:1055
static Tensor Fromfile(const char *fname, const unsigned int &dtype, const cytnx_int64 &count=-1)
Tensor Norm() const
the Norm member function. Same as linalg::Norm(const Tensor &Tin), where Tin is the current Tensor.
Tensor astype(const int &new_type) const
return a new Tensor that cast to different dtype.
Definition Tensor.hpp:885
Tensor & Div_(const T &rhs)
Division function with a Tensor or a scalar, inplacely. Same as operator/=(const T &rhs).
Definition Tensor.hpp:1320
make_variant_from_transform_t< typename internal::exclude_first< Type_list >::type, std::add_pointer > pointer_types
Definition Tensor.hpp:514
pointer_types ptr() const
static Tensor Load(const std::string &fname)
Load current Tensor from file.
Tensor & operator+=(const T &rc)
addition assignment operator with a Tensor or a scalar.
Tensor Conj() const
the Conj member function. Same as cytnx::linalg::Conj(const Tensor &Tin), where Tin is the current Te...
Tensor Trace(const cytnx_uint64 &a=0, const cytnx_uint64 &b=1) const
the Trace member function. Same as linalg::Trace(const Tensor &Tin, const cytnx_uint64 &a,...
bool equivshape(const Tensor &rhs)
compare the shape of two tensors.
Definition Tensor.hpp:1123
Tensor & Pow_(const cytnx_double &p)
the Pow_ member function. Same as linalg::Pow_(Tensor &Tin, const cytnx_double &p),...
std::vector< Tensor > Svd(const bool &is_UvT=true) const
the SVD member function. Same as cytnx::linalg::Svd(const Tensor &Tin, const bool &is_UvT) ,...
std::string dtype_str() const
the dtype (in string) of the Tensor
Definition Tensor.hpp:584
Tensor & Mul_(const T &rhs)
Multiplication function with a Tensor or a scalar, inplacely. Same as operator*=(const T &rhs).
Definition Tensor.hpp:1298
cytnx_uint64 rank() const
the rank of the Tensor
Definition Tensor.hpp:603
const bool & is_contiguous() const
return whether the Tensor is contiguous or not.
Definition Tensor.hpp:677
Tensor Exp() const
the Exp member function. Same as linalg::Exp(const Tensor &Tin), where Tin is the current Tensor.
Tensor & Abs_()
the Abs_ member function. Same as linalg::Abs_(Tensor &Tin), where Tin is the current Tensor.
Tensor Add(const T &rhs)
Addition function with a Tensor or a scalar. Same as cytnx::operator+(const Tensor &self,...
Definition Tensor.hpp:1248
void flatten_()
The flatten function, inplacely.
Definition Tensor.hpp:1407
void Save(const std::string &fname) const
Save current Tensor to file.
Tensor flatten() const
The flatten function.
Definition Tensor.hpp:1393
Tensor & Conj_()
the Conj_ member function. Same as cytnx::linalg::Conj_(Tensor &Tin), where Tin is the current Tensor...
T * ptr_as() const
Definition Tensor.hpp:522
Tensor Pow(const cytnx_double &p) const
the Pow member function. Same as linalg::Pow(const Tensor &Tin, const cytnx_double &p),...
int device() const
the device-id of the Tensor
Definition Tensor.hpp:577
Tensor real()
return the real part of the tensor.
Tensor imag()
return the imaginary part of the tensor.
Tensor to(const int &device) const
copy a tensor to new device
Definition Tensor.hpp:648
Tensor & reshape_(const std::vector< cytnx_int64 > &new_shape)
reshape the Tensor, inplacely
Definition Tensor.hpp:784
void Tofile(std::fstream &f) const
Tensor get(const std::vector< cytnx::Accessor > &accessors) const
get elements using Accessor (C++ API) / slices (python API)
Definition Tensor.hpp:1023
void set(const std::vector< cytnx::Accessor > &accessors, const T &rc)
set elements with the input constant using Accessor (C++ API) / slices (python API)
Definition Tensor.hpp:1078
Tensor Max() const
the Max member function. Same as linalg::Max(const Tensor &Tin), where Tin is the current Tensor.
Tensor permute(const std::vector< cytnx_uint64 > &rnks) const
perform tensor permute on the cytnx::Tensor and return a new instance.
Definition Tensor.hpp:710
Tensor Div(const T &rhs)
Division function with a Tensor or a scalar. Same as cytnx::operator/(const Tensor &self,...
Definition Tensor.hpp:1309
Tensor Mod(const T &rhs)
Definition Tensor.hpp:1374
void Init(const std::vector< cytnx_uint64 > &shape, const unsigned int &dtype=Type.Double, const int &device=-1, const bool &init_zero=true)
initialize a Tensor
Definition Tensor.hpp:450
Tensor Cpr(const T &rhs)
The comparison function.
Definition Tensor.hpp:1331
Tensor & Exp_()
the Exp_ member function. Same as linalg::Exp_(Tensor &Tin), where Tin is the current Tensor.
Tensor & InvM_()
the InvM_ member function. Same as cytnx::linalg::InvM_(Tensor &Tin), where Tin is the current Tensor...
const std::vector< cytnx_uint64 > & shape() const
the shape of the Tensor
Definition Tensor.hpp:597
Tensor Min() const
the Min member function. Same as linalg::Min(const Tensor &Tin), where Tin is the current Tensor.
const T & at(const std::vector< cytnx_uint64 > &locator) const
Definition Tensor.hpp:928
Storage & storage() const
return the storage of current Tensor.
Definition Tensor.hpp:1098
static Tensor from_storage(const Storage &in)
Convert a Storage to Tensor.
Definition Tensor.hpp:557
Tensor & Sub_(const T &rhs)
Subtraction function with a Tensor or a scalar, inplacely. Same as operator-=(const T &rhs).
Definition Tensor.hpp:1278
#define cytnx_error_msg(is_true, format,...)
Definition cytnx_error.hpp:18
Definition Accessor.hpp:12
cytnx::UniTensor operator*(const cytnx::UniTensor &Lt, const cytnx::UniTensor &Rt)
The multiplication operator between two UniTensor.
cytnx::UniTensor operator-(const cytnx::UniTensor &Lt, const cytnx::UniTensor &Rt)
The subtraction operator between two UniTensor.
cytnx::UniTensor operator+(const cytnx::UniTensor &Lt, const cytnx::UniTensor &Rt)
The addition operator between two UniTensor.
cytnx::UniTensor operator/(const cytnx::UniTensor &Lt, const cytnx::UniTensor &Rt)
The division operator between two UniTensor.