// Tensor.hpp — Cytnx v0.9.1
// (Doxygen page chrome removed; this is the header source itself.)
1#ifndef _H_Tensor_
2#define _H_Tensor_
3
4#include "Type.hpp"
5#include "cytnx_error.hpp"
6#include "Storage.hpp"
7#include "Device.hpp"
9#include <iostream>
10#include <fstream>
11#include "utils/vec_range.hpp"
12#include "utils/vec_cast.hpp"
13#include "utils/dynamic_arg_resolver.hpp"
14//#include "linalg.hpp"
15#include "Accessor.hpp"
16#include <utility>
17#include <vector>
18#include <initializer_list>
19#include <string>
20#include "Scalar.hpp"
21
22namespace cytnx {
23
25 // real implementation
26 class Tensor_impl : public intrusive_ptr_base<Tensor_impl> {
27 private:
28 // Interface:
29 Storage_init_interface __SII;
30
31 // Memory:
32 Storage _storage;
33
34 // tensor shape
35 std::vector<cytnx_uint64> _shape;
36
37 // pseudo-perm info
38 std::vector<cytnx_uint64> _mapper;
39 std::vector<cytnx_uint64> _invmapper;
40 bool _contiguous;
41
42 public:
43 friend class Tensor;
44 boost::intrusive_ptr<Tensor_impl> _clone_meta_only() const {
45 boost::intrusive_ptr<Tensor_impl> out(new Tensor_impl());
46 out->_mapper = this->_mapper;
47 out->_invmapper = this->_invmapper;
48 out->_shape = this->_shape;
49 out->_contiguous = this->_contiguous;
50 return out;
51 }
52 Tensor_impl() : _contiguous(true){};
53
54 void Init(const std::vector<cytnx_uint64> &shape, const unsigned int &dtype = Type.Double,
55 int device = -1, const bool &init_zero = true);
56 void Init(const Storage &in);
57 // void Init(const Storage &in, const std::vector<cytnx_uint64> &shape,
58 // const unsigned int &dtype, int device);
59 /*
60 template<class T>
61 void From_vec(const T &ndvec){
62 cytnx_error_msg(std::string(typeid(T).name()).find("vector") ==
63 std::string::npos,"[ERROR][Tensor][From_vec] the input argument should be a nd vector.%s","\n");
64 //dispatch the rank!:
65
66
67
68 }
69 */
70 // clone&assignment constr., use intrusive_ptr's
71 Tensor_impl(const Tensor_impl &rhs);
72 Tensor_impl &operator=(const Tensor_impl &rhs); // add const
73
74 unsigned int dtype() const { return this->_storage.dtype(); }
75 int device() const { return this->_storage.device(); }
76
77 std::string dtype_str() const { return Type.getname(this->_storage.dtype()); }
78 std::string device_str() const { return Device.getname(this->_storage.device()); }
79
80 const std::vector<cytnx_uint64> &shape() const { return _shape; }
81
82 const bool &is_contiguous() const { return this->_contiguous; }
83
84 const std::vector<cytnx_uint64> &mapper() const { return this->_mapper; }
85 const std::vector<cytnx_uint64> &invmapper() const { return this->_invmapper; }
86 Storage &storage() { return _storage; }
87
88 const Storage &storage() const { return _storage; }
89
90 boost::intrusive_ptr<Tensor_impl> clone() const {
91 boost::intrusive_ptr<Tensor_impl> out = this->_clone_meta_only();
92 out->_storage = this->_storage.clone();
93 return out;
94 }
95
96 void to_(const int &device) { this->_storage.to_(device); }
97 boost::intrusive_ptr<Tensor_impl> to(const int &device) {
98 if (this->device() == device) {
99 // boost::intrusive_ptr<Tensor_impl> out(this);
100 return this;
101 } else {
102 boost::intrusive_ptr<Tensor_impl> out = this->_clone_meta_only();
103 out->_storage = this->_storage.to(device);
104 return out;
105 }
106 }
107
108 void permute_(const std::vector<cytnx_uint64> &rnks);
109
110 boost::intrusive_ptr<Tensor_impl> permute(const std::vector<cytnx_uint64> &rnks);
111
112 template <class T>
113 T &at(const std::vector<cytnx_uint64> &locator) const {
114 cytnx_error_msg(locator.size() != this->_shape.size(), "%s",
115 "The input index does not match Tensor's rank.");
116
117 cytnx_uint64 RealRank, mtplyr;
118 // std::vector<cytnx_uint64> c_shape(this->_shape.size());
119 // std::vector<cytnx_uint64> c_loc(this->_shape.size());
120 cytnx_uint64 c_shape, c_loc;
121
122 RealRank = 0;
123 mtplyr = 1;
124
125 for (cytnx_int64 i = this->_shape.size() - 1; i >= 0; i--) {
126 if (locator[i] >= this->_shape[i]) {
127 cytnx_error_msg(true, "%s", "Attempting to access out-of-bound index in Tensor.");
128 }
129 // c_shape[i] = this->_shape[this->_invmapper[i]];
130 // c_loc[i] = locator[this->_invmapper[i]];
131 c_shape = this->_shape[this->_invmapper[i]];
132 c_loc = locator[this->_invmapper[i]];
133 RealRank += mtplyr * c_loc;
134 mtplyr *= c_shape;
135 }
136 return this->_storage.at<T>(RealRank);
137 }
138
139 const Scalar::Sproxy at(const std::vector<cytnx_uint64> &locator) const {
140 cytnx_error_msg(locator.size() != this->_shape.size(), "%s",
141 "The input index does not match Tensor's rank.");
142
143 cytnx_uint64 RealRank, mtplyr;
144 // std::vector<cytnx_uint64> c_shape(this->_shape.size());
145 // std::vector<cytnx_uint64> c_loc(this->_shape.size());
146
147 cytnx_uint64 c_shape, c_loc;
148 RealRank = 0;
149 mtplyr = 1;
150
151 for (cytnx_int64 i = this->_shape.size() - 1; i >= 0; i--) {
152 if (locator[i] >= this->_shape[i]) {
153 cytnx_error_msg(true, "%s", "Attempting to access out-of-bound index in Tensor.");
154 }
155 // c_shape[i] = this->_shape[this->_invmapper[i]];
156 // c_loc[i] = locator[this->_invmapper[i]];
157 c_shape = this->_shape[this->_invmapper[i]];
158 c_loc = locator[this->_invmapper[i]];
159 RealRank += mtplyr * c_loc;
160 mtplyr *= c_shape;
161 }
162 return this->_storage.at(RealRank);
163 }
164
165 Scalar::Sproxy at(const std::vector<cytnx_uint64> &locator) {
166 cytnx_error_msg(locator.size() != this->_shape.size(), "%s",
167 "The input index does not match Tensor's rank.");
168
169 cytnx_uint64 RealRank, mtplyr;
170 // std::vector<cytnx_uint64> c_shape(this->_shape.size());
171 // std::vector<cytnx_uint64> c_loc(this->_shape.size());
172 cytnx_uint64 c_shape, c_loc;
173
174 RealRank = 0;
175 mtplyr = 1;
176
177 for (cytnx_int64 i = this->_shape.size() - 1; i >= 0; i--) {
178 if (locator[i] >= this->_shape[i]) {
179 cytnx_error_msg(true, "%s", "Attempting to access out-of-bound index in Tensor.");
180 }
181 // c_shape[i] = this->_shape[this->_invmapper[i]];
182 // c_loc[i] = locator[this->_invmapper[i]];
183 c_shape = this->_shape[this->_invmapper[i]];
184 c_loc = locator[this->_invmapper[i]];
185 RealRank += mtplyr * c_loc;
186 mtplyr *= c_shape;
187 }
188 return this->_storage.at(RealRank);
189 }
190
191 boost::intrusive_ptr<Tensor_impl> get(const std::vector<cytnx::Accessor> &accessors);
192 boost::intrusive_ptr<Tensor_impl> get_deprecated(const std::vector<cytnx::Accessor> &accessors);
193 void set(const std::vector<cytnx::Accessor> &accessors,
194 const boost::intrusive_ptr<Tensor_impl> &rhs);
195
196 template <class T>
197 void set(const std::vector<cytnx::Accessor> &accessors, const T &rc);
198
199 void set(const std::vector<cytnx::Accessor> &accessors, const Scalar::Sproxy &rc);
200
201 template <class Tx>
202 void fill(const Tx &val) {
203 this->storage().fill(val);
204 }
205
206 boost::intrusive_ptr<Tensor_impl> contiguous() {
207 // return new instance if act on non-contiguous tensor
208 // return self if act on contiguous tensor
209 if (this->_contiguous) {
210 boost::intrusive_ptr<Tensor_impl> out(this);
211 // out->_storage = this->_storage;
212 return out;
213 } else {
214 boost::intrusive_ptr<Tensor_impl> out(new Tensor_impl());
215 std::vector<cytnx_uint64> oldshape(this->_shape.size());
216 for (cytnx_uint64 i = 0; i < this->_shape.size(); i++) {
217 oldshape[i] = this->_shape[this->_invmapper[i]];
218 }
219
220 out->_storage._impl =
221 this->_storage._impl->Move_memory(oldshape, this->_mapper, this->_invmapper);
222 // this->_storage._impl->Move_memory_(oldshape, this->_mapper, this->_invmapper);
223 // out->_storage._impl = this->_storage._impl;
224 // std::cout << out->_storage << std::endl;
225 out->_invmapper = vec_range(this->_invmapper.size());
226 out->_mapper = out->_invmapper;
227 out->_shape = this->_shape;
228 out->_contiguous = true;
229 return out;
230 }
231 }
232
233 void contiguous_() {
234 // return new instance if act on non-contiguous tensor
235 // return self if act on contiguous tensor
236 if (!this->_contiguous) {
237 std::vector<cytnx_uint64> oldshape(this->_shape.size());
238 for (cytnx_uint64 i = 0; i < this->_shape.size(); i++) {
239 oldshape[i] = this->_shape[this->_invmapper[i]];
240 }
241
242 this->_storage._impl =
243 this->_storage._impl->Move_memory(oldshape, this->_mapper, this->_invmapper);
244 // this->_storage._impl->Move_memory_(oldshape, this->_mapper, this->_invmapper);
245 // this->_mapper = vec_range(this->_invmapper.size());
246 vec_range_(this->_mapper, this->invmapper().size());
247 this->_invmapper = this->_mapper;
248 this->_contiguous = true;
249 }
250 }
251
252 void reshape_(const std::vector<cytnx_int64> &new_shape) {
253 if (!this->_contiguous) {
254 this->contiguous_();
255 }
256 // std::vector<cytnx_uint64> result_shape(new_shape.size());
257 cytnx_uint64 new_N = 1;
258 bool has_undetermine = false;
259 unsigned int Udet_id = 0;
260 // this->_shape = vec_cast<cytnx_int64,cytnx_uint64>(new_shape);
261 this->_shape.resize(new_shape.size());
262 for (cytnx_uint64 i = 0; i < new_shape.size(); i++) {
263 this->_shape[i] = new_shape[i];
264 }
265 for (int i = 0; i < new_shape.size(); i++) {
266 if (new_shape[i] < 0) {
267 if (new_shape[i] != -1)
269 new_shape[i] != -1, "%s",
270 "[ERROR] reshape can only have dimension > 0 and one undetermine rank specify as -1");
271 if (has_undetermine)
273 new_shape[i] != -1, "%s",
274 "[ERROR] reshape can only have dimension > 0 and one undetermine rank specify as -1");
275 Udet_id = i;
276 has_undetermine = true;
277 } else {
278 new_N *= new_shape[i];
279 // result_shape[i] = new_shape[i];
280 }
281 }
282
283 if (has_undetermine) {
284 cytnx_error_msg(new_N > this->_storage.size(), "%s",
285 "[ERROR] new shape exceed the total number of elements.");
286 cytnx_error_msg(this->_storage.size() % new_N, "%s",
287 "[ERROR] unmatch size when reshape with undetermine dimension");
288 // result_shape[Udet_id] = this->_storage.size() / new_N;
289 this->_shape[Udet_id] = this->_storage.size() / new_N;
290 } else {
291 cytnx_error_msg(new_N != this->_storage.size(), "%s",
292 "[ERROR] new shape does not match the number of elements.");
293 }
294
295 // this->_shape = result_shape;
296 // this->_mapper = std::move(vec_range(new_shape.size()));
297 this->_mapper.resize(new_shape.size());
298 vec_range_(this->_mapper, new_shape.size());
299 this->_invmapper = this->_mapper;
300 }
301
302 boost::intrusive_ptr<Tensor_impl> reshape(const std::vector<cytnx_int64> &new_shape) {
303 boost::intrusive_ptr<Tensor_impl> out(new Tensor_impl());
304 if (this->is_contiguous()) {
305 out = this->_clone_meta_only();
306 out->_storage = this->_storage;
307 } else {
308 out = this->contiguous();
309 }
310 // out = this->clone();
311
312 out->reshape_(new_shape);
313 return out;
314 }
315
316 boost::intrusive_ptr<Tensor_impl> astype(const int &new_type) {
317 // boost::intrusive_ptr<Tensor_impl> out(new Tensor_impl());
318 // out->_storage = this->_storage.astype(new_type);
319 if (this->dtype() == new_type) {
320 return this;
321 } else {
322 boost::intrusive_ptr<Tensor_impl> out = this->_clone_meta_only();
323 out->_storage = this->_storage.astype(new_type);
324 return out;
325 }
326 }
327 };
329
  class Tensor;

  // [Note] these are fwd from linalg.hpp
  // Element-wise binary operators between a Tensor and a scalar (or
  // another Tensor); the implementations live in the linalg module.
  template <class T>
  Tensor operator+(const Tensor &lhs, const T &rc);
  template <class T>
  Tensor operator-(const Tensor &lhs, const T &rhs);
  template <class T>
  Tensor operator*(const Tensor &lhs, const T &rhs);
  template <class T>
  Tensor operator/(const Tensor &lhs, const T &rhs);
343
345 class Tensor {
346 private:
347 public:
349 // this is a proxy class to allow get/set element using [] as python!
    // this is a proxy class to allow get/set element using [] as python!
    // A Tproxy holds the owning Tensor's impl plus the accessors that
    // select a sub-view. Nothing is materialized until the proxy is
    // read (conversion to Tensor / item / storage) or written
    // (operator= / compound ops), which forward to get()/set().
    struct Tproxy {
      boost::intrusive_ptr<Tensor_impl> _insimpl;  // owning tensor's impl
      std::vector<cytnx::Accessor> _accs;          // selected region
      Tproxy(boost::intrusive_ptr<Tensor_impl> _ptr, const std::vector<cytnx::Accessor> &accs)
          : _insimpl(std::move(_ptr)), _accs(accs) {}

      // when used to set elems:
      const Tensor &operator=(const Tensor &rhs) {
        this->_insimpl->set(_accs, rhs._impl);
        return rhs;
      }

      // Assign a scalar to every element of the selected region.
      template <class T>
      const T &operator=(const T &rc) {
        this->_insimpl->set(_accs, rc);
        return rc;
      }
      // Proxy-to-proxy assignment: materialize rhs into a Tensor, then
      // write it into this proxy's region.
      const Tproxy &operator=(const Tproxy &rc) {
        Tensor tmp = Tensor(rc);
        this->_insimpl->set(_accs, tmp._impl);
        return rc;
      }

      // Compound ops: read the region out, apply the op, write it back.
      // NOTE(review): after the write-back, self._impl is re-pointed at
      // the whole owning tensor, so the returned Tensor views the full
      // tensor, not the sub-region — presumably intentional; confirm.
      template <class T>
      Tensor operator+=(const T &rc) {
        Tensor self;
        self._impl = _insimpl->get(_accs);
        self += rc;
        _insimpl->set(_accs, self._impl);
        self._impl = this->_insimpl;
        return self;
      }
      Tensor operator+=(const Tproxy &rc);

      template <class T>
      Tensor operator-=(const T &rc) {
        Tensor self;
        self._impl = _insimpl->get(_accs);
        self -= rc;
        _insimpl->set(_accs, self._impl);
        self._impl = this->_insimpl;
        return self;
      }
      Tensor operator-=(const Tproxy &rc);

      template <class T>
      Tensor operator/=(const T &rc) {
        Tensor self;
        self._impl = _insimpl->get(_accs);
        self /= rc;
        _insimpl->set(_accs, self._impl);
        self._impl = this->_insimpl;
        return self;
      }
      Tensor operator/=(const Tproxy &rc);

      template <class T>
      Tensor operator*=(const T &rc) {
        Tensor self;
        self._impl = _insimpl->get(_accs);
        self *= rc;
        _insimpl->set(_accs, self._impl);
        self._impl = this->_insimpl;
        return self;
      }
      Tensor operator*=(const Tproxy &rc);

      // alias to resolve conflict with op ovld for rc=Tensor
      /*
      template<class T>
      Tensor _operatorADD(const T &rc) const{
        Tensor out;
        out._impl = _insimpl->get(_accs);
        return out.Add(rc);
      }
      */
      // Binary + against each supported scalar type (implemented in .cpp).
      Tensor operator+(const cytnx_complex128 &rc) const;  //{return this->_operatorADD(rc);};
      Tensor operator+(const cytnx_complex64 &rc) const;   //{return this->_operatorADD(rc);};
      Tensor operator+(const cytnx_double &rc) const;      //{return this->_operatorADD(rc);};
      Tensor operator+(const cytnx_float &rc) const;       //{return this->_operatorADD(rc);};
      Tensor operator+(const cytnx_uint64 &rc) const;      //{return this->_operatorADD(rc);};
      Tensor operator+(const cytnx_int64 &rc) const;       //{return this->_operatorADD(rc);};
      Tensor operator+(const cytnx_uint32 &rc) const;      //{return this->_operatorADD(rc);};
      Tensor operator+(const cytnx_int32 &rc) const;       //{return this->_operatorADD(rc);};
      Tensor operator+(const cytnx_uint16 &rc) const;      //{return this->_operatorADD(rc);};
      Tensor operator+(const cytnx_int16 &rc) const;       //{return this->_operatorADD(rc);};
      Tensor operator+(const cytnx_bool &rc) const;        //{return this->_operatorADD(rc);};
      Tensor operator+(const Tproxy &rc) const;

      /*
      template<class T>
      Tensor _operatorSUB(const T &rc) const{
        Tensor out;
        out._impl = _insimpl->get(_accs);
        return out.Sub(rc);
      }
      */
      // Binary - against each supported scalar type.
      Tensor operator-(const cytnx_complex128 &rc) const;  //{return this->_operatorSUB(rc);};
      Tensor operator-(const cytnx_complex64 &rc) const;   //{return this->_operatorSUB(rc);};
      Tensor operator-(const cytnx_double &rc) const;      //{return this->_operatorSUB(rc);};
      Tensor operator-(const cytnx_float &rc) const;       //{return this->_operatorSUB(rc);};
      Tensor operator-(const cytnx_uint64 &rc) const;      //{return this->_operatorSUB(rc);};
      Tensor operator-(const cytnx_int64 &rc) const;       //{return this->_operatorSUB(rc);};
      Tensor operator-(const cytnx_uint32 &rc) const;      //{return this->_operatorSUB(rc);};
      Tensor operator-(const cytnx_int32 &rc) const;       //{return this->_operatorSUB(rc);};
      Tensor operator-(const cytnx_uint16 &rc) const;      //{return this->_operatorSUB(rc);};
      Tensor operator-(const cytnx_int16 &rc) const;       //{return this->_operatorSUB(rc);};
      Tensor operator-(const cytnx_bool &rc) const;        //{return this->_operatorSUB(rc);};
      Tensor operator-(const Tproxy &rc) const;

      // Unary negation of the selected region.
      Tensor operator-() const;

      /*
      template<class T>
      Tensor _operatorMUL(const T &rc) const{
        Tensor out;
        out._impl = _insimpl->get(_accs);
        return out.Mul(rc);
      }
      */
      // Binary * against each supported scalar type.
      Tensor operator*(const cytnx_complex128 &rc) const;  //{return this->_operatorMUL(rc);};
      Tensor operator*(const cytnx_complex64 &rc) const;   //{return this->_operatorMUL(rc);};
      Tensor operator*(const cytnx_double &rc) const;      //{return this->_operatorMUL(rc);};
      Tensor operator*(const cytnx_float &rc) const;       //{return this->_operatorMUL(rc);};
      Tensor operator*(const cytnx_uint64 &rc) const;      //{return this->_operatorMUL(rc);};
      Tensor operator*(const cytnx_int64 &rc) const;       //{return this->_operatorMUL(rc);};
      Tensor operator*(const cytnx_uint32 &rc) const;      //{return this->_operatorMUL(rc);};
      Tensor operator*(const cytnx_int32 &rc) const;       //{return this->_operatorMUL(rc);};
      Tensor operator*(const cytnx_uint16 &rc) const;      //{return this->_operatorMUL(rc);};
      Tensor operator*(const cytnx_int16 &rc) const;       //{return this->_operatorMUL(rc);};
      Tensor operator*(const cytnx_bool &rc) const;        //{return this->_operatorMUL(rc);};
      Tensor operator*(const Tproxy &rc) const;

      /*
      template<class T>
      Tensor _operatorDIV(const T &rc) const{
        Tensor out;
        out._impl = _insimpl->get(_accs);
        return out.Div(rc);
      }
      */
      // Binary / against each supported scalar type.
      Tensor operator/(const cytnx_complex128 &rc) const;  //{return this->_operatorDIV(rc);};
      Tensor operator/(const cytnx_complex64 &rc) const;   //{return this->_operatorDIV(rc);};
      Tensor operator/(const cytnx_double &rc) const;      //{return this->_operatorDIV(rc);};
      Tensor operator/(const cytnx_float &rc) const;       //{return this->_operatorDIV(rc);};
      Tensor operator/(const cytnx_uint64 &rc) const;      //{return this->_operatorDIV(rc);};
      Tensor operator/(const cytnx_int64 &rc) const;       //{return this->_operatorDIV(rc);};
      Tensor operator/(const cytnx_uint32 &rc) const;      //{return this->_operatorDIV(rc);};
      Tensor operator/(const cytnx_int32 &rc) const;       //{return this->_operatorDIV(rc);};
      Tensor operator/(const cytnx_uint16 &rc) const;      //{return this->_operatorDIV(rc);};
      Tensor operator/(const cytnx_int16 &rc) const;       //{return this->_operatorDIV(rc);};
      Tensor operator/(const cytnx_bool &rc) const;        //{return this->_operatorDIV(rc);};
      Tensor operator/(const Tproxy &rc) const;

      // Extract the (single) selected element as a concrete C++ type.
      template <class T>
      T item() const {
        Tensor out;
        out._impl = _insimpl->get(_accs);
        return out.item<T>();
      }

      // Extract the (single) selected element as a type-erased Scalar proxy.
      Scalar::Sproxy item() const {
        Tensor out;
        out._impl = _insimpl->get(_accs);
        return out.item();
      }

      // when used to get elems: materialize the selected region.
      operator Tensor() const {
        Tensor out;
        out._impl = _insimpl->get(_accs);
        return out;
      }

      // Storage of the materialized selection (not of the parent tensor).
      Storage storage() const {
        Tensor out;
        out._impl = _insimpl->get(_accs);
        return out.storage();
      }

    };  // proxy class of Tensor.
531
533
535 // these two are using the python way!
536 //----------------------------------------
    // these two are using the python way!
    // operator() overloads: resolve a mixed argument pack (slice strings,
    // integers, Accessors) into a vector<Accessor> via Indices_resolver,
    // then forward to operator[] which builds the Tproxy.
    //----------------------------------------
    template <class... Ts>
    Tproxy operator()(const std::string &e1, const Ts &...elems) {
      std::vector<cytnx::Accessor> tmp = Indices_resolver(e1, elems...);
      return (*this)[tmp];
    }
    template <class... Ts>
    Tproxy operator()(const cytnx_int64 &e1, const Ts &...elems) {
      std::vector<cytnx::Accessor> tmp = Indices_resolver(e1, elems...);
      return (*this)[tmp];
    }
    template <class... Ts>
    Tproxy operator()(const cytnx::Accessor &e1, const Ts &...elems) {
      std::vector<cytnx::Accessor> tmp = Indices_resolver(e1, elems...);
      return (*this)[tmp];
    }
    // const variants (read-only proxies):
    template <class... Ts>
    const Tproxy operator()(const std::string &e1, const Ts &...elems) const {
      std::vector<cytnx::Accessor> tmp = Indices_resolver(e1, elems...);
      return (*this)[tmp];
    }
    template <class... Ts>
    const Tproxy operator()(const cytnx_int64 &e1, const Ts &...elems) const {
      std::vector<cytnx::Accessor> tmp = Indices_resolver(e1, elems...);
      return (*this)[tmp];
    }
    template <class... Ts>
    const Tproxy operator()(const cytnx::Accessor &e1, const Ts &...elems) const {
      std::vector<cytnx::Accessor> tmp = Indices_resolver(e1, elems...);
      return (*this)[tmp];
    }
571
572 //-----------------------------------------
573
    // operator[] overloads: build a Tproxy over the selected region.
    // Accessor-based forms pass the accessors through unchanged; the
    // cytnx_int64 forms wrap each integer index into an Accessor.
    Tproxy operator[](const std::initializer_list<cytnx::Accessor> &accs) {
      std::vector<cytnx::Accessor> tmp = accs;
      return (*this)[tmp];
    }
    Tproxy operator[](const std::vector<cytnx::Accessor> &accs) {
      return Tproxy(this->_impl, accs);
    }

    const Tproxy operator[](const std::vector<cytnx::Accessor> &accs) const {
      return Tproxy(this->_impl, accs);
    }
    const Tproxy operator[](const std::initializer_list<cytnx::Accessor> &accs) const {
      std::vector<cytnx::Accessor> tmp = accs;
      return (*this)[tmp];
    }

    Tproxy operator[](const std::initializer_list<cytnx_int64> &accs) {
      std::vector<cytnx_int64> tmp = accs;
      return (*this)[tmp];
    }
    Tproxy operator[](const std::vector<cytnx_int64> &accs) {
      // convert each plain index to an Accessor:
      std::vector<cytnx::Accessor> acc_in;
      for (int i = 0; i < accs.size(); i++) {
        acc_in.push_back(cytnx::Accessor(accs[i]));
      }
      return Tproxy(this->_impl, acc_in);
    }
    const Tproxy operator[](const std::initializer_list<cytnx_int64> &accs) const {
      std::vector<cytnx_int64> tmp = accs;
      return (*this)[tmp];
    }
    const Tproxy operator[](const std::vector<cytnx_int64> &accs) const {
      std::vector<cytnx::Accessor> acc_in;
      for (int i = 0; i < accs.size(); i++) {
        acc_in.push_back(cytnx::Accessor(accs[i]));
      }
      return Tproxy(this->_impl, acc_in);
    }
613 //-------------------------------------------
614
    // Serialization helpers (implemented in .cpp):
    void _Save(std::fstream &f) const;
    void _Load(std::fstream &f);

    // Save to the cytnx binary format.
    void Save(const std::string &fname) const;
    void Save(const char *fname) const;

    // Dump raw element data only (no shape/dtype header).
    void Tofile(const std::string &fname) const;
    void Tofile(const char *fname) const;
    void Tofile(std::fstream &f) const;

    // Load a Tensor previously written by Save().
    static Tensor Load(const std::string &fname);
    static Tensor Load(const char *fname);

    // Read raw elements of the given dtype from a binary file;
    // count = -1 reads the whole file.
    static Tensor Fromfile(const std::string &fname, const unsigned int &dtype,
                           const cytnx_int64 &count = -1);
    static Tensor Fromfile(const char *fname, const unsigned int &dtype,
                           const cytnx_int64 &count = -1);

    // static Tensor Frombinary(const std::string &fname);

    // The single shared-ownership handle to the implementation. Copying a
    // Tensor copies this pointer (shallow); use clone() for a deep copy.
    boost::intrusive_ptr<Tensor_impl> _impl;
    Tensor() : _impl(new Tensor_impl()){};
    Tensor(const Tensor &rhs) { _impl = rhs._impl; }

    /*
    template<class Tp>
    Tensor(const std::initializer_list<Tp> &rhs){
      Storage stmp = std::vector<Tp>(rhs);
      boost::intrusive_ptr<Tensor_impl> tmp(new Tensor_impl());
      tmp->Init(stmp);
      this->_impl = tmp;
    }
    */

    // Shallow assignment: shares the impl with rhs.
    Tensor &operator=(const Tensor &rhs) {
      _impl = rhs._impl;
      return *this;
    }

    void operator=(const Tproxy &rhsp) {  // this is used to handle proxy assignment
      this->_impl = rhsp._insimpl->get(rhsp._accs);
    }
720
722 // default device==Device.cpu (-1)
    // (Re)initialize this Tensor with a fresh impl of the given shape,
    // dtype and device. default device==Device.cpu (-1).
    // init_zero=true zero-fills the new storage.
    void Init(const std::vector<cytnx_uint64> &shape, const unsigned int &dtype = Type.Double,
              const int &device = -1, const bool &init_zero = true) {
      boost::intrusive_ptr<Tensor_impl> tmp(new Tensor_impl());
      this->_impl = tmp;
      this->_impl->Init(shape, dtype, device, init_zero);
    }
    // void Init(const Storage& storage) {
    //   boost::intrusive_ptr<Tensor_impl> tmp(new Tensor_impl());
    //   this->_impl = tmp;
    //   this->_impl->Init(storage);
    // }
    // void Init(const Storage& storage, const std::vector<cytnx_uint64> &shape,
    //           const unsigned int &dtype = Type.Double, const int &device = -1) {
    //   boost::intrusive_ptr<Tensor_impl> tmp(new Tensor_impl());
    //   this->_impl = tmp;
    //   this->_impl->Init(storage, shape, dtype, device);
    // }

    // Shape/dtype/device constructor; forwards to Init().
    // (note: init_zero default is written as 1 here vs true in Init().)
    Tensor(const std::vector<cytnx_uint64> &shape, const unsigned int &dtype = Type.Double,
           const int &device = -1, const bool &init_zero = 1)
        : _impl(new Tensor_impl()) {
      this->Init(shape, dtype, device, init_zero);
    }
    // Tensor(const Storage& storage)
    //     : _impl(new Tensor_impl()) {
    //   this->Init(storage);
    // }
    // Tensor(const Storage& storage, const std::vector<cytnx_uint64> &shape,
    //        const unsigned int &dtype = Type.Double, const int &device = -1)
    //     : _impl(new Tensor_impl()) {
    //   this->Init(storage, shape, dtype, device);
    // }

    // Build a rank-1 Tensor wrapping the given Storage.
    static Tensor from_storage(const Storage &in) {
      Tensor out;
      boost::intrusive_ptr<Tensor_impl> tmp(new Tensor_impl());
      out._impl = tmp;
      out._impl->Init(in);
      return out;
    }
805
    // dtype id of the elements (see Type).
    unsigned int dtype() const { return this->_impl->dtype(); }

    // device id the storage lives on (see Device).
    int device() const { return this->_impl->device(); }

    // human-readable dtype name.
    std::string dtype_str() const { return this->_impl->dtype_str(); }

    // human-readable device name.
    std::string device_str() const { return this->_impl->device_str(); }

    // shape of the current (possibly permuted) view.
    const std::vector<cytnx_uint64> &shape() const { return this->_impl->shape(); }

    // number of dimensions.
    cytnx_uint64 rank() const { return this->_impl->shape().size(); }

    // Deep copy (new storage).
    Tensor clone() const {
      Tensor out;
      out._impl = this->_impl->clone();
      return out;
    }

    // Copy to another device (shares impl if already there).
    Tensor to(const int &device) const {
      Tensor out;
      out._impl = this->_impl->to(device);
      return out;
    }

    // Move to another device, in place.
    void to_(const int &device) { this->_impl->to_(device); }

    // whether the memory layout matches the current axis order.
    const bool &is_contiguous() const { return this->_impl->is_contiguous(); }
919
    // In-place (lazy) axis permutation; returns *this for chaining.
    Tensor permute_(const std::vector<cytnx_uint64> &rnks) {
      this->_impl->permute_(rnks);
      return *this;
    }
    // Variadic convenience form: permute_(0, 2, 1).
    template <class... Ts>
    Tensor permute_(const cytnx_uint64 &e1, const Ts &...elems) {
      std::vector<cytnx_uint64> argv = dynamic_arg_uint64_resolver(e1, elems...);
      this->_impl->permute_(argv);
      return *this;
    }

    // Return a permuted view (lazy; no data movement).
    Tensor permute(const std::vector<cytnx_uint64> &rnks) const {
      Tensor out;
      out._impl = this->_impl->permute(rnks);
      return out;
    }
    // Variadic convenience form: permute(0, 2, 1).
    template <class... Ts>
    Tensor permute(const cytnx_uint64 &e1, const Ts &...elems) const {
      std::vector<cytnx_uint64> argv = dynamic_arg_uint64_resolver(e1, elems...);
      return this->permute(argv);
    }
963
980 Tensor out;
981 out._impl = this->_impl->contiguous();
982 return out;
983 }
984
1000 this->_impl->contiguous_();
1001 return *this;
1002 }
1003
1025 void reshape_(const std::vector<cytnx_int64> &new_shape) { this->_impl->reshape_(new_shape); }
1027 void reshape_(const std::vector<cytnx_uint64> &new_shape) {
1028 std::vector<cytnx_int64> shape(new_shape.begin(), new_shape.end());
1029 this->_impl->reshape_(shape);
1030 }
1031 void reshape_(const std::initializer_list<cytnx_int64> &new_shape) {
1032 std::vector<cytnx_int64> shape = new_shape;
1033 this->_impl->reshape_(shape);
1034 }
1035 template <class... Ts>
1036 void reshape_(const cytnx_int64 &e1, const Ts... elems) {
1037 std::vector<cytnx_int64> shape = dynamic_arg_int64_resolver(e1, elems...);
1038 // std::cout << shape << std::endl;
1039 this->_impl->reshape_(shape);
1040 }
1042
1067 Tensor reshape(const std::vector<cytnx_int64> &new_shape) const {
1068 Tensor out;
1069 out._impl = this->_impl->reshape(new_shape);
1070 return out;
1071 }
1072
1076 Tensor reshape(const std::vector<cytnx_uint64> &new_shape) const {
1077 std::vector<cytnx_int64> tmp(new_shape.size());
1078 memcpy(&tmp[0], &new_shape[0], sizeof(cytnx_uint64) * new_shape.size());
1079 Tensor out;
1080 out._impl = this->_impl->reshape(tmp);
1081 return out;
1082 }
1083
1087 Tensor reshape(const std::initializer_list<cytnx_int64> &new_shape) const {
1088 return this->reshape(std::vector<cytnx_int64>(new_shape));
1089 }
1090
1092 template <class... Ts>
1093 Tensor reshape(const cytnx_int64 &e1, const Ts &...elems) const {
1094 std::vector<cytnx_int64> argv = dynamic_arg_int64_resolver(e1, elems...);
1095 return this->reshape(argv);
1096 }
1098
1120 Tensor astype(const int &new_type) const {
1121 Tensor out;
1122 out._impl = this->_impl->astype(new_type);
1123 return out;
1124 }
1125
1126 // Tensor diagonal(){
1127 // for(unsigned int i=0;i<this->shape().size();i++){
1128 // if(this->shape()[i] != this->shape()[0],"[ERROR] Tensor.diagonal() can only be called
1129 // when the subject has equal dimension in each rank.%s","\n");
1130 // }
1131 //
1132 // }
1133
    // Typed, bounds-checked element access by N-d index.
    template <class T>
    T &at(const std::vector<cytnx_uint64> &locator) {
      return this->_impl->at<T>(locator);
    }

    template <class T>
    const T &at(const std::vector<cytnx_uint64> &locator) const {
      return this->_impl->at<T>(locator);
    }
    // Variadic convenience forms: at<double>(i, j, k).
    template <class T, class... Ts>
    const T &at(const cytnx_uint64 &e1, const Ts &...elems) const {
      std::vector<cytnx_uint64> argv = dynamic_arg_uint64_resolver(e1, elems...);
      return this->at<T>(argv);
    }
    template <class T, class... Ts>
    T &at(const cytnx_uint64 &e1, const Ts &...elems) {
      std::vector<cytnx_uint64> argv = dynamic_arg_uint64_resolver(e1, elems...);
      return this->at<T>(argv);
    }

    // Type-erased element access via Scalar proxy.
    const Scalar::Sproxy at(const std::vector<cytnx_uint64> &locator) const {
      return this->_impl->at(locator);
    }

    Scalar::Sproxy at(const std::vector<cytnx_uint64> &locator) { return this->_impl->at(locator); }
1184
    // Extract the single element of a one-element Tensor as type T.
    // Errors (via cytnx_error_msg) when the Tensor has != 1 element.
    template <class T>
    T &item() {
      cytnx_error_msg(this->_impl->storage().size() != 1, "[ERROR][Tensor.item<T>]%s",
                      "item can only be called from a Tensor with only one element\n");
      return this->_impl->storage().at<T>(0);
    }

    template <class T>
    const T &item() const {
      cytnx_error_msg(this->_impl->storage().size() != 1, "[ERROR][Tensor.item<T>]%s",
                      "item can only be called from a Tensor with only one element\n");
      return this->_impl->storage().at<T>(0);
    }

    // Type-erased variants: proxy to element 0.
    // NOTE(review): unlike the typed overloads these do not check that
    // the Tensor has exactly one element — confirm intended.
    const Scalar::Sproxy item() const {
      Scalar::Sproxy out(this->storage()._impl, 0);
      return out;
    }

    Scalar::Sproxy item() {
      Scalar::Sproxy out(this->storage()._impl, 0);
      return out;
    }
1234
1236
    // Materialize the sub-tensor selected by the accessors.
    Tensor get(const std::vector<cytnx::Accessor> &accessors) const {
      Tensor out;
      out._impl = this->_impl->get(accessors);
      return out;
    }

    /*
    Tensor get_v2(const std::vector<cytnx::Accessor> &accessors) const{
      Tensor out;
      out._impl = this->_impl->get_v2(accessors);
      return out;
    }
    */

    // Write rhs into the region selected by the accessors.
    void set(const std::vector<cytnx::Accessor> &accessors, const Tensor &rhs) {
      this->_impl->set(accessors, rhs._impl);
    }

    // Fill the selected region with a scalar rc.
    template <class T>
    void set(const std::vector<cytnx::Accessor> &accessors, const T &rc) {
      this->_impl->set(accessors, rc);
    }
    template <class T>
    void set(const std::initializer_list<cytnx::Accessor> &accessors, const T &rc) {
      std::vector<cytnx::Accessor> args = accessors;
      this->set(args, rc);
    }
1323
    // Underlying flat storage (shared, mutable even through const Tensor
    // since only the impl pointer is const).
    Storage &storage() const { return this->_impl->storage(); }

    // Fill every element with val.
    template <class T>
    void fill(const T &val) {
      this->_impl->fill(val);
    }

    // Shape-equivalence only: returns true iff shapes match. Does NOT
    // compare dtype, device, or element values.
    bool equiv(const Tensor &rhs) {
      if (this->shape() != rhs.shape()) return false;
      return true;
    }
1362
1371
1380
1381 // Arithmic:
    // Arithmic:
    // In-place element-wise arithmetic against a scalar or Tensor;
    // implementations are fwd-declared here and live in linalg.
    template <class T>
    Tensor &operator+=(const T &rc);

    template <class T>
    Tensor &operator-=(const T &rc);

    template <class T>
    Tensor &operator*=(const T &rc);

    template <class T>
    Tensor &operator/=(const T &rc);
1458
1459 // Tensor &operator+=(const Tproxy &rc);
1460 // Tensor &operator-=(const Tproxy &rc);
1461 // Tensor &operator*=(const Tproxy &rc);
1462 // Tensor &operator/=(const Tproxy &rc);
1463 /*
1464 Tensor operator+(const Tproxy &rc){
1465 return *this + Tensor(rc);
1466 }
1467 Tensor operator-(const Tproxy &rc){
1468 return *this - Tensor(rc);
1469 }
1470 Tensor operator*(const Tproxy &rc){
1471 return *this * Tensor(rc);
1472 }
1473 Tensor operator/(const Tproxy &rc){
1474 return *this / Tensor(rc);
1475 }
1476 */
1482 template <class T>
1483 Tensor Add(const T &rhs) {
1484 return *this + rhs;
1485 }
1486
1492 template <class T>
1493 Tensor &Add_(const T &rhs) {
1494 return *this += rhs;
1495 }
1496
1502 template <class T>
1503 Tensor Sub(const T &rhs) {
1504 return *this - rhs;
1505 }
1506
1512 template <class T>
1513 Tensor &Sub_(const T &rhs) {
1514 return *this -= rhs;
1515 }
1516
1522 template <class T>
1523 Tensor Mul(const T &rhs) {
1524 return *this * rhs;
1525 }
1526
1532 template <class T>
1533 Tensor &Mul_(const T &rhs) {
1534 return *this *= rhs;
1535 }
1536
1543 template <class T>
1544 Tensor Div(const T &rhs) {
1545 return *this / rhs;
1546 }
1547
1554 template <class T>
1555 Tensor &Div_(const T &rhs) {
1556 return *this /= rhs;
1557 }
1558
1565 template <class T>
1566 Tensor Cpr(const T &rhs) {
1567 return *this == rhs;
1568 }
1569
1570 // template<class T>
1571 // Tensor& Cpr_(const T &rhs){
1572 //
1573 // return *this == rhs;
1574 // }
1575
1576 template <class T>
1577 Tensor Mod(const T &rhs) {
1578 return *this % rhs;
1579 }
1580
1587 Tensor operator-() { return this->Mul(-1.); }
1588
1596 Tensor flatten() const {
1597 Tensor out = this->clone();
1598 out.contiguous_();
1599 out.reshape_({-1});
1600 return out;
1601 }
1602
1610 void flatten_() {
1611 this->contiguous_();
1612 this->reshape_({-1});
1613 }
1614
1641 void append(const Tensor &rhs) {
1642 // Tensor in;
1643 if (!this->is_contiguous()) this->contiguous_();
1644
1645 // check Tensor in shape:
1646 cytnx_error_msg(rhs.shape().size() == 0 || this->shape().size() == 0,
1647 "[ERROR] try to append a null Tensor.%s", "\n");
1648 cytnx_error_msg(rhs.shape().size() != (this->shape().size() - 1),
1649 "[ERROR] try to append a Tensor with rank not match.%s", "\n");
1650 cytnx_uint64 Nelem = 1;
1651 for (unsigned int i = 0; i < rhs.shape().size(); i++) {
1652 cytnx_error_msg(rhs.shape()[i] != this->shape()[i + 1],
1653 "[ERROR] dimension mismatch @ rhs.rank: [%d] this: [%d] rhs: [%d]\n", i,
1654 this->shape()[i + 1], rhs.shape()[i]);
1655 Nelem *= rhs.shape()[i];
1656 }
1657
1658 // check type:
1659 Tensor in;
1660 if (rhs.dtype() != this->dtype()) {
1661 in = rhs.astype(this->dtype());
1662 if (!in.is_contiguous()) in.contiguous_();
1663 } else {
1664 if (!in.is_contiguous())
1665 in = rhs.contiguous();
1666 else
1667 in = rhs;
1668 }
1669 this->_impl->_shape[0] += 1;
1670 cytnx_uint64 oldsize = this->_impl->_storage.size();
1671 this->_impl->_storage.resize(oldsize + Nelem);
1672 memcpy(((char *)this->_impl->_storage.data()) +
1673 oldsize * Type.typeSize(this->dtype()) / sizeof(char),
1674 in._impl->_storage.data(), Type.typeSize(in.dtype()) * Nelem);
1675 }
  /// @brief append a Storage as a new row of a rank-2 Tensor, in place.
  /// @param srhs the Storage to append; its size must equal this->shape().back().
  /// @pre *this is rank-2 and non-null; srhs is non-empty.
  /// @note makes *this contiguous first; srhs is cast to this Tensor's dtype
  ///       when the dtypes differ, then its raw bytes are copied to the end
  ///       of the storage.
  void append(const Storage &srhs) {
    if (!this->is_contiguous()) this->contiguous_();

    // shape checks: only a rank-2 Tensor accepts a Storage row, and the row
    // length must match the last dimension.
    cytnx_error_msg(srhs.size() == 0 || this->shape().size() == 0,
                    "[ERROR] try to append a null Tensor.%s", "\n");
    cytnx_error_msg((this->shape().size() - 1) != 1,
                    "[ERROR] append a storage to Tensor can only accept rank-2 Tensor.%s", "\n");
    cytnx_error_msg(this->shape().back() != srhs.size(), "[ERROR] Tensor dmension mismatch!%s",
                    "\n");

    // cast to the destination dtype when needed (shared, no copy, otherwise).
    Storage in;
    if (srhs.dtype() != this->dtype()) {
      in = srhs.astype(this->dtype());
    } else {
      in = srhs;
    }
    // grow axis 0 by one and copy the raw bytes to the end of the storage.
    this->_impl->_shape[0] += 1;
    cytnx_uint64 oldsize = this->_impl->_storage.size();
    this->_impl->_storage.resize(oldsize + in.size());
    memcpy(((char *)this->_impl->_storage.data()) +
             oldsize * Type.typeSize(this->dtype()) / sizeof(char),
           in._impl->Mem, Type.typeSize(in.dtype()) * in.size());
  }
1728 /*
1729 void append(const Tensor &rhs){
1730 // convert to the same type.
1731 Tensor in;
1732 if(rhs.dtype() != this->dtype()){
1733 in = rhs.astype(this->dtype());
1734 }else{
1735 in = rhs;
1736 }
1737
1738 // 1) check rank
1739 if(this->shape().size()==1){
1740 // check if rhs is a scalar tensor (only one element)
1741 cytnx_error_msg(!(rhs.shape().size()==1 && rhs.shape()[0]==1),"[ERROR] trying to append
1742 a scalar into multidimentional Tensor is not allow.\n Only rank-1 Tensor can accept scalar
1743 append.%s","\n"); this->_impl->_shape[0]+=1; this->_impl->_storage.append(0);
1744
1745 }else{
1746 cytnx_error_msg(rhs.shape().size() != this->shape().size()-1,"[ERROR] try to append a
1747 Tensor with rank not match.%s","\n");
1748
1749 }
1750 cytnx_error_msg(!this->is_contiguous(),"[ERROR] append require the Tensor to be contiguous.
1751 suggestion: call contiguous() or contiguous_() first.","\n");
1752 }
1753 */
1765 template <class T>
1766 void append(const T &rhs) {
1767 cytnx_error_msg(this->shape().size() != 1,
1768 "[ERROR] trying to append a scalar into multidimentional Tensor is not "
1769 "allow.\n Only rank-1 Tensor can accept scalar append.%s",
1770 "\n");
1772 "[ERROR] append require the Tensor to be contiguous. suggestion: call "
1773 "contiguous() or contiguous_() first.",
1774 "\n");
1775 this->_impl->_shape[0] += 1;
1776 this->_impl->_storage.append(rhs);
1777 }
1778
1787 bool same_data(const Tensor &rhs) const;
1788
1789 // linalg:
1795 std::vector<Tensor> Svd(const bool &is_UvT = true) const;
1796
1802 std::vector<Tensor> Eigh(const bool &is_V = true, const bool &row_v = false) const;
1803
1809
1814 Tensor InvM() const;
1815
1820 Tensor &Inv_(const double &clip);
1821
1826 Tensor Inv(const double &clip) const;
1827
1833
1838 Tensor Conj() const;
1839
1845
1850 Tensor Exp() const;
1851
1856 Tensor Norm() const;
1857
1862 Tensor Pow(const cytnx_double &p) const;
1863
1869
1874 Tensor Trace(const cytnx_uint64 &a = 0, const cytnx_uint64 &b = 1) const;
1875
1880 Tensor Abs() const;
1881
1887
1892 Tensor Max() const;
1893
1898 Tensor Min() const;
1899
1900 }; // class Tensor
1901
1902 Tensor operator+(const Tensor &lhs, const Tensor::Tproxy &rhs);
1903 Tensor operator-(const Tensor &lhs, const Tensor::Tproxy &rhs);
1904 Tensor operator*(const Tensor &lhs, const Tensor::Tproxy &rhs);
1905 Tensor operator/(const Tensor &lhs, const Tensor::Tproxy &rhs);
1906
1907 Tensor operator+(const Tensor &lhs, const Scalar::Sproxy &rhs);
1908 Tensor operator-(const Tensor &lhs, const Scalar::Sproxy &rhs);
1909 Tensor operator*(const Tensor &lhs, const Scalar::Sproxy &rhs);
1910 Tensor operator/(const Tensor &lhs, const Scalar::Sproxy &rhs);
1911
1913 std::ostream &operator<<(std::ostream &os, const Tensor &in);
1914 std::ostream &operator<<(std::ostream &os, const Tensor::Tproxy &in);
1916 //{ os << Tensor(in);};
1917} // namespace cytnx
1918
1919#endif
object that mimics the Python slice to access elements in C++ [this is for the C++ API only].
Definition Accessor.hpp:16
a memory storage with multi-type/multi-device support
Definition Storage.hpp:1051
const unsigned int & dtype() const
the dtype-id of current Storage, see cytnx::Type for more details.
Definition Storage.hpp:1241
Storage astype(const unsigned int &new_type) const
cast the type of current Storage
Definition Storage.hpp:1235
const unsigned long long & size() const
the size ( no. of elements ) in the Storage
Definition Storage.hpp:1357
a tensor (multi-dimensional array)
Definition Tensor.hpp:345
void append(const Storage &srhs)
the append function of the Storage.
Definition Tensor.hpp:1703
Tensor & operator*=(const T &rc)
multiplication assignment operator with a Tensor or a scalar.
Tensor & Inv_(const double &clip)
the Inv_ member function. Same as cytnx::linalg::Inv_(Tensor &Tin, const double &clip)
Tensor & operator/=(const T &rc)
division assignment operator with a Tensor or a scalar.
Tensor operator-()
The negation function.
Definition Tensor.hpp:1587
void fill(const T &val)
fill all the element of current Tensor with the value.
Definition Tensor.hpp:1350
Tensor InvM() const
the InvM member function. Same as cytnx::linalg::InvM(const Tensor &Tin), where Tin is the current Te...
bool same_data(const Tensor &rhs) const
Check whether two tensors share the same internal memory.
void to_(const int &device)
move the current Tensor to the device.
Definition Tensor.hpp:912
Tensor reshape(const std::vector< cytnx_uint64 > &new_shape) const
Definition Tensor.hpp:1076
Tensor(const std::vector< cytnx_uint64 > &shape, const unsigned int &dtype=Type.Double, const int &device=-1, const bool &init_zero=1)
Construct a new Tensor object.
Definition Tensor.hpp:777
void append(const T &rhs)
the append function of the scalar.
Definition Tensor.hpp:1766
Tensor & operator-=(const T &rc)
subtraction assignment operator with a Tensor or a scalar.
Tensor & Add_(const T &rhs)
Addition function with a Tensor or a scalar, inplacely. Same as operator+=(const T &rhs).
Definition Tensor.hpp:1493
Tensor Abs() const
the Abs member function. Same as linalg::Abs(const Tensor &Tin), where Tin is the current Tensor.
Tensor reshape(const std::initializer_list< cytnx_int64 > &new_shape) const
Definition Tensor.hpp:1087
std::string device_str() const
the device (in string) of the Tensor
Definition Tensor.hpp:832
void reshape_(const std::vector< cytnx_int64 > &new_shape)
reshape the Tensor, inplacely
Definition Tensor.hpp:1025
Tensor contiguous_()
Make the Tensor contiguous by coalescing the memory (storage), inplacely.
Definition Tensor.hpp:999
Tensor permute_(const std::vector< cytnx_uint64 > &rnks)
Definition Tensor.hpp:920
Tensor Mul(const T &rhs)
Multiplication function with a Tensor or a scalar. Same as cytnx::operator*(const Tensor &self,...
Definition Tensor.hpp:1523
unsigned int dtype() const
the dtype-id of the Tensor
Definition Tensor.hpp:811
Tensor Sub(const T &rhs)
Subtraction function with a Tensor or a scalar. Same as cytnx::operator-(const Tensor &self,...
Definition Tensor.hpp:1503
Tensor Inv(const double &clip) const
the Inv member function. Same as cytnx::linalg::Inv(const Tensor &Tin, const double &clip)
Tensor contiguous() const
Make the Tensor contiguous by coalescing the memory (storage).
Definition Tensor.hpp:979
void Tofile(const std::string &fname) const
Save current Tensor to the binary file.
T & at(const std::vector< cytnx_uint64 > &locator)
Get an element at specific location.
Definition Tensor.hpp:1155
Tensor reshape(const std::vector< cytnx_int64 > &new_shape) const
return a new Tensor that is reshaped.
Definition Tensor.hpp:1067
static Tensor Fromfile(const std::string &fname, const unsigned int &dtype, const cytnx_int64 &count=-1)
Load current Tensor from the binary file.
T & item()
get the element from a rank-0 Tensor.
Definition Tensor.hpp:1211
Tensor clone() const
return a clone of the current Tensor.
Definition Tensor.hpp:863
std::vector< Tensor > Eigh(const bool &is_V=true, const bool &row_v=false) const
the Eigh member function. Same as cytnx::linalg::Eigh(const Tensor &Tin, const bool &is_V,...
void Tofile(const char *fname) const
void append(const Tensor &rhs)
the append function.
Definition Tensor.hpp:1641
static Tensor Load(const char *fname)
void Save(const char *fname) const
void set(const std::vector< cytnx::Accessor > &accessors, const Tensor &rhs)
set elements with the input Tensor using Accessor (C++ API) / slices (python API)
Definition Tensor.hpp:1290
static Tensor Fromfile(const char *fname, const unsigned int &dtype, const cytnx_int64 &count=-1)
Tensor Norm() const
the Norm member function. Same as linalg::Norm(const Tensor &Tin), where Tin is the current Tensor.
Tensor astype(const int &new_type) const
return a new Tensor that cast to different dtype.
Definition Tensor.hpp:1120
Tensor & Div_(const T &rhs)
Division function with a Tensor or a scalar, inplacely. Same as operator/=(const T &rhs).
Definition Tensor.hpp:1555
static Tensor Load(const std::string &fname)
Load current Tensor from file.
Tensor & operator+=(const T &rc)
addition assignment operator with a Tensor or a scalar.
Tensor Conj() const
the Conj member function. Same as cytnx::linalg::Conj(const Tensor &Tin), where Tin is the current Te...
Tensor Trace(const cytnx_uint64 &a=0, const cytnx_uint64 &b=1) const
the Trace member function. Same as linalg::Trace(const Tensor &Tin, const cytnx_uint64 &a,...
Tensor & Pow_(const cytnx_double &p)
the Pow_ member function. Same as linalg::Pow_(Tensor &Tin, const cytnx_double &p),...
std::vector< Tensor > Svd(const bool &is_UvT=true) const
the SVD member function. Same as cytnx::linalg::Svd(const Tensor &Tin, const bool &is_UvT) ,...
std::string dtype_str() const
the dtype (in string) of the Tensor
Definition Tensor.hpp:825
Tensor & Mul_(const T &rhs)
Multiplication function with a Tensor or a scalar, inplacely. Same as operator*=(const T &rhs).
Definition Tensor.hpp:1533
cytnx_uint64 rank() const
the rank of the Tensor
Definition Tensor.hpp:844
const bool & is_contiguous() const
return whether the Tensor is contiguous or not.
Definition Tensor.hpp:918
Tensor Exp() const
the Exp member function. Same as linalg::Exp(const Tensor &Tin), where Tin is the current Tensor.
Tensor & Abs_()
the Abs_ member function. Same as linalg::Abs_(Tensor &Tin), where Tin is the current Tensor.
Tensor Add(const T &rhs)
Addition function with a Tensor or a scalar. Same as cytnx::operator+(const Tensor &self,...
Definition Tensor.hpp:1483
void flatten_()
The flatten function, inplacely.
Definition Tensor.hpp:1610
void Save(const std::string &fname) const
Save current Tensor to file.
Tensor flatten() const
The flatten function.
Definition Tensor.hpp:1596
Tensor & Conj_()
the Conj_ member function. Same as cytnx::linalg::Conj_(Tensor &Tin), where Tin is the current Tensor...
Tensor Pow(const cytnx_double &p) const
the Pow member function. Same as linalg::Pow(const Tensor &Tin, const cytnx_double &p),...
int device() const
the device-id of the Tensor
Definition Tensor.hpp:818
Tensor real()
return the real part of the tensor.
Tensor imag()
return the imaginary part of the tensor.
Tensor to(const int &device) const
copy a tensor to new device
Definition Tensor.hpp:889
void Tofile(std::fstream &f) const
Tensor get(const std::vector< cytnx::Accessor > &accessors) const
get elements using Accessor (C++ API) / slices (python API)
Definition Tensor.hpp:1258
void set(const std::vector< cytnx::Accessor > &accessors, const T &rc)
set elements with the input constant using Accessor (C++ API) / slices (python API)
Definition Tensor.hpp:1313
Tensor Max() const
the Max member function. Same as linalg::Max(const Tensor &Tin), where Tin is the current Tensor.
Tensor permute(const std::vector< cytnx_uint64 > &rnks) const
perform tensor permute on the cytnx::Tensor and return a new instance.
Definition Tensor.hpp:951
Tensor Div(const T &rhs)
Division function with a Tensor or a scalar. Same as cytnx::operator/(const Tensor &self,...
Definition Tensor.hpp:1544
Tensor Mod(const T &rhs)
Definition Tensor.hpp:1577
bool equiv(const Tensor &rhs)
compare the shape of two tensors.
Definition Tensor.hpp:1358
void Init(const std::vector< cytnx_uint64 > &shape, const unsigned int &dtype=Type.Double, const int &device=-1, const bool &init_zero=true)
initialize a Tensor
Definition Tensor.hpp:747
Tensor Cpr(const T &rhs)
The comparison function.
Definition Tensor.hpp:1566
Tensor & Exp_()
the Exp_ member function. Same as linalg::Exp_(Tensor &Tin), where Tin is the current Tensor.
Tensor & InvM_()
the InvM_ member function. Same as cytnx::linalg::InvM_(Tensor &Tin), where Tin is the current Tensor...
const std::vector< cytnx_uint64 > & shape() const
the shape of the Tensor
Definition Tensor.hpp:838
Tensor Min() const
the Min member function. Same as linalg::Min(const Tensor &Tin), where Tin is the current Tensor.
const T & at(const std::vector< cytnx_uint64 > &locator) const
Definition Tensor.hpp:1163
Storage & storage() const
return the storage of current Tensor.
Definition Tensor.hpp:1333
static Tensor from_storage(const Storage &in)
Convert a Storage to Tensor.
Definition Tensor.hpp:798
Tensor & Sub_(const T &rhs)
Subtraction function with a Tensor or a scalar, inplacely. Same as operator-=(const T &rhs).
Definition Tensor.hpp:1513
#define cytnx_error_msg(is_true, format,...)
Definition cytnx_error.hpp:16
Definition Accessor.hpp:12
Device_class Device
data on which devices.
cytnx::UniTensor operator*(const cytnx::UniTensor &Lt, const cytnx::UniTensor &Rt)
The multiplication operator between two UniTensor.
double cytnx_double
Definition Type.hpp:43
uint32_t cytnx_uint32
Definition Type.hpp:46
bool cytnx_bool
Definition Type.hpp:54
std::complex< double > cytnx_complex128
Definition Type.hpp:53
float cytnx_float
Definition Type.hpp:44
int16_t cytnx_int16
Definition Type.hpp:50
std::complex< float > cytnx_complex64
Definition Type.hpp:52
cytnx::UniTensor operator-(const cytnx::UniTensor &Lt, const cytnx::UniTensor &Rt)
The subtraction operator between two UniTensor.
int32_t cytnx_int32
Definition Type.hpp:49
uint16_t cytnx_uint16
Definition Type.hpp:47
uint64_t cytnx_uint64
Definition Type.hpp:45
int64_t cytnx_int64
Definition Type.hpp:48
Type_class Type
data type
cytnx::UniTensor operator+(const cytnx::UniTensor &Lt, const cytnx::UniTensor &Rt)
The addition operator between two UniTensor.
cytnx::UniTensor operator/(const cytnx::UniTensor &Lt, const cytnx::UniTensor &Rt)
The division operator between two UniTensor.