Cytnx v0.9.4
Loading...
Searching...
No Matches
Tensor.old.hpp
Go to the documentation of this file.
1#ifndef _H_Tensor_
2#define _H_Tensor_
3
4#include "Type.hpp"
5#include "cytnx_error.hpp"
6#include "backend/Storage.hpp"
7#include "Device.hpp"
9#include <iostream>
10#include <fstream>
11#include "utils/vec_range.hpp"
12#include "utils/vec_cast.hpp"
13#include "utils/dynamic_arg_resolver.hpp"
14// #include "linalg.hpp"
15#include "Accessor.hpp"
16#include <utility>
17#include <vector>
18#include <initializer_list>
19#include <string>
20#include "backend/Scalar.hpp"
21
22namespace cytnx {
23
25 // real implementation
26 class Tensor_impl : public intrusive_ptr_base<Tensor_impl> {
27 private:
28 // Interface:
29 Storage_init_interface __SII;
30
31 // Memory:
32 Storage _storage;
33
34 // tensor shape
35 std::vector<cytnx_uint64> _shape;
36
37 // pseudo-perm info
38 std::vector<cytnx_uint64> _mapper;
39 std::vector<cytnx_uint64> _invmapper;
40 bool _contiguous;
41
42 public:
43 friend class Tensor;
44 boost::intrusive_ptr<Tensor_impl> _clone_meta_only() const {
45 boost::intrusive_ptr<Tensor_impl> out(new Tensor_impl());
46 out->_mapper = this->_mapper;
47 out->_invmapper = this->_invmapper;
48 out->_shape = this->_shape;
49 out->_contiguous = this->_contiguous;
50 return out;
51 }
52 Tensor_impl() : _contiguous(true){};
53
54 void Init(const std::vector<cytnx_uint64> &shape, const unsigned int &dtype = Type.Double,
55 int device = -1, const bool &init_zero = true);
56 void Init(const Storage &in);
57 // void Init(const Storage &in, const std::vector<cytnx_uint64> &shape,
58 // const unsigned int &dtype, int device);
59 /*
60 template<class T>
61 void From_vec(const T &ndvec){
62 cytnx_error_msg(std::string(typeid(T).name()).find("vector") ==
63 std::string::npos,"[ERROR][Tensor][From_vec] the input argument should be a nd vector.%s","\n");
64 //dispatch the rank!:
65
66
67
68 }
69 */
70 // clone&assignment constr., use intrusive_ptr's
71 Tensor_impl(const Tensor_impl &rhs);
72 Tensor_impl &operator=(const Tensor_impl &rhs); // add const
73
74 unsigned int dtype() const { return this->_storage.dtype(); }
75 int device() const { return this->_storage.device(); }
76
77 std::string dtype_str() const { return Type.getname(this->_storage.dtype()); }
78 std::string device_str() const { return Device.getname(this->_storage.device()); }
79
80 const std::vector<cytnx_uint64> &shape() const { return _shape; }
81
82 const bool &is_contiguous() const { return this->_contiguous; }
83
84 const std::vector<cytnx_uint64> &mapper() const { return this->_mapper; }
85 const std::vector<cytnx_uint64> &invmapper() const { return this->_invmapper; }
86 Storage &storage() { return _storage; }
87
88 const Storage &storage() const { return _storage; }
89
90 boost::intrusive_ptr<Tensor_impl> clone() const {
91 boost::intrusive_ptr<Tensor_impl> out = this->_clone_meta_only();
92 out->_storage = this->_storage.clone();
93 return out;
94 }
95
96 void to_(const int &device) { this->_storage.to_(device); }
97 boost::intrusive_ptr<Tensor_impl> to(const int &device) {
98 if (this->device() == device) {
99 // boost::intrusive_ptr<Tensor_impl> out(this);
100 return this;
101 } else {
102 boost::intrusive_ptr<Tensor_impl> out = this->_clone_meta_only();
103 out->_storage = this->_storage.to(device);
104 return out;
105 }
106 }
107
108 void permute_(const std::vector<cytnx_uint64> &rnks);
109
110 boost::intrusive_ptr<Tensor_impl> permute(const std::vector<cytnx_uint64> &rnks);
111
112 template <class T>
113 T &at(const std::vector<cytnx_uint64> &locator) const {
114 cytnx_error_msg(locator.size() != this->_shape.size(), "%s",
115 "The input index does not match Tensor's rank.");
116
117 cytnx_uint64 RealRank, mtplyr;
118 // std::vector<cytnx_uint64> c_shape(this->_shape.size());
119 // std::vector<cytnx_uint64> c_loc(this->_shape.size());
120 cytnx_uint64 c_shape, c_loc;
121
122 RealRank = 0;
123 mtplyr = 1;
124
125 for (cytnx_int64 i = this->_shape.size() - 1; i >= 0; i--) {
126 if (locator[i] >= this->_shape[i]) {
127 cytnx_error_msg(true, "%s", "Attempting to access out-of-bound index in Tensor.");
128 }
129 // c_shape[i] = this->_shape[this->_invmapper[i]];
130 // c_loc[i] = locator[this->_invmapper[i]];
131 c_shape = this->_shape[this->_invmapper[i]];
132 c_loc = locator[this->_invmapper[i]];
133 RealRank += mtplyr * c_loc;
134 mtplyr *= c_shape;
135 }
136 return this->_storage.at<T>(RealRank);
137 }
138
139 const Scalar::Sproxy at(const std::vector<cytnx_uint64> &locator) const {
140 cytnx_error_msg(locator.size() != this->_shape.size(), "%s",
141 "The input index does not match Tensor's rank.");
142
143 cytnx_uint64 RealRank, mtplyr;
144 // std::vector<cytnx_uint64> c_shape(this->_shape.size());
145 // std::vector<cytnx_uint64> c_loc(this->_shape.size());
146
147 cytnx_uint64 c_shape, c_loc;
148 RealRank = 0;
149 mtplyr = 1;
150
151 for (cytnx_int64 i = this->_shape.size() - 1; i >= 0; i--) {
152 if (locator[i] >= this->_shape[i]) {
153 cytnx_error_msg(true, "%s", "Attempting to access out-of-bound index in Tensor.");
154 }
155 // c_shape[i] = this->_shape[this->_invmapper[i]];
156 // c_loc[i] = locator[this->_invmapper[i]];
157 c_shape = this->_shape[this->_invmapper[i]];
158 c_loc = locator[this->_invmapper[i]];
159 RealRank += mtplyr * c_loc;
160 mtplyr *= c_shape;
161 }
162 return this->_storage.at(RealRank);
163 }
164
165 Scalar::Sproxy at(const std::vector<cytnx_uint64> &locator) {
166 cytnx_error_msg(locator.size() != this->_shape.size(), "%s",
167 "The input index does not match Tensor's rank.");
168
169 cytnx_uint64 RealRank, mtplyr;
170 // std::vector<cytnx_uint64> c_shape(this->_shape.size());
171 // std::vector<cytnx_uint64> c_loc(this->_shape.size());
172 cytnx_uint64 c_shape, c_loc;
173
174 RealRank = 0;
175 mtplyr = 1;
176
177 for (cytnx_int64 i = this->_shape.size() - 1; i >= 0; i--) {
178 if (locator[i] >= this->_shape[i]) {
179 cytnx_error_msg(true, "%s", "Attempting to access out-of-bound index in Tensor.");
180 }
181 // c_shape[i] = this->_shape[this->_invmapper[i]];
182 // c_loc[i] = locator[this->_invmapper[i]];
183 c_shape = this->_shape[this->_invmapper[i]];
184 c_loc = locator[this->_invmapper[i]];
185 RealRank += mtplyr * c_loc;
186 mtplyr *= c_shape;
187 }
188 return this->_storage.at(RealRank);
189 }
190
191 boost::intrusive_ptr<Tensor_impl> get(const std::vector<cytnx::Accessor> &accessors);
192 boost::intrusive_ptr<Tensor_impl> get_deprecated(const std::vector<cytnx::Accessor> &accessors);
193 void set(const std::vector<cytnx::Accessor> &accessors,
194 const boost::intrusive_ptr<Tensor_impl> &rhs);
195
196 template <class T>
197 void set(const std::vector<cytnx::Accessor> &accessors, const T &rc);
198
199 void set(const std::vector<cytnx::Accessor> &accessors, const Scalar::Sproxy &rc);
200
201 template <class Tx>
202 void fill(const Tx &val) {
203 this->storage().fill(val);
204 }
205
206 boost::intrusive_ptr<Tensor_impl> contiguous() {
207 // return new instance if act on non-contiguous tensor
208 // return self if act on contiguous tensor
209 if (this->_contiguous) {
210 boost::intrusive_ptr<Tensor_impl> out(this);
211 // out->_storage = this->_storage;
212 return out;
213 } else {
214 boost::intrusive_ptr<Tensor_impl> out(new Tensor_impl());
215 std::vector<cytnx_uint64> oldshape(this->_shape.size());
216 for (cytnx_uint64 i = 0; i < this->_shape.size(); i++) {
217 oldshape[i] = this->_shape[this->_invmapper[i]];
218 }
219
220 out->_storage._impl =
221 this->_storage._impl->Move_memory(oldshape, this->_mapper, this->_invmapper);
222 // this->_storage._impl->Move_memory_(oldshape, this->_mapper, this->_invmapper);
223 // out->_storage._impl = this->_storage._impl;
224 // std::cout << out->_storage << std::endl;
225 out->_invmapper = vec_range(this->_invmapper.size());
226 out->_mapper = out->_invmapper;
227 out->_shape = this->_shape;
228 out->_contiguous = true;
229 return out;
230 }
231 }
232
233 void contiguous_() {
234 // return new instance if act on non-contiguous tensor
235 // return self if act on contiguous tensor
236 if (!this->_contiguous) {
237 std::vector<cytnx_uint64> oldshape(this->_shape.size());
238 for (cytnx_uint64 i = 0; i < this->_shape.size(); i++) {
239 oldshape[i] = this->_shape[this->_invmapper[i]];
240 }
241
242 this->_storage._impl =
243 this->_storage._impl->Move_memory(oldshape, this->_mapper, this->_invmapper);
244 // this->_storage._impl->Move_memory_(oldshape, this->_mapper, this->_invmapper);
245 // this->_mapper = vec_range(this->_invmapper.size());
246 vec_range_(this->_mapper, this->invmapper().size());
247 this->_invmapper = this->_mapper;
248 this->_contiguous = true;
249 }
250 }
251
252 void reshape_(const std::vector<cytnx_int64> &new_shape) {
253 if (!this->_contiguous) {
254 this->contiguous_();
255 }
256 // std::vector<cytnx_uint64> result_shape(new_shape.size());
257 cytnx_uint64 new_N = 1;
258 bool has_undetermine = false;
259 unsigned int Udet_id = 0;
260 // this->_shape = vec_cast<cytnx_int64,cytnx_uint64>(new_shape);
261 this->_shape.resize(new_shape.size());
262 for (cytnx_uint64 i = 0; i < new_shape.size(); i++) {
263 this->_shape[i] = new_shape[i];
264 }
265 for (int i = 0; i < new_shape.size(); i++) {
266 if (new_shape[i] < 0) {
267 if (new_shape[i] != -1)
269 new_shape[i] != -1, "%s",
270 "[ERROR] reshape can only have dimension > 0 and one undetermine rank specify as -1");
271 if (has_undetermine)
273 new_shape[i] != -1, "%s",
274 "[ERROR] reshape can only have dimension > 0 and one undetermine rank specify as -1");
275 Udet_id = i;
276 has_undetermine = true;
277 } else {
278 new_N *= new_shape[i];
279 // result_shape[i] = new_shape[i];
280 }
281 }
282
283 if (has_undetermine) {
284 cytnx_error_msg(new_N > this->_storage.size(), "%s",
285 "[ERROR] new shape exceed the total number of elements.");
286 cytnx_error_msg(this->_storage.size() % new_N, "%s",
287 "[ERROR] unmatch size when reshape with undetermine dimension");
288 // result_shape[Udet_id] = this->_storage.size() / new_N;
289 this->_shape[Udet_id] = this->_storage.size() / new_N;
290 } else {
291 cytnx_error_msg(new_N != this->_storage.size(), "%s",
292 "[ERROR] new shape does not match the number of elements.");
293 }
294
295 // this->_shape = result_shape;
296 // this->_mapper = std::move(vec_range(new_shape.size()));
297 this->_mapper.resize(new_shape.size());
298 vec_range_(this->_mapper, new_shape.size());
299 this->_invmapper = this->_mapper;
300 }
301
302 boost::intrusive_ptr<Tensor_impl> reshape(const std::vector<cytnx_int64> &new_shape) {
303 boost::intrusive_ptr<Tensor_impl> out(new Tensor_impl());
304 if (this->is_contiguous()) {
305 out = this->_clone_meta_only();
306 out->_storage = this->_storage;
307 } else {
308 out = this->contiguous();
309 }
310 // out = this->clone();
311
312 out->reshape_(new_shape);
313 return out;
314 }
315
316 boost::intrusive_ptr<Tensor_impl> astype(const int &new_type) {
317 // boost::intrusive_ptr<Tensor_impl> out(new Tensor_impl());
318 // out->_storage = this->_storage.astype(new_type);
319 if (this->dtype() == new_type) {
320 return this;
321 } else {
322 boost::intrusive_ptr<Tensor_impl> out = this->_clone_meta_only();
323 out->_storage = this->_storage.astype(new_type);
324 return out;
325 }
326 }
327 };
329
330 class Tensor;
331
  // [Note] these are fwd from linalg.hpp
  // Element-wise binary operators between a Tensor and a scalar or Tensor
  // right-hand side; forward declarations only — the implementations live in
  // linalg.hpp.
  template <class T>
  Tensor operator+(const Tensor &lhs, const T &rc);
  template <class T>
  Tensor operator-(const Tensor &lhs, const T &rhs);
  template <class T>
  Tensor operator*(const Tensor &lhs, const T &rhs);
  template <class T>
  Tensor operator/(const Tensor &lhs, const T &rhs);
343
345 class Tensor {
346 private:
347 public:
349 // this is a proxy class to allow get/set element using [] as python!
    // Proxy returned by Tensor::operator()/operator[]: holds the source
    // tensor's impl plus the accessors describing the selected region, so the
    // same expression can be used either to read (converts to Tensor) or to
    // write (operator=) — python-style t[i] = x / y = t[i].
    struct Tproxy {
      boost::intrusive_ptr<Tensor_impl> _insimpl;  // impl of the tensor being sliced
      std::vector<cytnx::Accessor> _accs;          // accessors defining the region
      Tproxy(boost::intrusive_ptr<Tensor_impl> _ptr, const std::vector<cytnx::Accessor> &accs)
          : _insimpl(std::move(_ptr)), _accs(accs) {}

      // when used to set elems:
      const Tensor &operator=(const Tensor &rhs) {
        this->_insimpl->set(_accs, rhs._impl);
        return rhs;
      }

      // Set every element of the region to the scalar `rc`.
      template <class T>
      const T &operator=(const T &rc) {
        this->_insimpl->set(_accs, rc);
        return rc;
      }
      // Proxy-to-proxy assignment: materialize `rc` into a Tensor first.
      const Tproxy &operator=(const Tproxy &rc) {
        Tensor tmp = Tensor(rc);
        this->_insimpl->set(_accs, tmp._impl);
        return rc;
      }

      // Compound ops: read the region out, apply the op, write it back.
      // NOTE: the returned Tensor is re-pointed at the FULL underlying impl
      // (self._impl = this->_insimpl), not at the sliced region.
      template <class T>
      Tensor operator+=(const T &rc) {
        Tensor self;
        self._impl = _insimpl->get(_accs);
        self += rc;
        _insimpl->set(_accs, self._impl);
        self._impl = this->_insimpl;
        return self;
      }
      Tensor operator+=(const Tproxy &rc);

      template <class T>
      Tensor operator-=(const T &rc) {
        Tensor self;
        self._impl = _insimpl->get(_accs);
        self -= rc;
        _insimpl->set(_accs, self._impl);
        self._impl = this->_insimpl;
        return self;
      }
      Tensor operator-=(const Tproxy &rc);

      template <class T>
      Tensor operator/=(const T &rc) {
        Tensor self;
        self._impl = _insimpl->get(_accs);
        self /= rc;
        _insimpl->set(_accs, self._impl);
        self._impl = this->_insimpl;
        return self;
      }
      Tensor operator/=(const Tproxy &rc);

      template <class T>
      Tensor operator*=(const T &rc) {
        Tensor self;
        self._impl = _insimpl->get(_accs);
        self *= rc;
        _insimpl->set(_accs, self._impl);
        self._impl = this->_insimpl;
        return self;
      }
      Tensor operator*=(const Tproxy &rc);

      // Binary arithmetic on the sliced region; one overload per scalar type
      // to avoid conflicting with the templated operator= above.
      // (implementations elsewhere)
      Tensor operator+(const cytnx_complex128 &rc) const;
      Tensor operator+(const cytnx_complex64 &rc) const;
      Tensor operator+(const cytnx_double &rc) const;
      Tensor operator+(const cytnx_float &rc) const;
      Tensor operator+(const cytnx_uint64 &rc) const;
      Tensor operator+(const cytnx_int64 &rc) const;
      Tensor operator+(const cytnx_uint32 &rc) const;
      Tensor operator+(const cytnx_int32 &rc) const;
      Tensor operator+(const cytnx_uint16 &rc) const;
      Tensor operator+(const cytnx_int16 &rc) const;
      Tensor operator+(const cytnx_bool &rc) const;
      Tensor operator+(const Tproxy &rc) const;

      Tensor operator-(const cytnx_complex128 &rc) const;
      Tensor operator-(const cytnx_complex64 &rc) const;
      Tensor operator-(const cytnx_double &rc) const;
      Tensor operator-(const cytnx_float &rc) const;
      Tensor operator-(const cytnx_uint64 &rc) const;
      Tensor operator-(const cytnx_int64 &rc) const;
      Tensor operator-(const cytnx_uint32 &rc) const;
      Tensor operator-(const cytnx_int32 &rc) const;
      Tensor operator-(const cytnx_uint16 &rc) const;
      Tensor operator-(const cytnx_int16 &rc) const;
      Tensor operator-(const cytnx_bool &rc) const;
      Tensor operator-(const Tproxy &rc) const;

      // unary negation of the sliced region
      Tensor operator-() const;

      Tensor operator*(const cytnx_complex128 &rc) const;
      Tensor operator*(const cytnx_complex64 &rc) const;
      Tensor operator*(const cytnx_double &rc) const;
      Tensor operator*(const cytnx_float &rc) const;
      Tensor operator*(const cytnx_uint64 &rc) const;
      Tensor operator*(const cytnx_int64 &rc) const;
      Tensor operator*(const cytnx_uint32 &rc) const;
      Tensor operator*(const cytnx_int32 &rc) const;
      Tensor operator*(const cytnx_uint16 &rc) const;
      Tensor operator*(const cytnx_int16 &rc) const;
      Tensor operator*(const cytnx_bool &rc) const;
      Tensor operator*(const Tproxy &rc) const;

      Tensor operator/(const cytnx_complex128 &rc) const;
      Tensor operator/(const cytnx_complex64 &rc) const;
      Tensor operator/(const cytnx_double &rc) const;
      Tensor operator/(const cytnx_float &rc) const;
      Tensor operator/(const cytnx_uint64 &rc) const;
      Tensor operator/(const cytnx_int64 &rc) const;
      Tensor operator/(const cytnx_uint32 &rc) const;
      Tensor operator/(const cytnx_int32 &rc) const;
      Tensor operator/(const cytnx_uint16 &rc) const;
      Tensor operator/(const cytnx_int16 &rc) const;
      Tensor operator/(const cytnx_bool &rc) const;
      Tensor operator/(const Tproxy &rc) const;

      // Extract the (single) element of the region as type T.
      template <class T>
      T item() const {
        Tensor out;
        out._impl = _insimpl->get(_accs);
        return out.item<T>();
      }

      // Type-erased single-element access.
      Scalar::Sproxy item() const {
        Tensor out;
        out._impl = _insimpl->get(_accs);
        return out.item();
      }

      // when used to get elems:
      operator Tensor() const {
        Tensor out;
        out._impl = _insimpl->get(_accs);
        return out;
      }

      // Storage of the materialized region.
      Storage storage() const {
        Tensor out;
        out._impl = _insimpl->get(_accs);
        return out.storage();
      }

    };  // proxy class of Tensor.
531
533
535 // these two are using the python way!
536 //----------------------------------------
537 template <class... Ts>
538 Tproxy operator()(const std::string &e1, const Ts &...elems) {
539 // std::cout << e1 << std::endl;
540 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1, elems...);
541 return (*this)[tmp];
542 }
543 template <class... Ts>
544 Tproxy operator()(const cytnx_int64 &e1, const Ts &...elems) {
545 // std::cout << e1<< std::endl;
546 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1, elems...);
547 return (*this)[tmp];
548 }
549 template <class... Ts>
550 Tproxy operator()(const cytnx::Accessor &e1, const Ts &...elems) {
551 // std::cout << e1 << std::endl;
552 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1, elems...);
553 return (*this)[tmp];
554 }
555 template <class... Ts>
556 const Tproxy operator()(const std::string &e1, const Ts &...elems) const {
557 // std::cout << e1 << std::endl;
558 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1, elems...);
559 return (*this)[tmp];
560 }
561 template <class... Ts>
562 const Tproxy operator()(const cytnx_int64 &e1, const Ts &...elems) const {
563 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1, elems...);
564 return (*this)[tmp];
565 }
566 template <class... Ts>
567 const Tproxy operator()(const cytnx::Accessor &e1, const Ts &...elems) const {
568 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1, elems...);
569 return (*this)[tmp];
570 }
571
572 //-----------------------------------------
573
574 Tproxy operator[](const std::initializer_list<cytnx::Accessor> &accs) {
575 std::vector<cytnx::Accessor> tmp = accs;
576 return (*this)[tmp];
577 }
578 Tproxy operator[](const std::vector<cytnx::Accessor> &accs) {
579 return Tproxy(this->_impl, accs);
580 }
581
582 const Tproxy operator[](const std::vector<cytnx::Accessor> &accs) const {
583 return Tproxy(this->_impl, accs);
584 }
585 const Tproxy operator[](const std::initializer_list<cytnx::Accessor> &accs) const {
586 std::vector<cytnx::Accessor> tmp = accs;
587 return (*this)[tmp];
588 }
589
590 Tproxy operator[](const std::initializer_list<cytnx_int64> &accs) {
591 std::vector<cytnx_int64> tmp = accs;
592 return (*this)[tmp];
593 }
594 Tproxy operator[](const std::vector<cytnx_int64> &accs) {
595 std::vector<cytnx::Accessor> acc_in;
596 for (int i = 0; i < accs.size(); i++) {
597 acc_in.push_back(cytnx::Accessor(accs[i]));
598 }
599 return Tproxy(this->_impl, acc_in);
600 }
601 const Tproxy operator[](const std::initializer_list<cytnx_int64> &accs) const {
602 std::vector<cytnx_int64> tmp = accs;
603 return (*this)[tmp];
604 }
605 const Tproxy operator[](const std::vector<cytnx_uint64> &accs) const {
606 std::vector<cytnx::Accessor> acc_in;
607 for (int i = 0; i < accs.size(); i++) {
608 acc_in.push_back(cytnx::Accessor(accs[i]));
609 }
610 return Tproxy(this->_impl, acc_in);
611 }
612 const Tproxy operator[](const std::vector<cytnx_int64> &accs) const {
613 std::vector<cytnx::Accessor> acc_in;
614 for (int i = 0; i < accs.size(); i++) {
615 acc_in.push_back(cytnx::Accessor(accs[i]));
616 }
617 return Tproxy(this->_impl, acc_in);
618 }
620 //-------------------------------------------
621
    // (internal) serialize / deserialize this tensor through an open stream.
    void _Save(std::fstream &f) const;
    void _Load(std::fstream &f);

    // Save this tensor to file `fname`.
    void Save(const std::string &fname) const;
    void Save(const char *fname) const;

    // Write this tensor to a file / stream.
    // NOTE(review): presumably dumps only the raw element data (no meta data,
    // given the Fromfile counterpart below) — confirm in the implementation.
    void Tofile(const std::string &fname) const;

    void Tofile(const char *fname) const;

    void Tofile(std::fstream &f) const;

    // Load a tensor previously written by Save().
    static Tensor Load(const std::string &fname);
    static Tensor Load(const char *fname);

    // Read elements of `dtype` from a raw binary file.
    // NOTE(review): count = -1 presumably means "read the whole file" —
    // confirm against the implementation.
    static Tensor Fromfile(const std::string &fname, const unsigned int &dtype,
                           const cytnx_int64 &count = -1);
    static Tensor Fromfile(const char *fname, const unsigned int &dtype,
                           const cytnx_int64 &count = -1);
700
701 // static Tensor Frombinary(const std::string &fname);
702
    // Shared implementation: Tensor copies are shallow — copy ctor and
    // operator= below share the same impl (use clone() for a deep copy).
    boost::intrusive_ptr<Tensor_impl> _impl;
    Tensor() : _impl(new Tensor_impl()){};
    Tensor(const Tensor &rhs) { _impl = rhs._impl; }

    /*
    template<class Tp>
    Tensor(const std::initializer_list<Tp> &rhs){
      Storage stmp = std::vector<Tp>(rhs);
      boost::intrusive_ptr<Tensor_impl> tmp(new Tensor_impl());
      tmp->Init(stmp);
      this->_impl = tmp;
    }
    */

    // shallow assignment: share the right-hand side's impl.
    Tensor &operator=(const Tensor &rhs) {
      _impl = rhs._impl;
      return *this;
    }

    // this is used to handle proxy assignment: materialize the sliced region
    // into this tensor.
    void operator=(const Tproxy &rhsp) {
      this->_impl = rhsp._insimpl->get(rhsp._accs);
    }
727
729 // default device==Device.cpu (-1)
754 void Init(const std::vector<cytnx_uint64> &shape, const unsigned int &dtype = Type.Double,
755 const int &device = -1, const bool &init_zero = true) {
756 boost::intrusive_ptr<Tensor_impl> tmp(new Tensor_impl());
757 this->_impl = tmp;
758 this->_impl->Init(shape, dtype, device, init_zero);
759 }
760 // void Init(const Storage& storage) {
761 // boost::intrusive_ptr<Tensor_impl> tmp(new Tensor_impl());
762 // this->_impl = tmp;
763 // this->_impl->Init(storage);
764 // }
765 // void Init(const Storage& storage, const std::vector<cytnx_uint64> &shape,
766 // const unsigned int &dtype = Type.Double, const int &device = -1) {
767 // boost::intrusive_ptr<Tensor_impl> tmp(new Tensor_impl());
768 // this->_impl = tmp;
769 // this->_impl->Init(storage, shape, dtype, device);
770 // }
771
784 Tensor(const std::vector<cytnx_uint64> &shape, const unsigned int &dtype = Type.Double,
785 const int &device = -1, const bool &init_zero = 1)
786 : _impl(new Tensor_impl()) {
787 this->Init(shape, dtype, device, init_zero);
788 }
789 // Tensor(const Storage& storage)
790 // : _impl(new Tensor_impl()) {
791 // this->Init(storage);
792 // }
793 // Tensor(const Storage& storage, const std::vector<cytnx_uint64> &shape,
794 // const unsigned int &dtype = Type.Double, const int &device = -1)
795 // : _impl(new Tensor_impl()) {
796 // this->Init(storage, shape, dtype, device);
797 // }
799
805 static Tensor from_storage(const Storage &in) {
806 Tensor out;
807 boost::intrusive_ptr<Tensor_impl> tmp(new Tensor_impl());
808 out._impl = tmp;
809 out._impl->Init(in);
810 return out;
811 }
812
    // dtype id of the elements (see Type).
    unsigned int dtype() const { return this->_impl->dtype(); }

    // device id this tensor lives on (see Device).
    int device() const { return this->_impl->device(); }

    // human-readable dtype name.
    std::string dtype_str() const { return this->_impl->dtype_str(); }

    // human-readable device name.
    std::string device_str() const { return this->_impl->device_str(); }

    // the tensor's shape (dimension of each rank).
    const std::vector<cytnx_uint64> &shape() const { return this->_impl->shape(); }

    // number of ranks (dimensions).
    cytnx_uint64 rank() const { return this->_impl->shape().size(); }
852
870 Tensor clone() const {
871 Tensor out;
872 out._impl = this->_impl->clone();
873 return out;
874 }
875
896 Tensor to(const int &device) const {
897 Tensor out;
898 out._impl = this->_impl->to(device);
899 return out;
900 }
901
919 void to_(const int &device) { this->_impl->to_(device); }
920
925 const bool &is_contiguous() const { return this->_impl->is_contiguous(); }
926
927 Tensor permute_(const std::vector<cytnx_uint64> &rnks) {
928 this->_impl->permute_(rnks);
929 return *this;
930 }
932 template <class... Ts>
933 Tensor permute_(const cytnx_uint64 &e1, const Ts &...elems) {
934 std::vector<cytnx_uint64> argv = dynamic_arg_uint64_resolver(e1, elems...);
935 this->_impl->permute_(argv);
936 return *this;
937 }
939
958 Tensor permute(const std::vector<cytnx_uint64> &rnks) const {
959 Tensor out;
960 out._impl = this->_impl->permute(rnks);
961 return out;
962 }
964 template <class... Ts>
965 Tensor permute(const cytnx_uint64 &e1, const Ts &...elems) const {
966 std::vector<cytnx_uint64> argv = dynamic_arg_uint64_resolver(e1, elems...);
967 return this->permute(argv);
968 }
970
987 Tensor out;
988 out._impl = this->_impl->contiguous();
989 return out;
990 }
991
1007 this->_impl->contiguous_();
1008 return *this;
1009 }
1010
1032 void reshape_(const std::vector<cytnx_int64> &new_shape) { this->_impl->reshape_(new_shape); }
1034 void reshape_(const std::vector<cytnx_uint64> &new_shape) {
1035 std::vector<cytnx_int64> shape(new_shape.begin(), new_shape.end());
1036 this->_impl->reshape_(shape);
1037 }
1038 void reshape_(const std::initializer_list<cytnx_int64> &new_shape) {
1039 std::vector<cytnx_int64> shape = new_shape;
1040 this->_impl->reshape_(shape);
1041 }
1042 template <class... Ts>
1043 void reshape_(const cytnx_int64 &e1, const Ts... elems) {
1044 std::vector<cytnx_int64> shape = dynamic_arg_int64_resolver(e1, elems...);
1045 // std::cout << shape << std::endl;
1046 this->_impl->reshape_(shape);
1047 }
1049
1074 Tensor reshape(const std::vector<cytnx_int64> &new_shape) const {
1075 Tensor out;
1076 out._impl = this->_impl->reshape(new_shape);
1077 return out;
1078 }
1079
1083 Tensor reshape(const std::vector<cytnx_uint64> &new_shape) const {
1084 std::vector<cytnx_int64> tmp(new_shape.size());
1085 memcpy(&tmp[0], &new_shape[0], sizeof(cytnx_uint64) * new_shape.size());
1086 Tensor out;
1087 out._impl = this->_impl->reshape(tmp);
1088 return out;
1089 }
1090
1094 Tensor reshape(const std::initializer_list<cytnx_int64> &new_shape) const {
1095 return this->reshape(std::vector<cytnx_int64>(new_shape));
1096 }
1097
1099 template <class... Ts>
1100 Tensor reshape(const cytnx_int64 &e1, const Ts &...elems) const {
1101 std::vector<cytnx_int64> argv = dynamic_arg_int64_resolver(e1, elems...);
1102 return this->reshape(argv);
1103 }
1105
1127 Tensor astype(const int &new_type) const {
1128 Tensor out;
1129 out._impl = this->_impl->astype(new_type);
1130 return out;
1131 }
1132
1133 // Tensor diagonal(){
1134 // for(unsigned int i=0;i<this->shape().size();i++){
1135 // if(this->shape()[i] != this->shape()[0],"[ERROR] Tensor.diagonal() can only be called
1136 // when the subject has equal dimension in each rank.%s","\n");
1137 // }
1138 //
1139 // }
1140
1161 template <class T>
1162 T &at(const std::vector<cytnx_uint64> &locator) {
1163 return this->_impl->at<T>(locator);
1164 }
1165
1169 template <class T>
1170 const T &at(const std::vector<cytnx_uint64> &locator) const {
1171 return this->_impl->at<T>(locator);
1172 }
1174 template <class T, class... Ts>
1175 const T &at(const cytnx_uint64 &e1, const Ts &...elems) const {
1176 std::vector<cytnx_uint64> argv = dynamic_arg_uint64_resolver(e1, elems...);
1177 return this->at<T>(argv);
1178 }
1179 template <class T, class... Ts>
1180 T &at(const cytnx_uint64 &e1, const Ts &...elems) {
1181 std::vector<cytnx_uint64> argv = dynamic_arg_uint64_resolver(e1, elems...);
1182 return this->at<T>(argv);
1183 }
1184
1185 const Scalar::Sproxy at(const std::vector<cytnx_uint64> &locator) const {
1186 return this->_impl->at(locator);
1187 }
1188
1189 Scalar::Sproxy at(const std::vector<cytnx_uint64> &locator) { return this->_impl->at(locator); }
1191
1217 template <class T>
1218 T &item() {
1219 cytnx_error_msg(this->_impl->storage().size() != 1, "[ERROR][Tensor.item<T>]%s",
1220 "item can only be called from a Tensor with only one element\n");
1221 return this->_impl->storage().at<T>(0);
1222 }
1223
1225 template <class T>
1226 const T &item() const {
1227 cytnx_error_msg(this->_impl->storage().size() != 1, "[ERROR][Tensor.item<T>]%s",
1228 "item can only be called from a Tensor with only one element\n");
1229 return this->_impl->storage().at<T>(0);
1230 }
1231
1232 const Scalar::Sproxy item() const {
1233 Scalar::Sproxy out(this->storage()._impl, 0);
1234 return out;
1235 }
1236
1237 Scalar::Sproxy item() {
1238 Scalar::Sproxy out(this->storage()._impl, 0);
1239 return out;
1240 }
1241
1243
1265 Tensor get(const std::vector<cytnx::Accessor> &accessors) const {
1266 Tensor out;
1267 out._impl = this->_impl->get(accessors);
1268 return out;
1269 }
1270
1271 /*
1272 Tensor get_v2(const std::vector<cytnx::Accessor> &accessors) const{
1273 Tensor out;
1274 out._impl = this->_impl->get_v2(accessors);
1275 return out;
1276 }
1277 */
1278
1297 void set(const std::vector<cytnx::Accessor> &accessors, const Tensor &rhs) {
1298 this->_impl->set(accessors, rhs._impl);
1299 }
1300
1319 template <class T>
1320 void set(const std::vector<cytnx::Accessor> &accessors, const T &rc) {
1321 this->_impl->set(accessors, rc);
1322 }
1324 template <class T>
1325 void set(const std::initializer_list<cytnx::Accessor> &accessors, const T &rc) {
1326 std::vector<cytnx::Accessor> args = accessors;
1327 this->set(args, rc);
1328 }
1330
1340 Storage &storage() const { return this->_impl->storage(); }
1341
1356 template <class T>
1357 void fill(const T &val) {
1358 this->_impl->fill(val);
1359 }
1360
1365 bool equivshape(const Tensor &rhs) {
1366 if (this->shape() != rhs.shape()) return false;
1367 return true;
1368 }
1369
1378
1387
1388 // Arithmic:
1405 template <class T>
1407
1424 template <class T>
1426
1443 template <class T>
1445
1463 template <class T>
1465
1466 // Tensor &operator+=(const Tproxy &rc);
1467 // Tensor &operator-=(const Tproxy &rc);
1468 // Tensor &operator*=(const Tproxy &rc);
1469 // Tensor &operator/=(const Tproxy &rc);
1470 /*
1471 Tensor operator+(const Tproxy &rc){
1472 return *this + Tensor(rc);
1473 }
1474 Tensor operator-(const Tproxy &rc){
1475 return *this - Tensor(rc);
1476 }
1477 Tensor operator*(const Tproxy &rc){
1478 return *this * Tensor(rc);
1479 }
1480 Tensor operator/(const Tproxy &rc){
1481 return *this / Tensor(rc);
1482 }
1483 */
1489 template <class T>
1490 Tensor Add(const T &rhs) {
1491 return *this + rhs;
1492 }
1493
1499 template <class T>
1500 Tensor &Add_(const T &rhs) {
1501 return *this += rhs;
1502 }
1503
1509 template <class T>
1510 Tensor Sub(const T &rhs) {
1511 return *this - rhs;
1512 }
1513
1519 template <class T>
1520 Tensor &Sub_(const T &rhs) {
1521 return *this -= rhs;
1522 }
1523
1529 template <class T>
1530 Tensor Mul(const T &rhs) {
1531 return *this * rhs;
1532 }
1533
1539 template <class T>
1540 Tensor &Mul_(const T &rhs) {
1541 return *this *= rhs;
1542 }
1543
1550 template <class T>
1551 Tensor Div(const T &rhs) {
1552 return *this / rhs;
1553 }
1554
1561 template <class T>
1562 Tensor &Div_(const T &rhs) {
1563 return *this /= rhs;
1564 }
1565
1572 template <class T>
1573 Tensor Cpr(const T &rhs) {
1574 return *this == rhs;
1575 }
1576
1577 // /**
1578 // * @brief Compare each element of the current tensor with the input tensor.
1579 // * @details This function Compare each element of the current tensor with the input tensor.
1580 // * @param[in] rhs the compared tensor.
1581 // */
1582 // bool approx_eq(const Tensor &rhs, const cytnx_double tol = 0) {
1583 // if (this->device() != rhs.device()) {
1584 // if (User_debug)
1585 // std::cout << "[approx_eq] Tensor device " << this->device()
1586 // << "not equal to rhs tensor device " << rhs.device() << std::endl;
1587 // return false;
1588 // }
1589 // // if (this->dtype() != rhs.dtype()) {
1590 // // std::cout << "[approx_eq] Tensor dtype " << this->dtype()
1591 // // << "not equal to rhs tensor dtype " << rhs.dtype() << std::endl;
1592 // // return false;
1593 // // }
1594 // if (this->shape() != rhs.shape()) {
1595 // if (User_debug)
1596 // std::cout << "[approx_eq] Tensor shape " << this->shape()
1597 // << "not equal to rhs tensor shape " << rhs.shape() << std::endl;
1598 // return false;
1599 // }
1600 // if (this->is_contiguous() != rhs.is_contiguous()) {
1601 // if (User_debug)
1602 // std::cout << "[AreNearlyEqTensor] Tensor contiguous flag " << this->is_contiguous()
1603 // << "not equal to rhs tensor flag " << rhs.is_contiguous() << std::endl;
1604 // return false;
1605 // }
1606 // return this->_impl->_storage.approx_eq(rhs._impl->_storage._impl, tol);
1607 // }
1608
1609 // template<class T>
1610 // Tensor& Cpr_(const T &rhs){
1611 //
1612 // return *this == rhs;
1613 // }
1614
1615 template <class T>
1616 Tensor Mod(const T &rhs) {
1617 return *this % rhs;
1618 }
1619
1626 Tensor operator-() { return this->Mul(-1.); }
1627
1635 Tensor flatten() const {
1636 Tensor out = this->clone();
1637 out.contiguous_();
1638 out.reshape_({-1});
1639 return out;
1640 }
1641
1649 void flatten_() {
1650 this->contiguous_();
1651 this->reshape_({-1});
1652 }
1653
1680 void append(const Tensor &rhs) {
1681 // Tensor in;
1682 if (!this->is_contiguous()) this->contiguous_();
1683
1684 // check Tensor in shape:
1685 cytnx_error_msg(rhs.shape().size() == 0 || this->shape().size() == 0,
1686 "[ERROR] try to append a null Tensor.%s", "\n");
1687 cytnx_error_msg(rhs.shape().size() != (this->shape().size() - 1),
1688 "[ERROR] try to append a Tensor with rank not match.%s", "\n");
1689 cytnx_uint64 Nelem = 1;
1690 for (unsigned int i = 0; i < rhs.shape().size(); i++) {
1691 cytnx_error_msg(rhs.shape()[i] != this->shape()[i + 1],
1692 "[ERROR] dimension mismatch @ rhs.rank: [%d] this: [%d] rhs: [%d]\n", i,
1693 this->shape()[i + 1], rhs.shape()[i]);
1694 Nelem *= rhs.shape()[i];
1695 }
1696
1697 // check type:
1698 Tensor in;
1699 if (rhs.dtype() != this->dtype()) {
1700 in = rhs.astype(this->dtype());
1701 if (!in.is_contiguous()) in.contiguous_();
1702 } else {
1703 if (!in.is_contiguous())
1704 in = rhs.contiguous();
1705 else
1706 in = rhs;
1707 }
1708 this->_impl->_shape[0] += 1;
1709 cytnx_uint64 oldsize = this->_impl->_storage.size();
1710 this->_impl->_storage.resize(oldsize + Nelem);
1711 memcpy(((char *)this->_impl->_storage.data()) +
1712 oldsize * Type.typeSize(this->dtype()) / sizeof(char),
1713 in._impl->_storage.data(), Type.typeSize(in.dtype()) * Nelem);
1714 }
1742 void append(const Storage &srhs) {
1743 if (!this->is_contiguous()) this->contiguous_();
1744
1745 // check Tensor in shape:
1746 cytnx_error_msg(srhs.size() == 0 || this->shape().size() == 0,
1747 "[ERROR] try to append a null Tensor.%s", "\n");
1748 cytnx_error_msg((this->shape().size() - 1) != 1,
1749 "[ERROR] append a storage to Tensor can only accept rank-2 Tensor.%s", "\n");
1750 cytnx_error_msg(this->shape().back() != srhs.size(), "[ERROR] Tensor dmension mismatch!%s",
1751 "\n");
1752
1753 // check type:
1754 Storage in;
1755 if (srhs.dtype() != this->dtype()) {
1756 in = srhs.astype(this->dtype());
1757 } else {
1758 in = srhs;
1759 }
1760 this->_impl->_shape[0] += 1;
1761 cytnx_uint64 oldsize = this->_impl->_storage.size();
1762 this->_impl->_storage.resize(oldsize + in.size());
1763 memcpy(((char *)this->_impl->_storage.data()) +
1764 oldsize * Type.typeSize(this->dtype()) / sizeof(char),
1765 in._impl->Mem, Type.typeSize(in.dtype()) * in.size());
1766 }
1767 /*
1768 void append(const Tensor &rhs){
1769 // convert to the same type.
1770 Tensor in;
1771 if(rhs.dtype() != this->dtype()){
1772 in = rhs.astype(this->dtype());
1773 }else{
1774 in = rhs;
1775 }
1776
1777 // 1) check rank
1778 if(this->shape().size()==1){
1779 // check if rhs is a scalar tensor (only one element)
1780 cytnx_error_msg(!(rhs.shape().size()==1 && rhs.shape()[0]==1),"[ERROR] trying to append
1781 a scalar into multidimensional Tensor is not allowed.\n Only rank-1 Tensor can accept scalar
1782 append.%s","\n"); this->_impl->_shape[0]+=1; this->_impl->_storage.append(0);
1783
1784 }else{
1785 cytnx_error_msg(rhs.shape().size() != this->shape().size()-1,"[ERROR] try to append a
1786 Tensor with rank not match.%s","\n");
1787
1788 }
1789 cytnx_error_msg(!this->is_contiguous(),"[ERROR] append require the Tensor to be contiguous.
1790 suggestion: call contiguous() or contiguous_() first.","\n");
1791 }
1792 */
1804 template <class T>
1805 void append(const T &rhs) {
1806 cytnx_error_msg(this->shape().size() != 1,
1807 "[ERROR] trying to append a scalar into multidimentional Tensor is not "
1808 "allow.\n Only rank-1 Tensor can accept scalar append.%s",
1809 "\n");
1811 "[ERROR] append require the Tensor to be contiguous. suggestion: call "
1812 "contiguous() or contiguous_() first.",
1813 "\n");
1814 this->_impl->_shape[0] += 1;
1815 this->_impl->_storage.append(rhs);
1816 }
1817
1826 bool same_data(const Tensor &rhs) const;
1827
1828 // linalg:
1834 std::vector<Tensor> Svd(const bool &is_UvT = true) const;
1835
1841 std::vector<Tensor> Eigh(const bool &is_V = true, const bool &row_v = false) const;
1842
1848
1853 Tensor InvM() const;
1854
1859 Tensor &Inv_(const double &clip);
1860
1865 Tensor Inv(const double &clip) const;
1866
1872
1877 Tensor Conj() const;
1878
1884
1889 Tensor Exp() const;
1890
1895 Tensor Norm() const;
1896
1901 Tensor Pow(const cytnx_double &p) const;
1902
1908
1913 Tensor Trace(const cytnx_uint64 &a = 0, const cytnx_uint64 &b = 1) const;
1914
1919 Tensor Abs() const;
1920
1926
1931 Tensor Max() const;
1932
1937 Tensor Min() const;
1938
1939 }; // class Tensor
1940
1941 Tensor operator+(const Tensor &lhs, const Tensor::Tproxy &rhs);
1942 Tensor operator-(const Tensor &lhs, const Tensor::Tproxy &rhs);
1943 Tensor operator*(const Tensor &lhs, const Tensor::Tproxy &rhs);
1944 Tensor operator/(const Tensor &lhs, const Tensor::Tproxy &rhs);
1945
1946 Tensor operator+(const Tensor &lhs, const Scalar::Sproxy &rhs);
1947 Tensor operator-(const Tensor &lhs, const Scalar::Sproxy &rhs);
1948 Tensor operator*(const Tensor &lhs, const Scalar::Sproxy &rhs);
1949 Tensor operator/(const Tensor &lhs, const Scalar::Sproxy &rhs);
1950
1952 std::ostream &operator<<(std::ostream &os, const Tensor &in);
1953 std::ostream &operator<<(std::ostream &os, const Tensor::Tproxy &in);
1955 //{ os << Tensor(in);};
1956} // namespace cytnx
1957
1958#endif
object that mimic the python slice to access elements in C++ [this is for c++ API only].
Definition Accessor.hpp:17
a tensor (multi-dimensional array)
Definition Tensor.hpp:41
void append(const Storage &srhs)
the append function of the Storage.
Definition Tensor.old.hpp:1742
Tensor & operator*=(const T &rc)
multiplication assignment operator with a Tensor or a scalar.
Tensor & Inv_(const double &clip)
the Inv_ member function. Same as cytnx::linalg::Inv_(Tensor &Tin, const double &clip)
Tensor & operator/=(const T &rc)
division assignment operator with a Tensor or a scalar.
Tensor operator-()
The negation function.
Definition Tensor.hpp:1322
void fill(const T &val)
fill all the element of current Tensor with the value.
Definition Tensor.old.hpp:1357
Tensor InvM() const
the InvM member function. Same as cytnx::linalg::InvM(const Tensor &Tin), where Tin is the current Te...
bool same_data(const Tensor &rhs) const
Check whether two tensors share the same internal memory.
void to_(const int &device)
move the current Tensor to the device.
Definition Tensor.old.hpp:919
Tensor reshape(const std::vector< cytnx_uint64 > &new_shape) const
Definition Tensor.old.hpp:1083
Tensor(const std::vector< cytnx_uint64 > &shape, const unsigned int &dtype=Type.Double, const int &device=-1, const bool &init_zero=1)
Construct a new Tensor object.
Definition Tensor.old.hpp:784
void append(const T &rhs)
the append function of the scalar.
Definition Tensor.old.hpp:1805
Tensor & operator-=(const T &rc)
subtraction assignment operator with a Tensor or a scalar.
Tensor & Add_(const T &rhs)
Addition function with a Tensor or a scalar, inplacely. Same as operator+=(const T &rhs).
Definition Tensor.old.hpp:1500
Tensor Abs() const
the Abs member function. Same as linalg::Abs(const Tensor &Tin), where Tin is the current Tensor.
Tensor reshape(const std::initializer_list< cytnx_int64 > &new_shape) const
Definition Tensor.old.hpp:1094
std::string device_str() const
the device (in string) of the Tensor
Definition Tensor.old.hpp:839
void reshape_(const std::vector< cytnx_int64 > &new_shape)
reshape the Tensor, inplacely
Definition Tensor.old.hpp:1032
Tensor contiguous_()
Make the Tensor contiguous by coalescing the memory (storage), inplacely.
Definition Tensor.old.hpp:1006
Tensor permute_(const std::vector< cytnx_uint64 > &rnks)
Definition Tensor.old.hpp:927
Tensor Mul(const T &rhs)
Multiplication function with a Tensor or a scalar. Same as cytnx::operator*(const Tensor &self,...
Definition Tensor.old.hpp:1530
unsigned int dtype() const
the dtype-id of the Tensor
Definition Tensor.hpp:514
Tensor Sub(const T &rhs)
Subtraction function with a Tensor or a scalar. Same as cytnx::operator-(const Tensor &self,...
Definition Tensor.old.hpp:1510
Tensor Inv(const double &clip) const
the Inv member function. Same as cytnx::linalg::Inv(const Tensor &Tin, const double &clip)
Tensor contiguous() const
Make the Tensor contiguous by coalescing the memory (storage).
Definition Tensor.old.hpp:986
void Tofile(const std::string &fname) const
Save current Tensor to the binary file.
T & at(const std::vector< cytnx_uint64 > &locator)
Get an element at specific location.
Definition Tensor.old.hpp:1162
Tensor reshape(const std::vector< cytnx_int64 > &new_shape) const
return a new Tensor that is reshaped.
Definition Tensor.old.hpp:1074
static Tensor Fromfile(const std::string &fname, const unsigned int &dtype, const cytnx_int64 &count=-1)
Load current Tensor from the binary file.
T & item()
get the element from a rank-0 Tensor.
Definition Tensor.hpp:914
Tensor clone() const
return a clone of the current Tensor.
Definition Tensor.old.hpp:870
std::vector< Tensor > Eigh(const bool &is_V=true, const bool &row_v=false) const
the Eigh member function. Same as cytnx::linalg::Eigh(const Tensor &Tin, const bool &is_V,...
void Tofile(const char *fname) const
void append(const Tensor &rhs)
the append function.
Definition Tensor.old.hpp:1680
static Tensor Load(const char *fname)
void Save(const char *fname) const
void set(const std::vector< cytnx::Accessor > &accessors, const Tensor &rhs)
set elements with the input Tensor using Accessor (C++ API) / slices (python API)
Definition Tensor.hpp:993
static Tensor Fromfile(const char *fname, const unsigned int &dtype, const cytnx_int64 &count=-1)
Tensor Norm() const
the Norm member function. Same as linalg::Norm(const Tensor &Tin), where Tin is the current Tensor.
Tensor astype(const int &new_type) const
return a new Tensor that cast to different dtype.
Definition Tensor.old.hpp:1127
Tensor & Div_(const T &rhs)
Division function with a Tensor or a scalar, inplacely. Same as operator/=(const T &rhs).
Definition Tensor.old.hpp:1562
static Tensor Load(const std::string &fname)
Load current Tensor from file.
Tensor & operator+=(const T &rc)
addition assignment operator with a Tensor or a scalar.
Tensor Conj() const
the Conj member function. Same as cytnx::linalg::Conj(const Tensor &Tin), where Tin is the current Te...
Tensor Trace(const cytnx_uint64 &a=0, const cytnx_uint64 &b=1) const
the Trace member function. Same as linalg::Trace(const Tensor &Tin, const cytnx_uint64 &a,...
bool equivshape(const Tensor &rhs)
compare the shape of two tensors.
Definition Tensor.old.hpp:1365
Tensor & Pow_(const cytnx_double &p)
the Pow_ member function. Same as linalg::Pow_(Tensor &Tin, const cytnx_double &p),...
std::vector< Tensor > Svd(const bool &is_UvT=true) const
the SVD member function. Same as cytnx::linalg::Svd(const Tensor &Tin, const bool &is_UvT) ,...
std::string dtype_str() const
the dtype (in string) of the Tensor
Definition Tensor.old.hpp:832
Tensor & Mul_(const T &rhs)
Multiplication function with a Tensor or a scalar, inplacely. Same as operator*=(const T &rhs).
Definition Tensor.old.hpp:1540
cytnx_uint64 rank() const
the rank of the Tensor
Definition Tensor.old.hpp:851
const bool & is_contiguous() const
return whether the Tensor is contiguous or not.
Definition Tensor.old.hpp:925
Tensor Exp() const
the Exp member function. Same as linalg::Exp(const Tensor &Tin), where Tin is the current Tensor.
Tensor & Abs_()
the Abs_ member function. Same as linalg::Abs_(Tensor &Tin), where Tin is the current Tensor.
Tensor Add(const T &rhs)
Addition function with a Tensor or a scalar. Same as cytnx::operator+(const Tensor &self,...
Definition Tensor.old.hpp:1490
void flatten_()
The flatten function, inplacely.
Definition Tensor.old.hpp:1649
void Save(const std::string &fname) const
Save current Tensor to file.
Tensor flatten() const
The flatten function.
Definition Tensor.old.hpp:1635
Tensor & Conj_()
the Conj_ member function. Same as cytnx::linalg::Conj_(Tensor &Tin), where Tin is the current Tensor...
Tensor Pow(const cytnx_double &p) const
the Pow member function. Same as linalg::Pow(const Tensor &Tin, const cytnx_double &p),...
int device() const
the device-id of the Tensor
Definition Tensor.hpp:521
Tensor real()
return the real part of the tensor.
Tensor imag()
return the imaginary part of the tensor.
Tensor to(const int &device) const
copy a tensor to new device
Definition Tensor.old.hpp:896
void Tofile(std::fstream &f) const
Tensor get(const std::vector< cytnx::Accessor > &accessors) const
get elements using Accessor (C++ API) / slices (python API)
Definition Tensor.hpp:961
void set(const std::vector< cytnx::Accessor > &accessors, const T &rc)
set elements with the input constant using Accessor (C++ API) / slices (python API)
Definition Tensor.old.hpp:1320
Tensor Max() const
the Max member function. Same as linalg::Max(const Tensor &Tin), where Tin is the current Tensor.
Tensor permute(const std::vector< cytnx_uint64 > &rnks) const
perform tensor permute on the cytnx::Tensor and return a new instance.
Definition Tensor.old.hpp:958
Tensor Div(const T &rhs)
Division function with a Tensor or a scalar. Same as cytnx::operator/(const Tensor &self,...
Definition Tensor.old.hpp:1551
Tensor Mod(const T &rhs)
Definition Tensor.old.hpp:1616
void Init(const std::vector< cytnx_uint64 > &shape, const unsigned int &dtype=Type.Double, const int &device=-1, const bool &init_zero=true)
initialize a Tensor
Definition Tensor.old.hpp:754
Tensor Cpr(const T &rhs)
The comparison function.
Definition Tensor.old.hpp:1573
Tensor & Exp_()
the Exp_ member function. Same as linalg::Exp_(Tensor &Tin), where Tin is the current Tensor.
Tensor & InvM_()
the InvM_ member function. Same as cytnx::linalg::InvM_(Tensor &Tin), where Tin is the current Tensor...
const std::vector< cytnx_uint64 > & shape() const
the shape of the Tensor
Definition Tensor.hpp:541
Tensor Min() const
the Min member function. Same as linalg::Min(const Tensor &Tin), where Tin is the current Tensor.
const T & at(const std::vector< cytnx_uint64 > &locator) const
Definition Tensor.old.hpp:1170
Storage & storage() const
return the storage of current Tensor.
Definition Tensor.hpp:1036
static Tensor from_storage(const Storage &in)
Convert a Storage to Tensor.
Definition Tensor.old.hpp:805
Tensor & Sub_(const T &rhs)
Subtraction function with a Tensor or a scalar, inplacely. Same as operator-=(const T &rhs).
Definition Tensor.old.hpp:1520
#define cytnx_error_msg(is_true, format,...)
Definition cytnx_error.hpp:16
Helper function to print vector with ODT:
Definition Accessor.hpp:12
Device_class Device
data on which devices.
cytnx::UniTensor operator*(const cytnx::UniTensor &Lt, const cytnx::UniTensor &Rt)
The multiplication operator between two UniTensor.
double cytnx_double
Definition Type.hpp:53
uint32_t cytnx_uint32
Definition Type.hpp:56
bool cytnx_bool
Definition Type.hpp:64
std::complex< double > cytnx_complex128
Definition Type.hpp:63
float cytnx_float
Definition Type.hpp:54
int16_t cytnx_int16
Definition Type.hpp:60
std::complex< float > cytnx_complex64
Definition Type.hpp:62
cytnx::UniTensor operator-(const cytnx::UniTensor &Lt, const cytnx::UniTensor &Rt)
The subtraction operator between two UniTensor.
int32_t cytnx_int32
Definition Type.hpp:59
uint16_t cytnx_uint16
Definition Type.hpp:57
uint64_t cytnx_uint64
Definition Type.hpp:55
int64_t cytnx_int64
Definition Type.hpp:58
Type_class Type
data type
cytnx::UniTensor operator+(const cytnx::UniTensor &Lt, const cytnx::UniTensor &Rt)
The addtion operator between two UniTensor.
cytnx::UniTensor operator/(const cytnx::UniTensor &Lt, const cytnx::UniTensor &Rt)
The division operator between two UniTensor.
tmp
Definition sp.py:8