Cytnx v0.9.1
Loading...
Searching...
No Matches
Tensor.hpp
Go to the documentation of this file.
1#ifndef _H_Tensor_
2#define _H_Tensor_
3
4#include "Type.hpp"
5#include "cytnx_error.hpp"
6#include "Storage.hpp"
7#include "Device.hpp"
9#include <iostream>
10#include <fstream>
11#include "utils/vec_range.hpp"
12#include "utils/vec_cast.hpp"
13#include "utils/dynamic_arg_resolver.hpp"
14//#include "linalg.hpp"
15#include "Accessor.hpp"
16#include <utility>
17#include <vector>
18#include <initializer_list>
19#include <string>
20#include "Scalar.hpp"
21
22namespace cytnx {
23
25 // real implementation
26 class Tensor_impl : public intrusive_ptr_base<Tensor_impl> {
27 private:
28 // Interface:
29 Storage_init_interface __SII;
30
31 // Memory:
32 Storage _storage;
33
34 // tensor shape
35 std::vector<cytnx_uint64> _shape;
36
37 // pseudo-perm info
38 std::vector<cytnx_uint64> _mapper;
39 std::vector<cytnx_uint64> _invmapper;
40 bool _contiguous;
41
42 public:
43 friend class Tensor;
44 boost::intrusive_ptr<Tensor_impl> _clone_meta_only() const {
45 boost::intrusive_ptr<Tensor_impl> out(new Tensor_impl());
46 out->_mapper = this->_mapper;
47 out->_invmapper = this->_invmapper;
48 out->_shape = this->_shape;
49 out->_contiguous = this->_contiguous;
50 return out;
51 }
52 Tensor_impl() : _contiguous(true){};
53
54 void Init(const std::vector<cytnx_uint64> &shape, const unsigned int &dtype = Type.Double,
55 int device = -1, const bool &init_zero = true);
56 void Init(const Storage &in);
57 // void Init(const Storage &in, const std::vector<cytnx_uint64> &shape,
58 // const unsigned int &dtype, int device);
59 /*
60 template<class T>
61 void From_vec(const T &ndvec){
62 cytnx_error_msg(std::string(typeid(T).name()).find("vector") ==
63 std::string::npos,"[ERROR][Tensor][From_vec] the input argument should be a nd vector.%s","\n");
64 //dispatch the rank!:
65
66
67
68 }
69 */
70 // clone&assignment constr., use intrusive_ptr's
71 Tensor_impl(const Tensor_impl &rhs);
72 Tensor_impl &operator=(const Tensor_impl &rhs); // add const
73
74 unsigned int dtype() const { return this->_storage.dtype(); }
75 int device() const { return this->_storage.device(); }
76
77 std::string dtype_str() const { return Type.getname(this->_storage.dtype()); }
78 std::string device_str() const { return Device.getname(this->_storage.device()); }
79
80 const std::vector<cytnx_uint64> &shape() const { return _shape; }
81
82 const bool &is_contiguous() const { return this->_contiguous; }
83
84 const std::vector<cytnx_uint64> &mapper() const { return this->_mapper; }
85 const std::vector<cytnx_uint64> &invmapper() const { return this->_invmapper; }
86 Storage &storage() { return _storage; }
87
88 const Storage &storage() const { return _storage; }
89
90 boost::intrusive_ptr<Tensor_impl> clone() const {
91 boost::intrusive_ptr<Tensor_impl> out = this->_clone_meta_only();
92 out->_storage = this->_storage.clone();
93 return out;
94 }
95
96 void to_(const int &device) { this->_storage.to_(device); }
97 boost::intrusive_ptr<Tensor_impl> to(const int &device) {
98 if (this->device() == device) {
99 // boost::intrusive_ptr<Tensor_impl> out(this);
100 return this;
101 } else {
102 boost::intrusive_ptr<Tensor_impl> out = this->_clone_meta_only();
103 out->_storage = this->_storage.to(device);
104 return out;
105 }
106 }
107
108 void permute_(const std::vector<cytnx_uint64> &rnks);
109
110 boost::intrusive_ptr<Tensor_impl> permute(const std::vector<cytnx_uint64> &rnks);
111
112 template <class T>
113 T &at(const std::vector<cytnx_uint64> &locator) const {
114 cytnx_error_msg(locator.size() != this->_shape.size(), "%s",
115 "The input index does not match Tensor's rank.");
116
117 cytnx_uint64 RealRank, mtplyr;
118 // std::vector<cytnx_uint64> c_shape(this->_shape.size());
119 // std::vector<cytnx_uint64> c_loc(this->_shape.size());
120 cytnx_uint64 c_shape, c_loc;
121
122 RealRank = 0;
123 mtplyr = 1;
124
125 for (cytnx_int64 i = this->_shape.size() - 1; i >= 0; i--) {
126 if (locator[i] >= this->_shape[i]) {
127 cytnx_error_msg(true, "%s", "Attempting to access out-of-bound index in Tensor.");
128 }
129 // c_shape[i] = this->_shape[this->_invmapper[i]];
130 // c_loc[i] = locator[this->_invmapper[i]];
131 c_shape = this->_shape[this->_invmapper[i]];
132 c_loc = locator[this->_invmapper[i]];
133 RealRank += mtplyr * c_loc;
134 mtplyr *= c_shape;
135 }
136 return this->_storage.at<T>(RealRank);
137 }
138
139 const Scalar::Sproxy at(const std::vector<cytnx_uint64> &locator) const {
140 cytnx_error_msg(locator.size() != this->_shape.size(), "%s",
141 "The input index does not match Tensor's rank.");
142
143 cytnx_uint64 RealRank, mtplyr;
144 // std::vector<cytnx_uint64> c_shape(this->_shape.size());
145 // std::vector<cytnx_uint64> c_loc(this->_shape.size());
146
147 cytnx_uint64 c_shape, c_loc;
148 RealRank = 0;
149 mtplyr = 1;
150
151 for (cytnx_int64 i = this->_shape.size() - 1; i >= 0; i--) {
152 if (locator[i] >= this->_shape[i]) {
153 cytnx_error_msg(true, "%s", "Attempting to access out-of-bound index in Tensor.");
154 }
155 // c_shape[i] = this->_shape[this->_invmapper[i]];
156 // c_loc[i] = locator[this->_invmapper[i]];
157 c_shape = this->_shape[this->_invmapper[i]];
158 c_loc = locator[this->_invmapper[i]];
159 RealRank += mtplyr * c_loc;
160 mtplyr *= c_shape;
161 }
162 return this->_storage.at(RealRank);
163 }
164
165 Scalar::Sproxy at(const std::vector<cytnx_uint64> &locator) {
166 cytnx_error_msg(locator.size() != this->_shape.size(), "%s",
167 "The input index does not match Tensor's rank.");
168
169 cytnx_uint64 RealRank, mtplyr;
170 // std::vector<cytnx_uint64> c_shape(this->_shape.size());
171 // std::vector<cytnx_uint64> c_loc(this->_shape.size());
172 cytnx_uint64 c_shape, c_loc;
173
174 RealRank = 0;
175 mtplyr = 1;
176
177 for (cytnx_int64 i = this->_shape.size() - 1; i >= 0; i--) {
178 if (locator[i] >= this->_shape[i]) {
179 cytnx_error_msg(true, "%s", "Attempting to access out-of-bound index in Tensor.");
180 }
181 // c_shape[i] = this->_shape[this->_invmapper[i]];
182 // c_loc[i] = locator[this->_invmapper[i]];
183 c_shape = this->_shape[this->_invmapper[i]];
184 c_loc = locator[this->_invmapper[i]];
185 RealRank += mtplyr * c_loc;
186 mtplyr *= c_shape;
187 }
188 return this->_storage.at(RealRank);
189 }
190
191 boost::intrusive_ptr<Tensor_impl> get(const std::vector<cytnx::Accessor> &accessors);
192 boost::intrusive_ptr<Tensor_impl> get_deprecated(const std::vector<cytnx::Accessor> &accessors);
193 void set(const std::vector<cytnx::Accessor> &accessors,
194 const boost::intrusive_ptr<Tensor_impl> &rhs);
195
196 template <class T>
197 void set(const std::vector<cytnx::Accessor> &accessors, const T &rc);
198
199 void set(const std::vector<cytnx::Accessor> &accessors, const Scalar::Sproxy &rc);
200
201 template <class Tx>
202 void fill(const Tx &val) {
203 this->storage().fill(val);
204 }
205
206 boost::intrusive_ptr<Tensor_impl> contiguous() {
207 // return new instance if act on non-contiguous tensor
208 // return self if act on contiguous tensor
209 if (this->_contiguous) {
210 boost::intrusive_ptr<Tensor_impl> out(this);
211 // out->_storage = this->_storage;
212 return out;
213 } else {
214 boost::intrusive_ptr<Tensor_impl> out(new Tensor_impl());
215 std::vector<cytnx_uint64> oldshape(this->_shape.size());
216 for (cytnx_uint64 i = 0; i < this->_shape.size(); i++) {
217 oldshape[i] = this->_shape[this->_invmapper[i]];
218 }
219
220 out->_storage._impl =
221 this->_storage._impl->Move_memory(oldshape, this->_mapper, this->_invmapper);
222 // this->_storage._impl->Move_memory_(oldshape, this->_mapper, this->_invmapper);
223 // out->_storage._impl = this->_storage._impl;
224 // std::cout << out->_storage << std::endl;
225 out->_invmapper = vec_range(this->_invmapper.size());
226 out->_mapper = out->_invmapper;
227 out->_shape = this->_shape;
228 out->_contiguous = true;
229 return out;
230 }
231 }
232
233 void contiguous_() {
234 // return new instance if act on non-contiguous tensor
235 // return self if act on contiguous tensor
236 if (!this->_contiguous) {
237 std::vector<cytnx_uint64> oldshape(this->_shape.size());
238 for (cytnx_uint64 i = 0; i < this->_shape.size(); i++) {
239 oldshape[i] = this->_shape[this->_invmapper[i]];
240 }
241
242 this->_storage._impl =
243 this->_storage._impl->Move_memory(oldshape, this->_mapper, this->_invmapper);
244 // this->_storage._impl->Move_memory_(oldshape, this->_mapper, this->_invmapper);
245 // this->_mapper = vec_range(this->_invmapper.size());
246 vec_range_(this->_mapper, this->invmapper().size());
247 this->_invmapper = this->_mapper;
248 this->_contiguous = true;
249 }
250 }
251
252 void reshape_(const std::vector<cytnx_int64> &new_shape) {
253 if (!this->_contiguous) {
254 this->contiguous_();
255 }
256 // std::vector<cytnx_uint64> result_shape(new_shape.size());
257 cytnx_uint64 new_N = 1;
258 bool has_undetermine = false;
259 unsigned int Udet_id = 0;
260 // this->_shape = vec_cast<cytnx_int64,cytnx_uint64>(new_shape);
261 this->_shape.resize(new_shape.size());
262 for(cytnx_uint64 i=0;i<new_shape.size();i++){
263 this->_shape[i] = new_shape[i];
264 }
265 for (int i = 0; i < new_shape.size(); i++) {
266 if (new_shape[i] < 0) {
267 if (new_shape[i] != -1)
269 new_shape[i] != -1, "%s",
270 "[ERROR] reshape can only have dimension > 0 and one undetermine rank specify as -1");
271 if (has_undetermine)
273 new_shape[i] != -1, "%s",
274 "[ERROR] reshape can only have dimension > 0 and one undetermine rank specify as -1");
275 Udet_id = i;
276 has_undetermine = true;
277 } else {
278 new_N *= new_shape[i];
279 // result_shape[i] = new_shape[i];
280 }
281 }
282
283 if (has_undetermine) {
284 cytnx_error_msg(new_N > this->_storage.size(), "%s",
285 "[ERROR] new shape exceed the total number of elements.");
286 cytnx_error_msg(this->_storage.size() % new_N, "%s",
287 "[ERROR] unmatch size when reshape with undetermine dimension");
288 // result_shape[Udet_id] = this->_storage.size() / new_N;
289 this->_shape[Udet_id] = this->_storage.size() / new_N;
290 } else {
291 cytnx_error_msg(new_N != this->_storage.size(), "%s",
292 "[ERROR] new shape does not match the number of elements.");
293 }
294
295 // this->_shape = result_shape;
296 // this->_mapper = std::move(vec_range(new_shape.size()));
297 this->_mapper.resize(new_shape.size());
298 vec_range_(this->_mapper, new_shape.size());
299 this->_invmapper = this->_mapper;
300 }
301
302 boost::intrusive_ptr<Tensor_impl> reshape(const std::vector<cytnx_int64> &new_shape) {
303 boost::intrusive_ptr<Tensor_impl> out(new Tensor_impl());
304 if (this->is_contiguous()) {
305 out = this->_clone_meta_only();
306 out->_storage = this->_storage;
307 } else {
308 out = this->contiguous();
309 }
310 // out = this->clone();
311
312 out->reshape_(new_shape);
313 return out;
314 }
315
316 boost::intrusive_ptr<Tensor_impl> astype(const int &new_type) {
317 // boost::intrusive_ptr<Tensor_impl> out(new Tensor_impl());
318 // out->_storage = this->_storage.astype(new_type);
319 if (this->dtype() == new_type) {
320 return this;
321 } else {
322 boost::intrusive_ptr<Tensor_impl> out = this->_clone_meta_only();
323 out->_storage = this->_storage.astype(new_type);
324 return out;
325 }
326 }
327 };
329
330 class Tensor;
331
333 // [Note] these are fwd from linalg.hpp
334 template <class T>
335 Tensor operator+(const Tensor &lhs, const T &rc);
336 template <class T>
337 Tensor operator-(const Tensor &lhs, const T &rhs);
338 template <class T>
339 Tensor operator*(const Tensor &lhs, const T &rhs);
340 template <class T>
341 Tensor operator/(const Tensor &lhs, const T &rhs);
343
345 class Tensor {
346 private:
347 public:
349 // this is a proxy class to allow get/set element using [] as python!
    // this is a proxy class to allow get/set element using [] as python!
    // A Tproxy holds a pointer to the tensor's impl plus the accessors that
    // select a sub-region; it materializes a Tensor only when read, and routes
    // writes through Tensor_impl::set.
    struct Tproxy {
      boost::intrusive_ptr<Tensor_impl> _insimpl;  // impl of the tensor being indexed
      std::vector<cytnx::Accessor> _accs;          // the indices/slices selecting the region
      Tproxy(boost::intrusive_ptr<Tensor_impl> _ptr, const std::vector<cytnx::Accessor> &accs)
          : _insimpl(std::move(_ptr)), _accs(accs) {}

      // when used to set elems:
      const Tensor &operator=(const Tensor &rhs) {
        this->_insimpl->set(_accs, rhs._impl);
        return rhs;
      }

      // assign a scalar (or other settable value) to every selected element.
      template <class T>
      const T &operator=(const T &rc) {
        this->_insimpl->set(_accs, rc);
        return rc;
      }
      // proxy-to-proxy assignment: materialize the rhs region, then write it in.
      const Tproxy &operator=(const Tproxy &rc) {
        Tensor tmp = Tensor(rc);
        this->_insimpl->set(_accs, tmp._impl);
        return rc;
      }

      // Compound assignment: read the region, apply the op, write it back.
      // NOTE(review): after the write-back, self._impl is re-pointed at the
      // full tensor impl, so the returned Tensor is the whole tensor, not the
      // selected region — confirm this is the intended return semantics.
      template <class T>
      Tensor operator+=(const T &rc) {
        Tensor self;
        self._impl = _insimpl->get(_accs);
        self += rc;
        _insimpl->set(_accs, self._impl);
        self._impl = this->_insimpl;
        return self;
      }
      Tensor operator+=(const Tproxy &rc);

      template <class T>
      Tensor operator-=(const T &rc) {
        Tensor self;
        self._impl = _insimpl->get(_accs);
        self -= rc;
        _insimpl->set(_accs, self._impl);
        self._impl = this->_insimpl;
        return self;
      }
      Tensor operator-=(const Tproxy &rc);

      template <class T>
      Tensor operator/=(const T &rc) {
        Tensor self;
        self._impl = _insimpl->get(_accs);
        self /= rc;
        _insimpl->set(_accs, self._impl);
        self._impl = this->_insimpl;
        return self;
      }
      Tensor operator/=(const Tproxy &rc);

      template <class T>
      Tensor operator*=(const T &rc) {
        Tensor self;
        self._impl = _insimpl->get(_accs);
        self *= rc;
        _insimpl->set(_accs, self._impl);
        self._impl = this->_insimpl;
        return self;
      }
      Tensor operator*=(const Tproxy &rc);

      // Per-scalar-type arithmetic overloads (defined out-of-line); each
      // materializes the selected region and applies the op.
      Tensor operator+(const cytnx_complex128 &rc) const;
      Tensor operator+(const cytnx_complex64 &rc) const;
      Tensor operator+(const cytnx_double &rc) const;
      Tensor operator+(const cytnx_float &rc) const;
      Tensor operator+(const cytnx_uint64 &rc) const;
      Tensor operator+(const cytnx_int64 &rc) const;
      Tensor operator+(const cytnx_uint32 &rc) const;
      Tensor operator+(const cytnx_int32 &rc) const;
      Tensor operator+(const cytnx_uint16 &rc) const;
      Tensor operator+(const cytnx_int16 &rc) const;
      Tensor operator+(const cytnx_bool &rc) const;
      Tensor operator+(const Tproxy &rc) const;

      Tensor operator-(const cytnx_complex128 &rc) const;
      Tensor operator-(const cytnx_complex64 &rc) const;
      Tensor operator-(const cytnx_double &rc) const;
      Tensor operator-(const cytnx_float &rc) const;
      Tensor operator-(const cytnx_uint64 &rc) const;
      Tensor operator-(const cytnx_int64 &rc) const;
      Tensor operator-(const cytnx_uint32 &rc) const;
      Tensor operator-(const cytnx_int32 &rc) const;
      Tensor operator-(const cytnx_uint16 &rc) const;
      Tensor operator-(const cytnx_int16 &rc) const;
      Tensor operator-(const cytnx_bool &rc) const;
      Tensor operator-(const Tproxy &rc) const;

      // unary negation of the selected region.
      Tensor operator-() const;

      Tensor operator*(const cytnx_complex128 &rc) const;
      Tensor operator*(const cytnx_complex64 &rc) const;
      Tensor operator*(const cytnx_double &rc) const;
      Tensor operator*(const cytnx_float &rc) const;
      Tensor operator*(const cytnx_uint64 &rc) const;
      Tensor operator*(const cytnx_int64 &rc) const;
      Tensor operator*(const cytnx_uint32 &rc) const;
      Tensor operator*(const cytnx_int32 &rc) const;
      Tensor operator*(const cytnx_uint16 &rc) const;
      Tensor operator*(const cytnx_int16 &rc) const;
      Tensor operator*(const cytnx_bool &rc) const;
      Tensor operator*(const Tproxy &rc) const;

      Tensor operator/(const cytnx_complex128 &rc) const;
      Tensor operator/(const cytnx_complex64 &rc) const;
      Tensor operator/(const cytnx_double &rc) const;
      Tensor operator/(const cytnx_float &rc) const;
      Tensor operator/(const cytnx_uint64 &rc) const;
      Tensor operator/(const cytnx_int64 &rc) const;
      Tensor operator/(const cytnx_uint32 &rc) const;
      Tensor operator/(const cytnx_int32 &rc) const;
      Tensor operator/(const cytnx_uint16 &rc) const;
      Tensor operator/(const cytnx_int16 &rc) const;
      Tensor operator/(const cytnx_bool &rc) const;
      Tensor operator/(const Tproxy &rc) const;

      // extract the (single) selected element as type T.
      template <class T>
      T item() const {
        Tensor out;
        out._impl = _insimpl->get(_accs);
        return out.item<T>();
      }

      // type-erased single-element access.
      Scalar::Sproxy item() const {
        Tensor out;
        out._impl = _insimpl->get(_accs);
        return out.item();
      }

      // when used to get elems:
      operator Tensor() const {
        Tensor out;
        out._impl = _insimpl->get(_accs);
        return out;
      }

      // storage of the materialized (copied) region — not a view.
      Storage storage() const {
        Tensor out;
        out._impl = _insimpl->get(_accs);
        return out.storage();
      }

    };  // proxy class of Tensor.
531
533
535 // these two are using the python way!
536 //----------------------------------------
537 template <class... Ts>
538 Tproxy operator()(const std::string &e1, const Ts &...elems) {
539 // std::cout << e1 << std::endl;
540 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1, elems...);
541 return (*this)[tmp];
542 }
543 template <class... Ts>
544 Tproxy operator()(const cytnx_int64 &e1, const Ts &...elems) {
545 // std::cout << e1<< std::endl;
546 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1, elems...);
547 return (*this)[tmp];
548 }
549 template <class... Ts>
550 Tproxy operator()(const cytnx::Accessor &e1, const Ts &...elems) {
551 // std::cout << e1 << std::endl;
552 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1, elems...);
553 return (*this)[tmp];
554 }
555 template <class... Ts>
556 const Tproxy operator()(const std::string &e1, const Ts &...elems) const {
557 // std::cout << e1 << std::endl;
558 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1, elems...);
559 return (*this)[tmp];
560 }
561 template <class... Ts>
562 const Tproxy operator()(const cytnx_int64 &e1, const Ts &...elems) const {
563 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1, elems...);
564 return (*this)[tmp];
565 }
566 template <class... Ts>
567 const Tproxy operator()(const cytnx::Accessor &e1, const Ts &...elems) const {
568 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1, elems...);
569 return (*this)[tmp];
570 }
571
572 //-----------------------------------------
573
574 Tproxy operator[](const std::initializer_list<cytnx::Accessor> &accs) {
575 std::vector<cytnx::Accessor> tmp = accs;
576 return (*this)[tmp];
577 }
578 Tproxy operator[](const std::vector<cytnx::Accessor> &accs) {
579 return Tproxy(this->_impl, accs);
580 }
581
582 const Tproxy operator[](const std::vector<cytnx::Accessor> &accs) const {
583 return Tproxy(this->_impl, accs);
584 }
585 const Tproxy operator[](const std::initializer_list<cytnx::Accessor> &accs) const {
586 std::vector<cytnx::Accessor> tmp = accs;
587 return (*this)[tmp];
588 }
589
590 Tproxy operator[](const std::initializer_list<cytnx_int64> &accs) {
591 std::vector<cytnx_int64> tmp = accs;
592 return (*this)[tmp];
593 }
594 Tproxy operator[](const std::vector<cytnx_int64> &accs) {
595 std::vector<cytnx::Accessor> acc_in;
596 for (int i = 0; i < accs.size(); i++) {
597 acc_in.push_back(cytnx::Accessor(accs[i]));
598 }
599 return Tproxy(this->_impl, acc_in);
600 }
601 const Tproxy operator[](const std::initializer_list<cytnx_int64> &accs) const {
602 std::vector<cytnx_int64> tmp = accs;
603 return (*this)[tmp];
604 }
605 const Tproxy operator[](const std::vector<cytnx_int64> &accs) const {
606 std::vector<cytnx::Accessor> acc_in;
607 for (int i = 0; i < accs.size(); i++) {
608 acc_in.push_back(cytnx::Accessor(accs[i]));
609 }
610 return Tproxy(this->_impl, acc_in);
611 }
613 //-------------------------------------------
614
616 void _Save(std::fstream &f) const;
617 void _Load(std::fstream &f);
618
620
630 void Save(const std::string &fname) const;
634 void Save(const char *fname) const;
635 void Tofile(const std::string &fname) const;
636 void Tofile(const char *fname) const;
637 void Tofile(std::fstream &f) const;
646 static Tensor Load(const std::string &fname);
650 static Tensor Load(const char *fname);
651 static Tensor Fromfile(const std::string &fname, const unsigned int &dtype,
652 const cytnx_int64 &count = -1);
653 static Tensor Fromfile(const char *fname, const unsigned int &dtype,
654 const cytnx_int64 &count = -1);
655
656 // static Tensor Frombinary(const std::string &fname);
657
    // The shared implementation; copying a Tensor copies this pointer, so
    // copies are shallow (they alias the same storage).
    boost::intrusive_ptr<Tensor_impl> _impl;
    // Default: an empty impl with no storage allocated.
    Tensor() : _impl(new Tensor_impl()){};
    // Shallow copy: shares the impl with rhs.
    Tensor(const Tensor &rhs) { _impl = rhs._impl; }

    /*
    template<class Tp>
    Tensor(const std::initializer_list<Tp> &rhs){
      Storage stmp = std::vector<Tp>(rhs);
      boost::intrusive_ptr<Tensor_impl> tmp(new Tensor_impl());
      tmp->Init(stmp);
      this->_impl = tmp;
    }
    */

    // Shallow assignment: shares the impl with rhs.
    Tensor &operator=(const Tensor &rhs) {
      _impl = rhs._impl;
      return *this;
    }

    // Proxy assignment: materialize the region selected by the proxy as this
    // tensor's new impl (a copy of the region, not a view).
    void operator=(const Tproxy &rhsp) {  // this is used to handle proxy assignment
      this->_impl = rhsp._insimpl->get(rhsp._accs);
    }
682
684 // default device==Device.cpu (-1)
709 void Init(const std::vector<cytnx_uint64> &shape, const unsigned int &dtype = Type.Double,
710 const int &device = -1, const bool &init_zero = true) {
711 boost::intrusive_ptr<Tensor_impl> tmp(new Tensor_impl());
712 this->_impl = tmp;
713 this->_impl->Init(shape, dtype, device, init_zero);
714 }
715 // void Init(const Storage& storage) {
716 // boost::intrusive_ptr<Tensor_impl> tmp(new Tensor_impl());
717 // this->_impl = tmp;
718 // this->_impl->Init(storage);
719 // }
720 // void Init(const Storage& storage, const std::vector<cytnx_uint64> &shape,
721 // const unsigned int &dtype = Type.Double, const int &device = -1) {
722 // boost::intrusive_ptr<Tensor_impl> tmp(new Tensor_impl());
723 // this->_impl = tmp;
724 // this->_impl->Init(storage, shape, dtype, device);
725 // }
726
739 Tensor(const std::vector<cytnx_uint64> &shape, const unsigned int &dtype = Type.Double,
740 const int &device = -1, const bool &init_zero = 1)
741 : _impl(new Tensor_impl()) {
742 this->Init(shape, dtype, device, init_zero);
743 }
744 // Tensor(const Storage& storage)
745 // : _impl(new Tensor_impl()) {
746 // this->Init(storage);
747 // }
748 // Tensor(const Storage& storage, const std::vector<cytnx_uint64> &shape,
749 // const unsigned int &dtype = Type.Double, const int &device = -1)
750 // : _impl(new Tensor_impl()) {
751 // this->Init(storage, shape, dtype, device);
752 // }
754
755 static Tensor from_storage(const Storage &in) {
756 Tensor out;
757 boost::intrusive_ptr<Tensor_impl> tmp(new Tensor_impl());
758 out._impl = tmp;
759 out._impl->Init(in);
760 return out;
761 }
762
    /// @brief the dtype id of this Tensor (cf. Type).
    unsigned int dtype() const { return this->_impl->dtype(); }

    /// @brief the device id this Tensor lives on (cf. Device).
    int device() const { return this->_impl->device(); }

    /// @brief the dtype as a human-readable string.
    std::string dtype_str() const { return this->_impl->dtype_str(); }

    /// @brief the device as a human-readable string.
    std::string device_str() const { return this->_impl->device_str(); }

    /// @brief the shape of this Tensor.
    const std::vector<cytnx_uint64> &shape() const { return this->_impl->shape(); }

    /// @brief the rank (number of dimensions).
    cytnx_uint64 rank() const { return this->_impl->shape().size(); }

    /// @brief deep copy: independent storage and metadata.
    Tensor clone() const {
      Tensor out;
      out._impl = this->_impl->clone();
      return out;
    }

    /// @brief return a Tensor on @p device (shares the impl when already there).
    Tensor to(const int &device) const {
      Tensor out;
      out._impl = this->_impl->to(device);
      return out;
    }

    /// @brief move this Tensor's storage to @p device in place.
    void to_(const int &device) { this->_impl->to_(device); }

    /// @brief whether memory layout matches the current index order.
    const bool &is_contiguous() const { return this->_impl->is_contiguous(); }
872
873 Tensor permute_(const std::vector<cytnx_uint64> &rnks) {
874 this->_impl->permute_(rnks);
875 return *this;
876 }
878 template <class... Ts>
879 Tensor permute_(const cytnx_uint64 &e1, const Ts &...elems) {
880 std::vector<cytnx_uint64> argv = dynamic_arg_uint64_resolver(e1, elems...);
881 this->_impl->permute_(argv);
882 return *this;
883 }
885
904 Tensor permute(const std::vector<cytnx_uint64> &rnks) const {
905 Tensor out;
906 out._impl = this->_impl->permute(rnks);
907 return out;
908 }
910 template <class... Ts>
911 Tensor permute(const cytnx_uint64 &e1, const Ts &...elems) const {
912 std::vector<cytnx_uint64> argv = dynamic_arg_uint64_resolver(e1, elems...);
913 return this->permute(argv);
914 }
916
933 Tensor out;
934 out._impl = this->_impl->contiguous();
935 return out;
936 }
937
953 this->_impl->contiguous_();
954 return *this;
955 }
956
978 void reshape_(const std::vector<cytnx_int64> &new_shape) { this->_impl->reshape_(new_shape); }
980 void reshape_(const std::vector<cytnx_uint64> &new_shape) {
981 std::vector<cytnx_int64> shape(new_shape.begin(), new_shape.end());
982 this->_impl->reshape_(shape);
983 }
984 void reshape_(const std::initializer_list<cytnx_int64> &new_shape) {
985 std::vector<cytnx_int64> shape = new_shape;
986 this->_impl->reshape_(shape);
987 }
988 template <class... Ts>
989 void reshape_(const cytnx_int64 &e1, const Ts... elems) {
990 std::vector<cytnx_int64> shape = dynamic_arg_int64_resolver(e1, elems...);
991 // std::cout << shape << std::endl;
992 this->_impl->reshape_(shape);
993 }
995
1019 Tensor reshape(const std::vector<cytnx_int64> &new_shape) const {
1020 Tensor out;
1021 out._impl = this->_impl->reshape(new_shape);
1022 return out;
1023 }
1024
1028 Tensor reshape(const std::vector<cytnx_uint64> &new_shape) const {
1029 std::vector<cytnx_int64> tmp(new_shape.size());
1030 memcpy(&tmp[0], &new_shape[0], sizeof(cytnx_uint64) * new_shape.size());
1031 Tensor out;
1032 out._impl = this->_impl->reshape(tmp);
1033 return out;
1034 }
1035
1039 Tensor reshape(const std::initializer_list<cytnx_int64> &new_shape) const {
1040 return this->reshape(std::vector<cytnx_int64>(new_shape));
1041 }
1042
1044 template <class... Ts>
1045 Tensor reshape(const cytnx_int64 &e1, const Ts &...elems) const {
1046 std::vector<cytnx_int64> argv = dynamic_arg_int64_resolver(e1, elems...);
1047 return this->reshape(argv);
1048 }
1050
1068 Tensor astype(const int &new_type) const {
1069 Tensor out;
1070 out._impl = this->_impl->astype(new_type);
1071 return out;
1072 }
1073
1074 // Tensor diagonal(){
1075 // for(unsigned int i=0;i<this->shape().size();i++){
1076 // if(this->shape()[i] != this->shape()[0],"[ERROR] Tensor.diagonal() can only be called
1077 // when the subject has equal dimension in each rank.%s","\n");
1078 // }
1079 //
1080 // }
1081
    /// @brief typed element access at an nd-index; returns a reference into
    ///        the underlying storage (writes are visible in the tensor).
    template <class T>
    T &at(const std::vector<cytnx_uint64> &locator) {
      return this->_impl->at<T>(locator);
    }

    /// @brief const typed element access at an nd-index.
    template <class T>
    const T &at(const std::vector<cytnx_uint64> &locator) const {
      return this->_impl->at<T>(locator);
    }
    /// @brief variadic convenience form: t.at<double>(i, j, k) (const).
    template <class T, class... Ts>
    const T &at(const cytnx_uint64 &e1, const Ts &...elems) const {
      std::vector<cytnx_uint64> argv = dynamic_arg_uint64_resolver(e1, elems...);
      return this->at<T>(argv);
    }
    /// @brief variadic convenience form: t.at<double>(i, j, k).
    template <class T, class... Ts>
    T &at(const cytnx_uint64 &e1, const Ts &...elems) {
      std::vector<cytnx_uint64> argv = dynamic_arg_uint64_resolver(e1, elems...);
      return this->at<T>(argv);
    }

    /// @brief type-erased element access (const path).
    const Scalar::Sproxy at(const std::vector<cytnx_uint64> &locator) const {
      return this->_impl->at(locator);
    }

    /// @brief type-erased element access (mutable path).
    Scalar::Sproxy at(const std::vector<cytnx_uint64> &locator) { return this->_impl->at(locator); }
1130
    /// @brief extract the single element as type T.
    /// @throws via cytnx_error_msg unless the Tensor holds exactly one element.
    template <class T>
    T &item() {
      cytnx_error_msg(this->_impl->storage().size() != 1, "[ERROR][Tensor.item<T>]%s",
                      "item can only be called from a Tensor with only one element\n");
      return this->_impl->storage().at<T>(0);
    }

    /// @brief const variant of item<T>().
    template <class T>
    const T &item() const {
      cytnx_error_msg(this->_impl->storage().size() != 1, "[ERROR][Tensor.item<T>]%s",
                      "item can only be called from a Tensor with only one element\n");
      return this->_impl->storage().at<T>(0);
    }

    // NOTE(review): unlike item<T>(), these Sproxy overloads perform no
    // size==1 check and always proxy element 0 — confirm the asymmetry is
    // intended before relying on it.
    const Scalar::Sproxy item() const {
      Scalar::Sproxy out(this->storage()._impl, 0);
      return out;
    }

    Scalar::Sproxy item() {
      Scalar::Sproxy out(this->storage()._impl, 0);
      return out;
    }
1178
1180
    /// @brief materialize the sub-region selected by @p accessors as a new
    ///        Tensor (a copy, not a view).
    Tensor get(const std::vector<cytnx::Accessor> &accessors) const {
      Tensor out;
      out._impl = this->_impl->get(accessors);
      return out;
    }

    /*
    Tensor get_v2(const std::vector<cytnx::Accessor> &accessors) const{
      Tensor out;
      out._impl = this->_impl->get_v2(accessors);
      return out;
    }
    */

    /// @brief write @p rhs into the sub-region selected by @p accessors.
    void set(const std::vector<cytnx::Accessor> &accessors, const Tensor &rhs) {
      this->_impl->set(accessors, rhs._impl);
    }

    /// @brief broadcast-assign scalar @p rc over the selected sub-region.
    template <class T>
    void set(const std::vector<cytnx::Accessor> &accessors, const T &rc) {
      this->_impl->set(accessors, rc);
    }
    /// @brief brace-list convenience overload of set().
    template <class T>
    void set(const std::initializer_list<cytnx::Accessor> &accessors, const T &rc) {
      std::vector<cytnx::Accessor> args = accessors;
      this->set(args, rc);
    }

    /// @brief direct access to the underlying Storage (shared, not a copy).
    Storage &storage() const { return this->_impl->storage(); }

    /// @brief fill every element with @p val.
    template <class T>
    void fill(const T &val) {
      this->_impl->fill(val);
    }

    /// @brief shape-only equivalence check.
    /// NOTE(review): only the shapes are compared here — dtype, device and
    /// element values are NOT checked; confirm callers expect that.
    bool equiv(const Tensor &rhs) {
      if (this->shape() != rhs.shape()) return false;
      return true;
    }
1306
1314 Tensor real();
1315
1323 Tensor imag();
1324
1325 // Arithmic:
1342 template <class T>
1343 Tensor &operator+=(const T &rc);
1344
1361 template <class T>
1362 Tensor &operator-=(const T &rc);
1363
1380 template <class T>
1381 Tensor &operator*=(const T &rc);
1382
1400 template <class T>
1401 Tensor &operator/=(const T &rc);
1402
1403 // Tensor &operator+=(const Tproxy &rc);
1404 // Tensor &operator-=(const Tproxy &rc);
1405 // Tensor &operator*=(const Tproxy &rc);
1406 // Tensor &operator/=(const Tproxy &rc);
1407 /*
1408 Tensor operator+(const Tproxy &rc){
1409 return *this + Tensor(rc);
1410 }
1411 Tensor operator-(const Tproxy &rc){
1412 return *this - Tensor(rc);
1413 }
1414 Tensor operator*(const Tproxy &rc){
1415 return *this * Tensor(rc);
1416 }
1417 Tensor operator/(const Tproxy &rc){
1418 return *this / Tensor(rc);
1419 }
1420 */
1426 template <class T>
1427 Tensor Add(const T &rhs) {
1428 return *this + rhs;
1429 }
1430
1436 template <class T>
1437 Tensor &Add_(const T &rhs) {
1438 return *this += rhs;
1439 }
1440
1446 template <class T>
1447 Tensor Sub(const T &rhs) {
1448 return *this - rhs;
1449 }
1450
1456 template <class T>
1457 Tensor &Sub_(const T &rhs) {
1458 return *this -= rhs;
1459 }
1460
1466 template <class T>
1467 Tensor Mul(const T &rhs) {
1468 return *this * rhs;
1469 }
1470
1476 template <class T>
1477 Tensor &Mul_(const T &rhs) {
1478 return *this *= rhs;
1479 }
1480
1487 template <class T>
1488 Tensor Div(const T &rhs) {
1489 return *this / rhs;
1490 }
1491
1498 template <class T>
1499 Tensor &Div_(const T &rhs) {
1500 return *this /= rhs;
1501 }
1502
1509 template <class T>
1510 Tensor Cpr(const T &rhs) {
1511 return *this == rhs;
1512 }
1513
1514 // template<class T>
1515 // Tensor& Cpr_(const T &rhs){
1516 //
1517 // return *this == rhs;
1518 // }
1519
1520 template <class T>
1521 Tensor Mod(const T &rhs) {
1522 return *this % rhs;
1523 }
1524
  /// @brief unary negation; returns a negated copy (implemented as Mul(-1.)).
  Tensor operator-() { return this->Mul(-1.); }
1532
1540 Tensor flatten() const {
1541 Tensor out = this->clone();
1542 out.contiguous_();
1543 out.reshape_({-1});
1544 return out;
1545 }
1546
1554 void flatten_() {
1555 this->contiguous_();
1556 this->reshape_({-1});
1557 }
1558
1584 void append(const Tensor &rhs) {
1585 // Tensor in;
1586 if (!this->is_contiguous()) this->contiguous_();
1587
1588 // check Tensor in shape:
1589 cytnx_error_msg(rhs.shape().size() == 0 || this->shape().size() == 0,
1590 "[ERROR] try to append a null Tensor.%s", "\n");
1591 cytnx_error_msg(rhs.shape().size() != (this->shape().size() - 1),
1592 "[ERROR] try to append a Tensor with rank not match.%s", "\n");
1593 cytnx_uint64 Nelem = 1;
1594 for (unsigned int i = 0; i < rhs.shape().size(); i++) {
1595 cytnx_error_msg(rhs.shape()[i] != this->shape()[i + 1],
1596 "[ERROR] dimension mismatch @ rhs.rank: [%d] this: [%d] rhs: [%d]\n", i,
1597 this->shape()[i + 1], rhs.shape()[i]);
1598 Nelem *= rhs.shape()[i];
1599 }
1600
1601 // check type:
1602 Tensor in;
1603 if (rhs.dtype() != this->dtype()) {
1604 in = rhs.astype(this->dtype());
1605 if (!in.is_contiguous()) in.contiguous_();
1606 } else {
1607 if (!in.is_contiguous())
1608 in = rhs.contiguous();
1609 else
1610 in = rhs;
1611 }
1612 this->_impl->_shape[0] += 1;
1613 cytnx_uint64 oldsize = this->_impl->_storage.size();
1614 this->_impl->_storage.resize(oldsize + Nelem);
1615 memcpy(((char *)this->_impl->_storage.data()) +
1616 oldsize * Type.typeSize(this->dtype()) / sizeof(char),
1617 in._impl->_storage.data(), Type.typeSize(in.dtype()) * Nelem);
1618 }
1628 void append(const Storage &srhs) {
1629 if (!this->is_contiguous()) this->contiguous_();
1630
1631 // check Tensor in shape:
1632 cytnx_error_msg(srhs.size() == 0 || this->shape().size() == 0,
1633 "[ERROR] try to append a null Tensor.%s", "\n");
1634 cytnx_error_msg((this->shape().size() - 1) != 1,
1635 "[ERROR] append a storage to Tensor can only accept rank-2 Tensor.%s", "\n");
1636 cytnx_error_msg(this->shape().back() != srhs.size(), "[ERROR] Tensor dmension mismatch!%s",
1637 "\n");
1638
1639 // check type:
1640 Storage in;
1641 if (srhs.dtype() != this->dtype()) {
1642 in = srhs.astype(this->dtype());
1643 } else {
1644 in = srhs;
1645 }
1646 this->_impl->_shape[0] += 1;
1647 cytnx_uint64 oldsize = this->_impl->_storage.size();
1648 this->_impl->_storage.resize(oldsize + in.size());
1649 memcpy(((char *)this->_impl->_storage.data()) +
1650 oldsize * Type.typeSize(this->dtype()) / sizeof(char),
1651 in._impl->Mem, Type.typeSize(in.dtype()) * in.size());
1652 }
1653 /*
1654 void append(const Tensor &rhs){
1655 // convert to the same type.
1656 Tensor in;
1657 if(rhs.dtype() != this->dtype()){
1658 in = rhs.astype(this->dtype());
1659 }else{
1660 in = rhs;
1661 }
1662
1663 // 1) check rank
1664 if(this->shape().size()==1){
1665 // check if rhs is a scalar tensor (only one element)
1666 cytnx_error_msg(!(rhs.shape().size()==1 && rhs.shape()[0]==1),"[ERROR] trying to append
1667 a scalar into multidimentional Tensor is not allow.\n Only rank-1 Tensor can accept scalar
1668 append.%s","\n"); this->_impl->_shape[0]+=1; this->_impl->_storage.append(0);
1669
1670 }else{
1671 cytnx_error_msg(rhs.shape().size() != this->shape().size()-1,"[ERROR] try to append a
1672 Tensor with rank not match.%s","\n");
1673
1674 }
1675 cytnx_error_msg(!this->is_contiguous(),"[ERROR] append require the Tensor to be contiguous.
1676 suggestion: call contiguous() or contiguous_() first.","\n");
1677 }
1678 */
1690 template <class T>
1691 void append(const T &rhs) {
1692 cytnx_error_msg(this->shape().size() != 1,
1693 "[ERROR] trying to append a scalar into multidimentional Tensor is not "
1694 "allow.\n Only rank-1 Tensor can accept scalar append.%s",
1695 "\n");
1697 "[ERROR] append require the Tensor to be contiguous. suggestion: call "
1698 "contiguous() or contiguous_() first.",
1699 "\n");
1700 this->_impl->_shape[0] += 1;
1701 this->_impl->_storage.append(rhs);
1702 }
1703
  /// @brief check whether this Tensor shares the same storage as @p rhs
  ///        (defined in Tensor.cpp).
  bool same_data(const Tensor &rhs) const;

  // linalg:
  /// @brief SVD; same as cytnx::linalg::Svd(*this, is_UvT).
  std::vector<Tensor> Svd(const bool &is_UvT = true) const;

  /// @brief eigen decomposition; same as cytnx::linalg::Eigh(*this, is_V, row_v).
  std::vector<Tensor> Eigh(const bool &is_V = true, const bool &row_v = false) const;

  /// @brief in-place matrix inverse; same as cytnx::linalg::InvM_(*this).
  Tensor &InvM_();

  /// @brief matrix inverse; same as cytnx::linalg::InvM(*this).
  Tensor InvM() const;

  /// @brief in-place inverse with clip; same as cytnx::linalg::Inv_(*this, clip).
  Tensor &Inv_(const double &clip);

  /// @brief inverse with clip; same as cytnx::linalg::Inv(*this, clip).
  Tensor Inv(const double &clip) const;

  /// @brief in-place complex conjugate; same as cytnx::linalg::Conj_(*this).
  Tensor &Conj_();

  /// @brief complex conjugate; same as cytnx::linalg::Conj(*this).
  Tensor Conj() const;

  /// @brief in-place exponential; same as linalg::Exp_(*this).
  Tensor &Exp_();

  /// @brief exponential; same as linalg::Exp(*this).
  Tensor Exp() const;

  /// @brief norm; same as linalg::Norm(*this).
  Tensor Norm() const;

  /// @brief power; same as linalg::Pow(*this, p).
  Tensor Pow(const cytnx_double &p) const;

  /// @brief in-place power; same as linalg::Pow_(*this, p).
  Tensor &Pow_(const cytnx_double &p);

  /// @brief trace over axes a and b; same as linalg::Trace(*this, a, b).
  Tensor Trace(const cytnx_uint64 &a = 0, const cytnx_uint64 &b = 1) const;

  /// @brief absolute value; same as linalg::Abs(*this).
  Tensor Abs() const;

  /// @brief in-place absolute value; same as linalg::Abs_(*this).
  Tensor &Abs_();

  /// @brief maximum element; same as linalg::Max(*this).
  Tensor Max() const;

  /// @brief minimum element; same as linalg::Min(*this).
  Tensor Min() const;

1817 }; // class Tensor
1818
  // binary operators between a Tensor and a Tensor::Tproxy element proxy.
  Tensor operator+(const Tensor &lhs, const Tensor::Tproxy &rhs);
  Tensor operator-(const Tensor &lhs, const Tensor::Tproxy &rhs);
  Tensor operator*(const Tensor &lhs, const Tensor::Tproxy &rhs);
  Tensor operator/(const Tensor &lhs, const Tensor::Tproxy &rhs);

  // binary operators between a Tensor and a Scalar::Sproxy element proxy.
  Tensor operator+(const Tensor &lhs, const Scalar::Sproxy &rhs);
  Tensor operator-(const Tensor &lhs, const Scalar::Sproxy &rhs);
  Tensor operator*(const Tensor &lhs, const Scalar::Sproxy &rhs);
  Tensor operator/(const Tensor &lhs, const Scalar::Sproxy &rhs);

  // stream output for a Tensor and for its element proxy.
  std::ostream &operator<<(std::ostream &os, const Tensor &in);
  std::ostream &operator<<(std::ostream &os, const Tensor::Tproxy &in);
1831 //{ os << Tensor(in);};
1832} // namespace cytnx
1833
1834#endif
object that mimic the python slice to access elements in C++ [this is for c++ API only].
Definition Accessor.hpp:16
a memory storage with multi-type/multi-device support
Definition Storage.hpp:1039
const unsigned int & dtype() const
the dtype-id of current Storage, see cytnx::Type for more details.
Definition Storage.hpp:1191
Storage astype(const unsigned int &new_type) const
cast the type of current Storage
Definition Storage.hpp:1185
const unsigned long long & size() const
the size ( no. of elements ) in the Storage
Definition Storage.hpp:1307
a tensor (multi-dimensional array)
Definition Tensor.hpp:345
void append(const Storage &srhs)
the append function of the Storage.
Definition Tensor.hpp:1628
Tensor & operator*=(const T &rc)
multiplication assignment operator with a Tensor or a scalar.
Tensor & Inv_(const double &clip)
the Inv_ member function. Same as cytnx::linalg::Inv_(Tensor &Tin, const double &clip)
Definition Tensor.cpp:1319
Tensor & operator/=(const T &rc)
division assignment operator with a Tensor or a scalar.
Tensor operator-()
The negation function.
Definition Tensor.hpp:1531
void fill(const T &val)
fill all the element of current Tensor with the value.
Definition Tensor.hpp:1294
Tensor InvM() const
the InvM member function. Same as cytnx::linalg::InvM(const Tensor &Tin), where Tin is the current Te...
Definition Tensor.cpp:1318
bool same_data(const Tensor &rhs) const
Definition Tensor.cpp:1358
void to_(const int &device)
move the current Tensor to the device.
Definition Tensor.hpp:869
Tensor reshape(const std::vector< cytnx_uint64 > &new_shape) const
Definition Tensor.hpp:1028
Tensor(const std::vector< cytnx_uint64 > &shape, const unsigned int &dtype=Type.Double, const int &device=-1, const bool &init_zero=1)
Construct a new Tensor object.
Definition Tensor.hpp:739
void append(const T &rhs)
the append function of the scalar.
Definition Tensor.hpp:1691
Tensor & operator-=(const T &rc)
subtraction assignment operator with a Tensor or a scalar.
Tensor & Add_(const T &rhs)
Addition function with a Tensor or a scalar, inplacely. Same as operator+=(const T &rhs).
Definition Tensor.hpp:1437
Tensor Abs() const
the Abs member function. Same as linalg::Abs(const Tensor &Tin), where Tin is the current Tensor.
Definition Tensor.cpp:1349
Tensor reshape(const std::initializer_list< cytnx_int64 > &new_shape) const
Definition Tensor.hpp:1039
std::string device_str() const
the device (in string) of the Tensor
Definition Tensor.hpp:789
void reshape_(const std::vector< cytnx_int64 > &new_shape)
reshape the Tensor, inplacely
Definition Tensor.hpp:978
Tensor contiguous_()
Make the Tensor contiguous by coalescing the memory (storage), inplacely.
Definition Tensor.hpp:952
static Tensor Load(const std::string &fname)
Load current Tensor from file.
Definition Tensor.cpp:937
Tensor permute_(const std::vector< cytnx_uint64 > &rnks)
Definition Tensor.hpp:873
Tensor Mul(const T &rhs)
Multiplication function with a Tensor or a scalar. Same as cytnx::operator*(const Tensor &self,...
Definition Tensor.hpp:1467
unsigned int dtype() const
the dtype-id of the Tensor
Definition Tensor.hpp:768
Tensor Sub(const T &rhs)
Subtraction function with a Tensor or a scalar. Same as cytnx::operator-(const Tensor &self,...
Definition Tensor.hpp:1447
Tensor Inv(const double &clip) const
the Inv member function. Same as cytnx::linalg::Inv(const Tensor &Tin, const double &clip)
Definition Tensor.cpp:1323
Tensor contiguous() const
Make the Tensor contiguous by coalescing the memory (storage).
Definition Tensor.hpp:932
void Tofile(const std::string &fname) const
Definition Tensor.cpp:867
T & at(const std::vector< cytnx_uint64 > &locator)
[C++ only] get an element at specific location.
Definition Tensor.hpp:1101
Tensor reshape(const std::vector< cytnx_int64 > &new_shape) const
return a new Tensor that is reshaped.
Definition Tensor.hpp:1019
T & item()
get an element from a rank-0 Tensor
Definition Tensor.hpp:1155
Tensor clone() const
return a clone of the current Tensor.
Definition Tensor.hpp:820
std::vector< Tensor > Eigh(const bool &is_V=true, const bool &row_v=false) const
the Eigh member function. Same as cytnx::linalg::Eigh(const Tensor &Tin, const bool &is_V,...
Definition Tensor.cpp:1310
void append(const Tensor &rhs)
the append function.
Definition Tensor.hpp:1584
void set(const std::vector< cytnx::Accessor > &accessors, const Tensor &rhs)
set elements with the input Tensor using Accessor (C++ API) / slices (python API)
Definition Tensor.hpp:1234
Tensor Norm() const
the Norm member function. Same as linalg::Norm(const Tensor &Tin), where Tin is the current Tensor.
Definition Tensor.cpp:1336
Tensor astype(const int &new_type) const
return a new Tensor that cast to different dtype.
Definition Tensor.hpp:1068
Tensor & Div_(const T &rhs)
Division function with a Tensor or a scalar, inplacely. Same as operator/=(const T &rhs).
Definition Tensor.hpp:1499
Tensor & operator+=(const T &rc)
addition assignment operator with a Tensor or a scalar.
Tensor Conj() const
the Conj member function. Same as cytnx::linalg::Conj(const Tensor &Tin), where Tin is the current Te...
Definition Tensor.cpp:1329
Tensor Trace(const cytnx_uint64 &a=0, const cytnx_uint64 &b=1) const
the Trace member function. Same as linalg::Trace(const Tensor &Tin, const cytnx_uint64 &a,...
Definition Tensor.cpp:1353
Tensor & Pow_(const cytnx_double &p)
the Pow_ member function. Same as linalg::Pow_(Tensor &Tin, const cytnx_double &p),...
Definition Tensor.cpp:1340
std::vector< Tensor > Svd(const bool &is_UvT=true) const
the SVD member function. Same as cytnx::linalg::Svd(const Tensor &Tin, const bool &is_UvT) ,...
Definition Tensor.cpp:1307
std::string dtype_str() const
the dtype (in string) of the Tensor
Definition Tensor.hpp:782
Tensor & Mul_(const T &rhs)
Multiplication function with a Tensor or a scalar, inplacely. Same as operator*=(const T &rhs).
Definition Tensor.hpp:1477
cytnx_uint64 rank() const
the rank of the Tensor
Definition Tensor.hpp:801
const bool & is_contiguous() const
Definition Tensor.hpp:871
Tensor Exp() const
the Exp member function. Same as linalg::Exp(const Tensor &Tin), where Tin is the current Tensor.
Definition Tensor.cpp:1335
Tensor & Abs_()
the Abs_ member function. Same as linalg::Abs_(Tensor &Tin), where Tin is the current Tensor.
Definition Tensor.cpp:1345
Tensor Add(const T &rhs)
Addition function with a Tensor or a scalar. Same as cytnx::operator+(const Tensor &self,...
Definition Tensor.hpp:1427
void flatten_()
The flatten function, inplacely.
Definition Tensor.hpp:1554
void Save(const std::string &fname) const
Save current Tensor to file.
Definition Tensor.cpp:891
Tensor flatten() const
The flatten function.
Definition Tensor.hpp:1540
Tensor & Conj_()
the Conj_ member function. Same as cytnx::linalg::Conj_(Tensor &Tin), where Tin is the current Tensor...
Definition Tensor.cpp:1325
Tensor Pow(const cytnx_double &p) const
the Pow member function. Same as linalg::Pow(const Tensor &Tin, const cytnx_double &p),...
Definition Tensor.cpp:1338
int device() const
the device-id of the Tensor
Definition Tensor.hpp:775
Tensor real()
return the real part of the tensor.
Definition Tensor.cpp:985
Tensor imag()
return the imaginary part of the tensor.
Definition Tensor.cpp:992
Tensor to(const int &device) const
copy a tensor to new device
Definition Tensor.hpp:846
void Tofile(std::fstream &f) const
Tensor get(const std::vector< cytnx::Accessor > &accessors) const
get elements using Accessor (C++ API) / slices (python API)
Definition Tensor.hpp:1202
void set(const std::vector< cytnx::Accessor > &accessors, const T &rc)
set elements with the input constant using Accessor (C++ API) / slices (python API)
Definition Tensor.hpp:1257
Tensor Max() const
the Max member function. Same as linalg::Max(const Tensor &Tin), where Tin is the current Tensor.
Definition Tensor.cpp:1350
Tensor permute(const std::vector< cytnx_uint64 > &rnks) const
perform tensor permute on the cytnx::Tensor and return a new instance.
Definition Tensor.hpp:904
Tensor Div(const T &rhs)
Division function with a Tensor or a scalar. Same as cytnx::operator/(const Tensor &self,...
Definition Tensor.hpp:1488
Tensor Mod(const T &rhs)
Definition Tensor.hpp:1521
bool equiv(const Tensor &rhs)
compare the shape of two tensors.
Definition Tensor.hpp:1302
void Init(const std::vector< cytnx_uint64 > &shape, const unsigned int &dtype=Type.Double, const int &device=-1, const bool &init_zero=true)
initialize a Tensor
Definition Tensor.hpp:709
Tensor Cpr(const T &rhs)
The comparison function.
Definition Tensor.hpp:1510
Tensor & Exp_()
the Exp_ member function. Same as linalg::Exp_(Tensor &Tin), where Tin is the current Tensor.
Definition Tensor.cpp:1331
Tensor & InvM_()
the InvM_ member function. Same as cytnx::linalg::InvM_(Tensor &Tin), where Tin is the current Tensor...
Definition Tensor.cpp:1314
const std::vector< cytnx_uint64 > & shape() const
the shape of the Tensor
Definition Tensor.hpp:795
Tensor Min() const
the Min member function. Same as linalg::Min(const Tensor &Tin), where Tin is the current Tensor.
Definition Tensor.cpp:1351
const T & at(const std::vector< cytnx_uint64 > &locator) const
Definition Tensor.hpp:1109
Storage & storage() const
return the storage of current Tensor.
Definition Tensor.hpp:1277
static Tensor from_storage(const Storage &in)
Definition Tensor.hpp:755
static Tensor Fromfile(const std::string &fname, const unsigned int &dtype, const cytnx_int64 &count=-1)
Definition Tensor.cpp:930
Tensor & Sub_(const T &rhs)
Subtraction function with a Tensor or a scalar, inplacely. Same as operator-=(const T &rhs).
Definition Tensor.hpp:1457
#define cytnx_error_msg(is_true, format,...)
Definition cytnx_error.hpp:16
Definition Accessor.hpp:12
Device_class Device
data on which devices.
Definition Device.cpp:140
cytnx::UniTensor operator*(const cytnx::UniTensor &Lt, const cytnx::UniTensor &Rt)
The multiplication operator between two UniTensor.
double cytnx_double
Definition Type.hpp:43
uint32_t cytnx_uint32
Definition Type.hpp:46
bool cytnx_bool
Definition Type.hpp:54
std::complex< double > cytnx_complex128
Definition Type.hpp:53
float cytnx_float
Definition Type.hpp:44
std::ostream & operator<<(std::ostream &os, const Scalar &in)
The stream operator for Scalar objects.
Definition Scalar.cpp:10
int16_t cytnx_int16
Definition Type.hpp:50
std::complex< float > cytnx_complex64
Definition Type.hpp:52
cytnx::UniTensor operator-(const cytnx::UniTensor &Lt, const cytnx::UniTensor &Rt)
The subtraction operator between two UniTensor.
int32_t cytnx_int32
Definition Type.hpp:49
uint16_t cytnx_uint16
Definition Type.hpp:47
uint64_t cytnx_uint64
Definition Type.hpp:45
int64_t cytnx_int64
Definition Type.hpp:48
Storage_init_interface __SII
Definition Storage.cpp:12
Type_class Type
data type
Definition Type.cpp:23
cytnx::UniTensor operator+(const cytnx::UniTensor &Lt, const cytnx::UniTensor &Rt)
The addition operator between two UniTensor.
cytnx::UniTensor operator/(const cytnx::UniTensor &Lt, const cytnx::UniTensor &Rt)
The division operator between two UniTensor.