Cytnx v0.7.6
Tensor.hpp
1#ifndef _H_Tensor_
2#define _H_Tensor_
3
4#include "Type.hpp"
5#include "cytnx_error.hpp"
6#include "Storage.hpp"
7#include "Device.hpp"
8#include "intrusive_ptr_base.hpp"
9#include <iostream>
10#include <fstream>
11#include "utils/vec_range.hpp"
12#include "utils/dynamic_arg_resolver.hpp"
13//#include "linalg.hpp"
14#include "Accessor.hpp"
15#include <utility>
16#include <vector>
17#include <initializer_list>
18#include <string>
19#include "Scalar.hpp"
20
21namespace cytnx{
22
23
25 // real implementation
26 class Tensor_impl: public intrusive_ptr_base<Tensor_impl>{
27 private:
28
29 //Interface:
30 Storage_init_interface __SII;
31
32
33 //Memory:
34 Storage _storage;
35
36 //tensor shape
37 std::vector<cytnx_uint64> _shape;
38
39 // lazy-permutation bookkeeping: _mapper/_invmapper record how the current view axes map to the storage axes; permute only updates these maps, the data are moved only by contiguous()/contiguous_()
40 std::vector<cytnx_uint64> _mapper;
41 std::vector<cytnx_uint64> _invmapper;
42 bool _contiguous;
43
44 public:
45
46 friend class Tensor;
47 boost::intrusive_ptr<Tensor_impl> _clone_meta_only() const{
48 boost::intrusive_ptr<Tensor_impl> out(new Tensor_impl());
49 out->_mapper = this->_mapper;
50 out->_invmapper = this->_invmapper;
51 out->_shape = this->_shape;
52 out->_contiguous = this->_contiguous;
53 return out;
54 }
55 Tensor_impl(): _contiguous(true){};
56
57 void Init(const std::vector<cytnx_uint64> &shape, const unsigned int &dtype=Type.Double, int device=-1);
58 void Init(const Storage &in);
59 /*
60 template<class T>
61 void From_vec(const T &ndvec){
62 cytnx_error_msg(std::string(typeid(T).name()).find("vector") == std::string::npos,"[ERROR][Tensor][From_vec] the input argument should be a nd vector.%s","\n");
63 //dispatch the rank!:
64
65
66
67 }
68 */
69 // copy constructor & copy assignment; both rely on intrusive_ptr
70 Tensor_impl(const Tensor_impl &rhs);
71 Tensor_impl& operator=(const Tensor_impl &rhs); // add const
72
73 unsigned int dtype() const{
74 return this->_storage.dtype();
75 }
76 int device() const {
77 return this->_storage.device();
78 }
79
80 std::string dtype_str() const {
81 return Type.getname(this->_storage.dtype());
82 }
83 std::string device_str() const{
84 return Device.getname(this->_storage.device());
85 }
86
87 const std::vector<cytnx_uint64>& shape() const{
88 return _shape;
89 }
90
91 const bool& is_contiguous() const{
92 return this->_contiguous;
93 }
94
95 const std::vector<cytnx_uint64>& mapper() const{
96 return this->_mapper;
97 }
98 const std::vector<cytnx_uint64>& invmapper() const{
99 return this->_invmapper;
100 }
101 Storage& storage(){
102 return _storage;
103 }
104
105 const Storage& storage() const{
106 return _storage;
107 }
108
109 boost::intrusive_ptr<Tensor_impl> clone() const{
110 boost::intrusive_ptr<Tensor_impl> out = this->_clone_meta_only();
111 out->_storage = this->_storage.clone();
112 return out;
113 }
114
115
116 void to_(const int &device){
117 this->_storage.to_(device);
118 }
119 boost::intrusive_ptr<Tensor_impl> to(const int &device){
120 if(this->device()==device){
121 //boost::intrusive_ptr<Tensor_impl> out(this);
122 return this;
123 }else{
124
125 boost::intrusive_ptr<Tensor_impl> out = this->_clone_meta_only();
126 out->_storage = this->_storage.to(device);
127 return out;
128 }
129 }
130
131 void permute_(const std::vector<cytnx_uint64> &rnks);
132
133 boost::intrusive_ptr<Tensor_impl> permute(const std::vector<cytnx_uint64> &rnks);
134
135 template<class T>
136 T& at(const std::vector<cytnx_uint64> &locator) const {
137 cytnx_error_msg(locator.size() != this->_shape.size(), "%s", "The input index does not match Tensor's rank.");
138
139 cytnx_uint64 RealRank,mtplyr;
140 //std::vector<cytnx_uint64> c_shape(this->_shape.size());
141 //std::vector<cytnx_uint64> c_loc(this->_shape.size());
142 cytnx_uint64 c_shape,c_loc;
143
144 RealRank=0;
145 mtplyr = 1;
146
147 for(cytnx_int64 i=this->_shape.size()-1; i>=0; i--){
148 if(locator[i]>=this->_shape[i]){
149 cytnx_error_msg(true, "%s", "Attempting to access out-of-bound index in Tensor.");
150 }
151 //c_shape[i] = this->_shape[this->_invmapper[i]];
152 //c_loc[i] = locator[this->_invmapper[i]];
153 c_shape = this->_shape[this->_invmapper[i]];
154 c_loc = locator[this->_invmapper[i]];
155 RealRank += mtplyr*c_loc;
156 mtplyr *= c_shape;
157 }
158 return this->_storage.at<T>(RealRank);
159 }
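// Illustrative offset arithmetic for the index computation in at() above (identity
// mapper, i.e. a contiguous tensor): with _shape = {2,3,4} and locator = {1,2,3},
// RealRank = 3 + 4*2 + (4*3)*1 = 23, the row-major linear index into _storage.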
160
161 const Scalar::Sproxy at(const std::vector<cytnx_uint64> &locator) const{
162 cytnx_error_msg(locator.size() != this->_shape.size(), "%s", "The input index does not match Tensor's rank.");
163
164 cytnx_uint64 RealRank,mtplyr;
165 //std::vector<cytnx_uint64> c_shape(this->_shape.size());
166 //std::vector<cytnx_uint64> c_loc(this->_shape.size());
167
168 cytnx_uint64 c_shape,c_loc;
169 RealRank=0;
170 mtplyr = 1;
171
172 for(cytnx_int64 i=this->_shape.size()-1; i>=0; i--){
173 if(locator[i]>=this->_shape[i]){
174 cytnx_error_msg(true, "%s", "Attempting to access out-of-bound index in Tensor.");
175 }
176 //c_shape[i] = this->_shape[this->_invmapper[i]];
177 //c_loc[i] = locator[this->_invmapper[i]];
178 c_shape = this->_shape[this->_invmapper[i]];
179 c_loc = locator[this->_invmapper[i]];
180 RealRank += mtplyr*c_loc;
181 mtplyr *= c_shape;
182 }
183 return this->_storage.at(RealRank);
184 }
185
186 Scalar::Sproxy at(const std::vector<cytnx_uint64> &locator){
187 cytnx_error_msg(locator.size() != this->_shape.size(), "%s", "The input index does not match Tensor's rank.");
188
189 cytnx_uint64 RealRank,mtplyr;
190 //std::vector<cytnx_uint64> c_shape(this->_shape.size());
191 //std::vector<cytnx_uint64> c_loc(this->_shape.size());
192 cytnx_uint64 c_shape,c_loc;
193
194 RealRank=0;
195 mtplyr = 1;
196
197 for(cytnx_int64 i=this->_shape.size()-1; i>=0; i--){
198 if(locator[i]>=this->_shape[i]){
199 cytnx_error_msg(true, "%s", "Attempting to access out-of-bound index in Tensor.");
200 }
201 //c_shape[i] = this->_shape[this->_invmapper[i]];
202 //c_loc[i] = locator[this->_invmapper[i]];
203 c_shape = this->_shape[this->_invmapper[i]];
204 c_loc = locator[this->_invmapper[i]];
205 RealRank += mtplyr*c_loc;
206 mtplyr *= c_shape;
207 }
208 return this->_storage.at(RealRank);
209 }
210
211 boost::intrusive_ptr<Tensor_impl> get(const std::vector<cytnx::Accessor> &accessors);
212 boost::intrusive_ptr<Tensor_impl> get_deprecated(const std::vector<cytnx::Accessor> &accessors);
213 void set(const std::vector<cytnx::Accessor> &accessors, const boost::intrusive_ptr<Tensor_impl> &rhs);
214
215 template<class T>
216 void set(const std::vector<cytnx::Accessor> &accessors, const T& rc);
217
218 void set(const std::vector<cytnx::Accessor> &accessors, const Scalar::Sproxy& rc);
219
220 template<class Tx>
221 void fill(const Tx& val){
222 this->storage().fill(val);
223 }
224
225 boost::intrusive_ptr<Tensor_impl> contiguous(){
226 // returns a new instance if called on a non-contiguous tensor
227 // returns itself if the tensor is already contiguous
228 if(this->_contiguous){
229 boost::intrusive_ptr<Tensor_impl> out(this);
230 //out->_storage = this->_storage;
231 return out;
232 }else{
233 boost::intrusive_ptr<Tensor_impl> out(new Tensor_impl());
234 std::vector<cytnx_uint64> oldshape(this->_shape.size());
235 for(cytnx_uint64 i=0;i<this->_shape.size();i++){
236 oldshape[i] = this->_shape[this->_invmapper[i]];
237 }
238
239 out->_storage._impl = this->_storage._impl->Move_memory(oldshape,this->_mapper, this->_invmapper);
240 //std::cout << out->_storage << std::endl;
241 out->_invmapper = vec_range(this->_invmapper.size());
242 out->_mapper = out->_invmapper;
243 out->_shape = this->_shape;
244 out->_contiguous = true;
245 return out;
246 }
247 }
248
249 void contiguous_(){
250 // in-place version: coalesces the memory if non-contiguous,
251 // no-op if the tensor is already contiguous
252 if(!this->_contiguous){
253 std::vector<cytnx_uint64> oldshape(this->_shape.size());
254 for(cytnx_uint64 i=0;i<this->_shape.size();i++){
255 oldshape[i] = this->_shape[this->_invmapper[i]];
256 }
257
258 this->_storage._impl = this->_storage._impl->Move_memory(oldshape,this->_mapper, this->_invmapper);
259 this->_mapper = vec_range(this->_invmapper.size());
260 this->_invmapper = this->_mapper;
261 this->_contiguous = true;
262 }
263 }
264
265
266 void reshape_(const std::vector<cytnx_int64> &new_shape){
267 if(!this->_contiguous){
268 this->contiguous_();
269 }
270 std::vector<cytnx_uint64> result_shape(new_shape.size());
271 cytnx_uint64 new_N = 1;
272 bool has_undetermine = false;
273 unsigned int Udet_id = 0;
274 for(int i=0;i<new_shape.size();i++){
275 if(new_shape[i]<0){
276 cytnx_error_msg(new_shape[i]!=-1,"%s","[ERROR] reshape dimensions must be > 0, with at most one undetermined rank specified as -1");
277 cytnx_error_msg(has_undetermine,"%s","[ERROR] reshape can take at most one undetermined rank (-1)");
278 Udet_id = i;
279 has_undetermine = true;
280 }else{
281 new_N *= new_shape[i];
282 result_shape[i] = new_shape[i];
283 }
284 }
285
286
287 if(has_undetermine){
288 cytnx_error_msg(new_N > this->_storage.size(),"%s","[ERROR] new shape exceeds the total number of elements.");
289 cytnx_error_msg(this->_storage.size()%new_N,"%s","[ERROR] size mismatch when reshaping with an undetermined dimension");
290 result_shape[Udet_id] = this->_storage.size()/new_N;
291 }else{
292 cytnx_error_msg(new_N != this->_storage.size(),"%s","[ERROR] new shape does not match the number of elements.");
293 }
294
295 this->_shape = result_shape;
296 this->_mapper = vec_range(result_shape.size());
297 this->_invmapper = this->_mapper;
298 }
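// Illustrative: for a tensor holding 24 elements, reshape_({4,-1}) infers the -1
// dimension as 6; at most one -1 entry is allowed.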
299
300
301
302 boost::intrusive_ptr<Tensor_impl> reshape(const std::vector<cytnx_int64> &new_shape){
303 boost::intrusive_ptr<Tensor_impl> out(new Tensor_impl());
304 if(this->is_contiguous()){
305 out = this->_clone_meta_only();
306 out->_storage = this->_storage;
307 }else{
308 out = this->contiguous();
309 }
310 //out = this->clone();
311
312 out->reshape_(new_shape);
313 return out;
314 }
315
316
317
318 boost::intrusive_ptr<Tensor_impl> astype(const int& new_type){
319 //boost::intrusive_ptr<Tensor_impl> out(new Tensor_impl());
320 //out->_storage = this->_storage.astype(new_type);
321 if(this->dtype() == new_type){
322 return this;
323 }else{
324 boost::intrusive_ptr<Tensor_impl> out = this->_clone_meta_only();
325 out->_storage = this->_storage.astype(new_type);
326 return out;
327 }
328 }
329
330 };
332
333 class Tensor;
334
336 // [Note] these are fwd from linalg.hpp
337 template<class T> Tensor operator+(const Tensor &lhs, const T &rc);
338 template<class T> Tensor operator-(const Tensor &lhs, const T &rhs);
339 template<class T> Tensor operator*(const Tensor &lhs, const T &rhs);
340 template<class T> Tensor operator/(const Tensor &lhs, const T &rhs);
342
344 class Tensor{
345 private:
346 public:
347
349 // proxy class that allows getting/setting elements with [] / (), similar to Python indexing
350 struct Tproxy
351 {
352 boost::intrusive_ptr<Tensor_impl> _insimpl;
353 std::vector<cytnx::Accessor> _accs;
354 Tproxy(boost::intrusive_ptr<Tensor_impl> _ptr,const std::vector<cytnx::Accessor> &accs) : _insimpl(std::move(_ptr)), _accs(accs){}
355
356 // when used to set elems:
357 const Tensor& operator=(const Tensor &rhs){
358 this->_insimpl->set(_accs,rhs._impl);
359 return rhs;
360 }
361
362 template<class T>
363 const T& operator=(const T &rc){
364 this->_insimpl->set(_accs,rc);
365 return rc;
366 }
367 const Tproxy& operator=(const Tproxy &rc){
368 Tensor tmp = Tensor(rc);
369 this->_insimpl->set(_accs,tmp._impl);
370 return rc;
371 }
372
373
374 template<class T>
375 Tensor operator+=(const T &rc){
376 Tensor self;
377 self._impl = _insimpl->get(_accs);
378 self += rc;
379 _insimpl->set(_accs,self._impl);
380 self._impl = this->_insimpl;
381 return self;
382 }
383 Tensor operator+=(const Tproxy &rc);
384
385 template<class T>
386 Tensor operator-=(const T &rc){
387 Tensor self;
388 self._impl = _insimpl->get(_accs);
389 self -= rc;
390 _insimpl->set(_accs,self._impl);
391 self._impl = this->_insimpl;
392 return self;
393 }
394 Tensor operator-=(const Tproxy &rc);
395
396 template<class T>
397 Tensor operator/=(const T &rc){
398 Tensor self;
399 self._impl = _insimpl->get(_accs);
400 self /= rc;
401 _insimpl->set(_accs,self._impl);
402 self._impl = this->_insimpl;
403 return self;
404 }
405 Tensor operator/=(const Tproxy &rc);
406
407 template<class T>
408 Tensor operator*=(const T &rc){
409 Tensor self;
410 self._impl = _insimpl->get(_accs);
411 self *= rc;
412 _insimpl->set(_accs,self._impl);
413 self._impl = this->_insimpl;
414 return self;
415 }
416 Tensor operator*=(const Tproxy &rc);
417
418
419 //alias to resolve conflict with op ovld for rc=Tensor
420 /*
421 template<class T>
422 Tensor _operatorADD(const T &rc) const{
423 Tensor out;
424 out._impl = _insimpl->get(_accs);
425 return out.Add(rc);
426 }
427 */
428 Tensor operator+(const cytnx_complex128 &rc) const;//{return this->_operatorADD(rc);};
429 Tensor operator+(const cytnx_complex64 &rc) const;//{return this->_operatorADD(rc);};
430 Tensor operator+(const cytnx_double &rc) const;//{return this->_operatorADD(rc);};
431 Tensor operator+(const cytnx_float &rc) const;//{return this->_operatorADD(rc);};
432 Tensor operator+(const cytnx_uint64 &rc) const;//{return this->_operatorADD(rc);};
433 Tensor operator+(const cytnx_int64 &rc) const;//{return this->_operatorADD(rc);};
434 Tensor operator+(const cytnx_uint32 &rc) const;//{return this->_operatorADD(rc);};
435 Tensor operator+(const cytnx_int32 &rc) const;//{return this->_operatorADD(rc);};
436 Tensor operator+(const cytnx_uint16 &rc) const;//{return this->_operatorADD(rc);};
437 Tensor operator+(const cytnx_int16 &rc) const;//{return this->_operatorADD(rc);};
438 Tensor operator+(const cytnx_bool &rc) const;//{return this->_operatorADD(rc);};
439 Tensor operator+(const Tproxy &rc) const;
440
441 /*
442 template<class T>
443 Tensor _operatorSUB(const T &rc) const{
444 Tensor out;
445 out._impl = _insimpl->get(_accs);
446 return out.Sub(rc);
447 }
448 */
449 Tensor operator-(const cytnx_complex128 &rc) const;//{return this->_operatorSUB(rc);};
450 Tensor operator-(const cytnx_complex64 &rc) const;//{return this->_operatorSUB(rc);};
451 Tensor operator-(const cytnx_double &rc) const;//{return this->_operatorSUB(rc);};
452 Tensor operator-(const cytnx_float &rc) const;//{return this->_operatorSUB(rc);};
453 Tensor operator-(const cytnx_uint64 &rc) const;//{return this->_operatorSUB(rc);};
454 Tensor operator-(const cytnx_int64 &rc) const;//{return this->_operatorSUB(rc);};
455 Tensor operator-(const cytnx_uint32 &rc) const;//{return this->_operatorSUB(rc);};
456 Tensor operator-(const cytnx_int32 &rc) const;//{return this->_operatorSUB(rc);};
457 Tensor operator-(const cytnx_uint16 &rc) const;//{return this->_operatorSUB(rc);};
458 Tensor operator-(const cytnx_int16 &rc) const;//{return this->_operatorSUB(rc);};
459 Tensor operator-(const cytnx_bool &rc) const;//{return this->_operatorSUB(rc);};
460 Tensor operator-(const Tproxy &rc) const;
461
462 Tensor operator-() const;
463
464 /*
465 template<class T>
466 Tensor _operatorMUL(const T &rc) const{
467 Tensor out;
468 out._impl = _insimpl->get(_accs);
469 return out.Mul(rc);
470 }
471 */
472 Tensor operator*(const cytnx_complex128 &rc) const;//{return this->_operatorMUL(rc);};
473 Tensor operator*(const cytnx_complex64 &rc) const;//{return this->_operatorMUL(rc);};
474 Tensor operator*(const cytnx_double &rc) const;//{return this->_operatorMUL(rc);};
475 Tensor operator*(const cytnx_float &rc) const;//{return this->_operatorMUL(rc);};
476 Tensor operator*(const cytnx_uint64 &rc) const;//{return this->_operatorMUL(rc);};
477 Tensor operator*(const cytnx_int64 &rc) const;//{return this->_operatorMUL(rc);};
478 Tensor operator*(const cytnx_uint32 &rc) const;//{return this->_operatorMUL(rc);};
479 Tensor operator*(const cytnx_int32 &rc) const;//{return this->_operatorMUL(rc);};
480 Tensor operator*(const cytnx_uint16 &rc) const;//{return this->_operatorMUL(rc);};
481 Tensor operator*(const cytnx_int16 &rc) const;//{return this->_operatorMUL(rc);};
482 Tensor operator*(const cytnx_bool &rc) const;//{return this->_operatorMUL(rc);};
483 Tensor operator*(const Tproxy &rc) const;
484
485 /*
486 template<class T>
487 Tensor _operatorDIV(const T &rc) const{
488 Tensor out;
489 out._impl = _insimpl->get(_accs);
490 return out.Div(rc);
491 }
492 */
493 Tensor operator/(const cytnx_complex128 &rc) const;//{return this->_operatorDIV(rc);};
494 Tensor operator/(const cytnx_complex64 &rc) const;//{return this->_operatorDIV(rc);};
495 Tensor operator/(const cytnx_double &rc) const;//{return this->_operatorDIV(rc);};
496 Tensor operator/(const cytnx_float &rc) const;//{return this->_operatorDIV(rc);};
497 Tensor operator/(const cytnx_uint64 &rc) const;//{return this->_operatorDIV(rc);};
498 Tensor operator/(const cytnx_int64 &rc) const;//{return this->_operatorDIV(rc);};
499 Tensor operator/(const cytnx_uint32 &rc) const;//{return this->_operatorDIV(rc);};
500 Tensor operator/(const cytnx_int32 &rc) const;//{return this->_operatorDIV(rc);};
501 Tensor operator/(const cytnx_uint16 &rc) const;//{return this->_operatorDIV(rc);};
502 Tensor operator/(const cytnx_int16 &rc) const;//{return this->_operatorDIV(rc);};
503 Tensor operator/(const cytnx_bool &rc) const;//{return this->_operatorDIV(rc);};
504 Tensor operator/(const Tproxy &rc) const;
505
506 template<class T>
507 T item() const{
508 Tensor out;
509 out._impl = _insimpl->get(_accs);
510 return out.item<T>();
511 }
512
513 Scalar::Sproxy item() const{
514 Tensor out;
515 out._impl = _insimpl->get(_accs);
516 return out.item();
517 }
518
519 // when used to get elems:
520 operator Tensor () const{
521 Tensor out;
522 out._impl = _insimpl->get(_accs);
523 return out;
524 }
525
526
527 Storage storage() const{
528 Tensor out;
529 out._impl = _insimpl->get(_accs);
530 return out.storage();
531 }
532
533 };// proxy class of Tensor.
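// Illustrative proxy usage (A is a hypothetical rank-2 Tensor):
//   Tensor row0 = A[{Accessor(0), Accessor::all()}];  // get: the proxy converts to a new Tensor
//   A[{Accessor(0), Accessor::all()}] = 1.0;          // set: the proxy forwards to Tensor_impl::set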
534
536
538 // the operator() overloads below provide Python-style indexing (integers, strings such as ":", and Accessors)
539 //----------------------------------------
540 template<class ... Ts>
541 Tproxy operator()(const std::string &e1, const Ts&...elems){
542 //std::cout << e1 << std::endl;
543 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1,elems...);
544 return (*this)[tmp];
545 }
546 template<class ... Ts>
547 Tproxy operator()(const cytnx_int64 &e1, const Ts&...elems){
548 //std::cout << e1<< std::endl;
549 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1,elems...);
550 return (*this)[tmp];
551 }
552 template<class ... Ts>
553 Tproxy operator()(const cytnx::Accessor &e1, const Ts&...elems){
554 //std::cout << e1 << std::endl;
555 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1,elems...);
556 return (*this)[tmp];
557 }
558 template<class ... Ts>
559 const Tproxy operator()(const std::string &e1, const Ts&...elems) const{
560 //std::cout << e1 << std::endl;
561 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1,elems...);
562 return (*this)[tmp];
563 }
564 template<class ... Ts>
565 const Tproxy operator()(const cytnx_int64 &e1, const Ts&...elems) const{
566 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1,elems...);
567 return (*this)[tmp];
568 }
569 template<class ... Ts>
570 const Tproxy operator()(const cytnx::Accessor &e1, const Ts&...elems) const{
571 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1,elems...);
572 return (*this)[tmp];
573 }
574
575 //-----------------------------------------
576
577
578 Tproxy operator[](const std::initializer_list<cytnx::Accessor> &accs){
579 std::vector<cytnx::Accessor> tmp = accs;
580 return (*this)[tmp];
581 }
582 Tproxy operator[](const std::vector<cytnx::Accessor> &accs){
583 return Tproxy(this->_impl,accs);
584 }
585
586
587 const Tproxy operator[](const std::vector<cytnx::Accessor> &accs) const{
588 return Tproxy(this->_impl,accs);
589 }
590 const Tproxy operator[](const std::initializer_list<cytnx::Accessor> &accs) const{
591 std::vector<cytnx::Accessor> tmp = accs;
592 return (*this)[tmp];
593 }
594
595 Tproxy operator[](const std::initializer_list<cytnx_int64> &accs){
596 std::vector<cytnx_int64> tmp = accs;
597 return (*this)[tmp];
598 }
599 Tproxy operator[](const std::vector<cytnx_int64> &accs){
600 std::vector<cytnx::Accessor> acc_in;
601 for(int i=0;i<accs.size();i++){
602 acc_in.push_back(cytnx::Accessor(accs[i]));
603 }
604 return Tproxy(this->_impl,acc_in);
605 }
606 const Tproxy operator[](const std::initializer_list<cytnx_int64> &accs) const{
607 std::vector<cytnx_int64> tmp = accs;
608 return (*this)[tmp];
609 }
610 const Tproxy operator[](const std::vector<cytnx_int64> &accs) const{
611 std::vector<cytnx::Accessor> acc_in;
612 for(int i=0;i<accs.size();i++){
613 acc_in.push_back(cytnx::Accessor(accs[i]));
614 }
615 return Tproxy(this->_impl,acc_in);
616 }
618 //-------------------------------------------
619
620
622 void _Save(std::fstream &f) const;
623 void _Load(std::fstream &f);
624
626
634 void Save(const std::string &fname) const;
635 void Save(const char* fname) const;
636 void Tofile(const std::string &fname) const;
637 void Tofile(const char* fname) const;
638 void Tofile(std::fstream &f) const;
647 static Tensor Load(const std::string &fname);
648 static Tensor Load(const char* fname);
649 static Tensor Fromfile(const std::string &fname, const unsigned int &dtype, const cytnx_int64 &count=-1);
650 static Tensor Fromfile(const char* fname, const unsigned int &dtype, const cytnx_int64 &count=-1);
651
652 //static Tensor Frombinary(const std::string &fname);
653
654
655
657 boost::intrusive_ptr<Tensor_impl> _impl;
658 Tensor(): _impl(new Tensor_impl()){};
659 Tensor(const Tensor &rhs){
660 _impl = rhs._impl;
661 }
662
663 /*
664 template<class Tp>
665 Tensor(const std::initializer_list<Tp> &rhs){
666 Storage stmp = std::vector<Tp>(rhs);
667 boost::intrusive_ptr<Tensor_impl> tmp(new Tensor_impl());
668 tmp->Init(stmp);
669 this->_impl = tmp;
670 }
671 */
672
673 Tensor& operator=(const Tensor &rhs){
674 _impl = rhs._impl;
675 return *this;
676 }
677
678 void operator=(const Tproxy &rhsp){ // this is used to handle proxy assignment
679 this->_impl = rhsp._insimpl->get(rhsp._accs);
680
681 }
683
685 //default device==Device.cpu (-1)
705 void Init(const std::vector<cytnx_uint64> &shape, const unsigned int &dtype=Type.Double, const int &device=-1){
706 boost::intrusive_ptr<Tensor_impl> tmp(new Tensor_impl());
707 this->_impl = tmp;
708 this->_impl->Init(shape,dtype,device);
709 }
710 Tensor(const std::vector<cytnx_uint64> &shape, const unsigned int &dtype=Type.Double, const int &device=-1):_impl(new Tensor_impl()){
711 this->Init(shape,dtype,device);
712 }
714
715 static Tensor from_storage(const Storage &in){
716 Tensor out;
717 boost::intrusive_ptr<Tensor_impl> tmp(new Tensor_impl());
718 out._impl = tmp;
719 out._impl->Init(in);
720 return out;
721 }
722
723
729 unsigned int dtype() const {return this->_impl->dtype();}
730
736 int device() const { return this->_impl->device();}
737
743 std::string dtype_str() const { return this->_impl->dtype_str();}
744
750 std::string device_str() const{ return this->_impl->device_str();}
751
757 const std::vector<cytnx_uint64>& shape() const{
758 return this->_impl->shape();
759 }
760
765 cytnx_uint64 rank() const{
766 return this->_impl->shape().size();
767 }
768
787 Tensor clone() const{
788 Tensor out;
789 out._impl = this->_impl->clone();
790 return out;
791 }
792
813 Tensor to(const int &device) const{
814 Tensor out;
815 out._impl = this->_impl->to(device);
816 return out;
817 }
818
836 void to_(const int &device){
837 this->_impl->to_(device);
838 }
839
840 const bool& is_contiguous() const{
841 return this->_impl->is_contiguous();
842 }
843
844 Tensor permute_(const std::vector<cytnx_uint64> &rnks){
845 this->_impl->permute_(rnks);
846 return *this;
847 }
849 template<class ... Ts>
850 Tensor permute_(const cytnx_uint64 &e1, const Ts&... elems){
851 std::vector<cytnx_uint64> argv = dynamic_arg_uint64_resolver(e1,elems...);
852 this->_impl->permute_(argv);
853 return *this;
854 }
856
872 Tensor permute(const std::vector<cytnx_uint64> &rnks) const{
873 Tensor out;
874 out._impl = this->_impl->permute(rnks);
875 return out;
876 }
878 template<class ... Ts>
879 Tensor permute(const cytnx_uint64 &e1, const Ts&... elems) const{
880 std::vector<cytnx_uint64> argv = dynamic_arg_uint64_resolver(e1,elems...);
881 return this->permute(argv);
882 }
884
885
902 Tensor contiguous() const{
903 Tensor out;
904 out._impl = this->_impl->contiguous();
905 return out;
906 }
907
923 Tensor contiguous_(){
924 this->_impl->contiguous_();
925 return *this;
926 }
927
944 void reshape_(const std::vector<cytnx_int64> &new_shape){
945 this->_impl->reshape_(new_shape);
946 }
948 void reshape_(const std::vector<cytnx_uint64> &new_shape){
949 std::vector<cytnx_int64> shape(new_shape.begin(),new_shape.end());
950 this->_impl->reshape_(shape);
951 }
952 void reshape_(const std::initializer_list<cytnx_int64> &new_shape){
953 std::vector<cytnx_int64> shape = new_shape;
954 this->_impl->reshape_(shape);
955 }
956 template<class ...Ts>
957 void reshape_(const cytnx_int64 &e1, const Ts...elems){
958 std::vector<cytnx_int64> shape = dynamic_arg_int64_resolver(e1,elems...);
959 //std::cout << shape << std::endl;
960 this->_impl->reshape_(shape);
961 }
963
981 Tensor reshape(const std::vector<cytnx_int64> &new_shape) const {
982 Tensor out;
983 out._impl = this->_impl->reshape(new_shape);
984 return out;
985 }
986
987 Tensor reshape(const std::vector<cytnx_uint64> &new_shape) const{
988 std::vector<cytnx_int64> tmp(new_shape.size());
989 memcpy(&tmp[0],&new_shape[0],sizeof(cytnx_uint64)*new_shape.size());
990 Tensor out;
991 out._impl = this->_impl->reshape(tmp);
992 return out;
993 }
994
995 Tensor reshape(const std::initializer_list<cytnx_int64> &new_shape) const{
996 return this->reshape(std::vector<cytnx_int64>(new_shape));
997 }
998
1000 template<class ... Ts>
1001 Tensor reshape(const cytnx_int64 &e1, const Ts&...elems) const{
1002 std::vector<cytnx_int64> argv = dynamic_arg_int64_resolver(e1,elems...);
1003 return this->reshape(argv);
1004 }
1006
1025 Tensor astype(const int &new_type) const{
1026 Tensor out;
1027 out._impl = this->_impl->astype(new_type);
1028 return out;
1029 }
1030
1031
1032
1033 //Tensor diagonal(){
1034 // for(unsigned int i=0;i<this->shape().size();i++){
1035 // if(this->shape()[i] != this->shape()[0],"[ERROR] Tensor.diagonal() can only be called when the subject has equal dimension in each rank.%s","\n");
1036 // }
1037 //
1038 //}
1039
1056 template<class T>
1057 T& at(const std::vector<cytnx_uint64> &locator){
1058 return this->_impl->at<T>(locator);
1059 }
1060 template<class T>
1061 const T& at(const std::vector<cytnx_uint64> &locator) const{
1062 return this->_impl->at<T>(locator);
1063 }
1065 template<class T, class...Ts>
1066 const T& at(const cytnx_uint64 &e1, const Ts&...elems) const{
1067 std::vector<cytnx_uint64> argv = dynamic_arg_uint64_resolver(e1,elems...);
1068 return this->at<T>(argv);
1069 }
1070 template<class T, class...Ts>
1071 T& at(const cytnx_uint64 &e1, const Ts&...elems){
1072 std::vector<cytnx_uint64> argv = dynamic_arg_uint64_resolver(e1,elems...);
1073 return this->at<T>(argv);
1074 }
1075
1076 const Scalar::Sproxy at(const std::vector<cytnx_uint64> &locator) const{
1077 return this->_impl->at(locator);
1078 }
1079
1080 Scalar::Sproxy at(const std::vector<cytnx_uint64> &locator){
1081 return this->_impl->at(locator);
1082 }
1084
1104 template<class T>
1105 T& item(){
1106 cytnx_error_msg(this->_impl->storage().size()!=1,"[ERROR][Tensor.item<T>]%s","item can only be called from a Tensor with only one element\n");
1107 return this->_impl->storage().at<T>(0);
1108 }
1109
1111 template<class T>
1112 const T& item() const{
1113 cytnx_error_msg(this->_impl->storage().size()!=1,"[ERROR][Tensor.item<T>]%s","item can only be called from a Tensor with only one element\n");
1114 return this->_impl->storage().at<T>(0);
1115 }
1116
1117 const Scalar::Sproxy item() const{
1118 Scalar::Sproxy out(this->storage()._impl,0);
1119 return out;
1120 }
1121
1122 Scalar::Sproxy item(){
1123 Scalar::Sproxy out(this->storage()._impl,0);
1124 return out;
1125 }
1126
1128
1152 Tensor get(const std::vector<cytnx::Accessor> &accessors)const {
1153 Tensor out;
1154 out._impl = this->_impl->get(accessors);
1155 return out;
1156 }
1157
1158 /*
1159 Tensor get_v2(const std::vector<cytnx::Accessor> &accessors) const{
1160 Tensor out;
1161 out._impl = this->_impl->get_v2(accessors);
1162 return out;
1163 }
1164 */
1165
1185 void set(const std::vector<cytnx::Accessor> &accessors, const Tensor &rhs){
1186 this->_impl->set(accessors,rhs._impl);
1187 }
1188
1189
1190
1208 template<class T>
1209 void set(const std::vector<cytnx::Accessor> &accessors, const T &rc){
1210 this->_impl->set(accessors,rc);
1211 }
1213 template<class T>
1214 void set(const std::initializer_list<cytnx::Accessor> &accessors, const T &rc){
1215 std::vector<cytnx::Accessor> args = accessors;
1216 this->set(args,rc);
1217 }
1219
1228 Storage& storage() const{
1229 return this->_impl->storage();
1230 }
1231
1247 template<class T>
1248 void fill(const T& val){
1249 this->_impl->fill(val);
1250 }
1251
1252
1253 bool equiv(const Tensor &rhs){
1254 if(this->shape() != rhs.shape()) return false;
1255 return true;
1256 }
1257
1258 Tensor real();
1259 Tensor imag();
1260
1261
1262
1263 // Arithmetic:
1264 template<class T>
1265 Tensor& operator+=(const T &rc);
1266 template<class T>
1267 Tensor& operator-=(const T &rc);
1268 template<class T>
1269 Tensor& operator*=(const T &rc);
1270 template<class T>
1271 Tensor& operator/=(const T &rc);
1272
1273
1274 //Tensor &operator+=(const Tproxy &rc);
1275 //Tensor &operator-=(const Tproxy &rc);
1276 //Tensor &operator*=(const Tproxy &rc);
1277 //Tensor &operator/=(const Tproxy &rc);
1278 /*
1279 Tensor operator+(const Tproxy &rc){
1280 return *this + Tensor(rc);
1281 }
1282 Tensor operator-(const Tproxy &rc){
1283 return *this - Tensor(rc);
1284 }
1285 Tensor operator*(const Tproxy &rc){
1286 return *this * Tensor(rc);
1287 }
1288 Tensor operator/(const Tproxy &rc){
1289 return *this / Tensor(rc);
1290 }
1291 */
1292
1293 template<class T>
1294 Tensor Add(const T &rhs){
1295 return *this + rhs;
1296 }
1297 template<class T>
1298 Tensor& Add_(const T &rhs){
1299 return *this += rhs;
1300 }
1301
1302 template<class T>
1303 Tensor Sub(const T &rhs){
1304 return *this - rhs;
1305 }
1306 template<class T>
1307 Tensor& Sub_(const T &rhs){
1308 return *this -= rhs;
1309 }
1310
1311 template<class T>
1312 Tensor Mul(const T &rhs){
1313 return *this * rhs;
1314 }
1315 template<class T>
1316 Tensor& Mul_(const T &rhs){
1317 return *this *= rhs;
1318 }
1319
1320 template<class T>
1321 Tensor Div(const T &rhs){
1322 return *this / rhs;
1323 }
1324 template<class T>
1325 Tensor& Div_(const T &rhs){
1326 return *this /= rhs;
1327 }
1328
1329 template<class T>
1330 Tensor Cpr(const T &rhs){
1331 return *this == rhs;
1332 }
1333
1334 //template<class T>
1335 //Tensor& Cpr_(const T &rhs){
1336 //
1337 // return *this == rhs;
1338 //}
1339
1340 template<class T>
1341 Tensor Mod(const T &rhs){
1342 return *this % rhs;
1343 }
1344
1345 Tensor operator-(){
1346 return this->Mul(-1.);
1347 }
1348
1349 Tensor flatten() const{
1350 Tensor out = this->clone();
1351 out.contiguous_();
1352 out.reshape_({-1});
1353 return out;
1354 }
1355
1356 void flatten_(){
1357 this->contiguous_();
1358 this->reshape_({-1});
1359
1360 }
1361
1362
1363 void append(const Tensor &rhs){
1364 //Tensor in;
1365 if(!this->is_contiguous())
1366 this->contiguous_();
1367
1368 // check the shape of the input Tensor:
1369 cytnx_error_msg(rhs.shape().size()==0 || this->shape().size()==0,"[ERROR] trying to append a null Tensor.%s","\n");
1370 cytnx_error_msg(rhs.shape().size()!=(this->shape().size()-1),"[ERROR] trying to append a Tensor whose rank does not match.%s","\n");
1371 cytnx_uint64 Nelem = 1;
1372 for(unsigned int i=0;i<rhs.shape().size();i++){
1373 cytnx_error_msg(rhs.shape()[i]!=this->shape()[i+1],"[ERROR] dimension mismatch @ rhs.rank: [%d] this: [%d] rhs: [%d]\n",i,this->shape()[i+1],rhs.shape()[i]);
1374 Nelem*=rhs.shape()[i];
1375 }
1376
1377 //check type:
1378 Tensor in;
1379 if(rhs.dtype() != this->dtype()){
1380 in = rhs.astype(this->dtype());
1381 if(!in.is_contiguous())
1382 in.contiguous_();
1383 }else{
1384 if(!rhs.is_contiguous())
1385 in = rhs.contiguous();
1386 else
1387 in = rhs;
1388 }
1389 this->_impl->_shape[0]+=1;
1390 cytnx_uint64 oldsize = this->_impl->_storage.size();
1391 this->_impl->_storage.resize(oldsize+Nelem);
1392 memcpy(((char*)this->_impl->_storage.data()) + oldsize*Type.typeSize(this->dtype())/sizeof(char),
1393 in._impl->_storage.data(),
1394 Type.typeSize(in.dtype())*Nelem);
1395
1396 }
1397 void append(const Storage &srhs){
1398 if(!this->is_contiguous())
1399 this->contiguous_();
1400
1401 // check the shape of the input Storage:
1402 cytnx_error_msg(srhs.size()==0 || this->shape().size()==0,"[ERROR] trying to append a null Storage or to append to a null Tensor.%s","\n");
1403 cytnx_error_msg((this->shape().size()-1)!=1,"[ERROR] appending a Storage is only supported for a rank-2 Tensor.%s","\n");
1404 cytnx_error_msg(this->shape().back()!=srhs.size(),"[ERROR] Tensor dimension mismatch!%s","\n");
1405
1406
1407 //check type:
1408 Storage in;
1409 if(srhs.dtype() != this->dtype()){
1410 in = srhs.astype(this->dtype());
1411 }else{
1412 in = srhs;
1413 }
1414 this->_impl->_shape[0]+=1;
1415 cytnx_uint64 oldsize = this->_impl->_storage.size();
1416 this->_impl->_storage.resize(oldsize+in.size());
1417 memcpy(((char*)this->_impl->_storage.data()) + oldsize*Type.typeSize(this->dtype())/sizeof(char),
1418 in._impl->Mem,
1419 Type.typeSize(in.dtype())*in.size());
1420
1421 }
1422 /*
1423 void append(const Tensor &rhs){
1424 // convert to the same type.
1425 Tensor in;
1426 if(rhs.dtype() != this->dtype()){
1427 in = rhs.astype(this->dtype());
1428 }else{
1429 in = rhs;
1430 }
1431
1432 // 1) check rank
1433 if(this->shape().size()==1){
1434 // check if rhs is a scalar tensor (only one element)
1435 cytnx_error_msg(!(rhs.shape().size()==1 && rhs.shape()[0]==1),"[ERROR] trying to append a scalar into multidimentional Tensor is not allow.\n Only rank-1 Tensor can accept scalar append.%s","\n");
1436 this->_impl->_shape[0]+=1;
1437 this->_impl->_storage.append(0);
1438
1439 }else{
1440 cytnx_error_msg(rhs.shape().size() != this->shape().size()-1,"[ERROR] try to append a Tensor with rank not match.%s","\n");
1441
1442 }
1443 cytnx_error_msg(!this->is_contiguous(),"[ERROR] append require the Tensor to be contiguous. suggestion: call contiguous() or contiguous_() first.","\n");
1444 }
1445 */
1446 template<class T>
1447 void append(const T &rhs){
1448 cytnx_error_msg(this->shape().size()!=1,"[ERROR] appending a scalar to a multidimensional Tensor is not allowed.\n Only a rank-1 Tensor can accept a scalar append.%s","\n");
1449 cytnx_error_msg(!this->is_contiguous(),"[ERROR] append requires the Tensor to be contiguous. Suggestion: call contiguous() or contiguous_() first.%s","\n");
1450 this->_impl->_shape[0]+=1;
1451 this->_impl->_storage.append(rhs);
1452 }
1453
1454 bool same_data(const Tensor &rhs) const;
1455
1456 // linalg:
1457 std::vector<Tensor> Svd(const bool &is_U=true, const bool &is_vT=true) const;
1458 std::vector<Tensor> Eigh(const bool &is_V=true,const bool &row_v=false) const;
1459 Tensor& InvM_();
1460 Tensor InvM() const;
1461 Tensor& Inv_(const double &clip);
1462 Tensor Inv(const double &clip) const;
1463
1464 Tensor& Conj_();
1465 Tensor Conj() const;
1466 Tensor& Exp_();
1467 Tensor Exp() const;
1468 Tensor Norm() const;
1469 Tensor Pow(const cytnx_double &p) const;
1470 Tensor& Pow_(const cytnx_double &p);
1471 Tensor Trace(const cytnx_uint64 &a=0, const cytnx_uint64 &b=1) const;
1472 Tensor Abs() const;
1473 Tensor& Abs_();
1474 Tensor Max() const;
1475 Tensor Min() const;
1476
1477 };// class Tensor
1478
1479 Tensor operator+(const Tensor &lhs, const Tensor::Tproxy &rhs);
1480 Tensor operator-(const Tensor &lhs, const Tensor::Tproxy &rhs);
1481 Tensor operator*(const Tensor &lhs, const Tensor::Tproxy &rhs);
1482 Tensor operator/(const Tensor &lhs, const Tensor::Tproxy &rhs);
1483
1484 Tensor operator+(const Tensor &lhs, const Scalar::Sproxy &rhs);
1485 Tensor operator-(const Tensor &lhs, const Scalar::Sproxy &rhs);
1486 Tensor operator*(const Tensor &lhs, const Scalar::Sproxy &rhs);
1487 Tensor operator/(const Tensor &lhs, const Scalar::Sproxy &rhs);
1488
1489 std::ostream& operator<<(std::ostream& os, const Tensor &in);
1490 std::ostream& operator<<(std::ostream& os, const Tensor::Tproxy &in);
1491 //{ os << Tensor(in);};
1492}
1493
1494#endif
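
For reference, a minimal usage sketch of the Tensor interface declared above. It is illustrative only: the umbrella header "cytnx.hpp", the device id Device.cpu, and the concrete shapes/values are assumptions made for the example; the calls simply mirror the declarations in this file.

#include "cytnx.hpp"
#include <iostream>

using namespace cytnx;

int main(){
    // create a 3x4 double-precision tensor on the CPU and fill it with 1.0
    Tensor A({3,4}, Type.Double, Device.cpu);
    A.fill(1.0);

    // reshape: a single -1 dimension is inferred from the element count
    Tensor B = A.reshape({2,-1});               // shape {2,6}

    // permute is lazy (metadata only); contiguous() rearranges the memory
    Tensor C = B.permute({1,0}).contiguous();   // shape {6,2}

    // element access (C++ API)
    C.at<double>({0,1}) = 3.5;
    double v = C.at<double>(0,1);

    // slicing through the Tproxy class
    Tensor row0 = C[{Accessor(0), Accessor::all()}];

    // dtype conversion
    Tensor D = C.astype(Type.Float);

    std::cout << v << std::endl << D << std::endl;
    return 0;
}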