Cytnx v0.7.4
Loading...
Searching...
No Matches
Tensor.hpp
Go to the documentation of this file.
1#ifndef _H_Tensor_
2#define _H_Tensor_
3
4#include "Type.hpp"
5#include "cytnx_error.hpp"
6#include "Storage.hpp"
7#include "Device.hpp"
9#include <iostream>
10#include <fstream>
11#include "utils/vec_range.hpp"
12#include "utils/dynamic_arg_resolver.hpp"
13//#include "linalg.hpp"
14#include "Accessor.hpp"
15#include <vector>
16#include <initializer_list>
17#include <string>
18#include "Scalar.hpp"
19
20namespace cytnx{
21
22
24 // real implementation
25 class Tensor_impl: public intrusive_ptr_base<Tensor_impl>{
26 private:
27
28 //Interface:
29 Storage_init_interface __SII;
30
31
32 //Memory:
33 Storage _storage;
34
35 //tensor shape
36 std::vector<cytnx_uint64> _shape;
37
38 // pseudo-perm info
39 std::vector<cytnx_uint64> _mapper;
40 std::vector<cytnx_uint64> _invmapper;
41 bool _contiguous;
42
43 public:
44
45 friend class Tensor;
46 boost::intrusive_ptr<Tensor_impl> _clone_meta_only() const{
47 boost::intrusive_ptr<Tensor_impl> out(new Tensor_impl());
48 out->_mapper = this->_mapper;
49 out->_invmapper = this->_invmapper;
50 out->_shape = this->_shape;
51 out->_contiguous = this->_contiguous;
52 return out;
53 }
54 Tensor_impl(): _contiguous(true){};
55
56 void Init(const std::vector<cytnx_uint64> &shape, const unsigned int &dtype=Type.Double, int device=-1);
57 void Init(const Storage &in);
58 /*
59 template<class T>
60 void From_vec(const T &ndvec){
61 cytnx_error_msg(std::string(typeid(T).name()).find("vector") == std::string::npos,"[ERROR][Tensor][From_vec] the input argument should be a nd vector.%s","\n");
62 //dispatch the rank!:
63
64
65
66 }
67 */
68 //clone&assignment constr., use intrusive_ptr's
69 Tensor_impl(const Tensor_impl &rhs);
70 Tensor_impl& operator=(const Tensor_impl &rhs); // add const
71
72 unsigned int dtype() const{
73 return this->_storage.dtype();
74 }
75 int device() const {
76 return this->_storage.device();
77 }
78
79 std::string dtype_str() const {
80 return Type.getname(this->_storage.dtype());
81 }
82 std::string device_str() const{
83 return Device.getname(this->_storage.device());
84 }
85
86 const std::vector<cytnx_uint64>& shape() const{
87 return _shape;
88 }
89
90 const bool& is_contiguous() const{
91 return this->_contiguous;
92 }
93
94 const std::vector<cytnx_uint64>& mapper() const{
95 return this->_mapper;
96 }
97 const std::vector<cytnx_uint64>& invmapper() const{
98 return this->_invmapper;
99 }
100 Storage& storage(){
101 return _storage;
102 }
103
104 const Storage& storage() const{
105 return _storage;
106 }
107
108 boost::intrusive_ptr<Tensor_impl> clone() const{
109 boost::intrusive_ptr<Tensor_impl> out = this->_clone_meta_only();
110 out->_storage = this->_storage.clone();
111 return out;
112 }
113
114
115 void to_(const int &device){
116 this->_storage.to_(device);
117 }
118 boost::intrusive_ptr<Tensor_impl> to(const int &device){
119 if(this->device()==device){
120 //boost::intrusive_ptr<Tensor_impl> out(this);
121 return this;
122 }else{
123
124 boost::intrusive_ptr<Tensor_impl> out = this->_clone_meta_only();
125 out->_storage = this->_storage.to(device);
126 return out;
127 }
128 }
129
130 void permute_(const std::vector<cytnx_uint64> &rnks);
131
132 boost::intrusive_ptr<Tensor_impl> permute(const std::vector<cytnx_uint64> &rnks);
133
134 template<class T>
135 T& at(const std::vector<cytnx_uint64> &locator) const {
136 cytnx_error_msg(locator.size() != this->_shape.size(), "%s", "The input index does not match Tensor's rank.");
137
138 cytnx_uint64 RealRank,mtplyr;
139 std::vector<cytnx_uint64> c_shape(this->_shape.size());
140 std::vector<cytnx_uint64> c_loc(this->_shape.size());
141
142 RealRank=0;
143 mtplyr = 1;
144
145 for(cytnx_int64 i=this->_shape.size()-1; i>=0; i--){
146 if(locator[i]>=this->_shape[i]){
147 cytnx_error_msg(true, "%s", "Attempting to access out-of-bound index in Tensor.");
148 }
149 c_shape[i] = this->_shape[this->_invmapper[i]];
150 c_loc[i] = locator[this->_invmapper[i]];
151 RealRank += mtplyr*c_loc[i];
152 mtplyr *= c_shape[i];
153 }
154 return this->_storage.at<T>(RealRank);
155 }
156
157 const Scalar::Sproxy at(const std::vector<cytnx_uint64> &locator) const{
158 cytnx_error_msg(locator.size() != this->_shape.size(), "%s", "The input index does not match Tensor's rank.");
159
160 cytnx_uint64 RealRank,mtplyr;
161 std::vector<cytnx_uint64> c_shape(this->_shape.size());
162 std::vector<cytnx_uint64> c_loc(this->_shape.size());
163
164 RealRank=0;
165 mtplyr = 1;
166
167 for(cytnx_int64 i=this->_shape.size()-1; i>=0; i--){
168 if(locator[i]>=this->_shape[i]){
169 cytnx_error_msg(true, "%s", "Attempting to access out-of-bound index in Tensor.");
170 }
171 c_shape[i] = this->_shape[this->_invmapper[i]];
172 c_loc[i] = locator[this->_invmapper[i]];
173 RealRank += mtplyr*c_loc[i];
174 mtplyr *= c_shape[i];
175 }
176 return this->_storage.at(RealRank);
177 }
178
179 Scalar::Sproxy at(const std::vector<cytnx_uint64> &locator){
180 cytnx_error_msg(locator.size() != this->_shape.size(), "%s", "The input index does not match Tensor's rank.");
181
182 cytnx_uint64 RealRank,mtplyr;
183 std::vector<cytnx_uint64> c_shape(this->_shape.size());
184 std::vector<cytnx_uint64> c_loc(this->_shape.size());
185
186 RealRank=0;
187 mtplyr = 1;
188
189 for(cytnx_int64 i=this->_shape.size()-1; i>=0; i--){
190 if(locator[i]>=this->_shape[i]){
191 cytnx_error_msg(true, "%s", "Attempting to access out-of-bound index in Tensor.");
192 }
193 c_shape[i] = this->_shape[this->_invmapper[i]];
194 c_loc[i] = locator[this->_invmapper[i]];
195 RealRank += mtplyr*c_loc[i];
196 mtplyr *= c_shape[i];
197 }
198 return this->_storage.at(RealRank);
199 }
200
201 boost::intrusive_ptr<Tensor_impl> get(const std::vector<cytnx::Accessor> &accessors);
202 boost::intrusive_ptr<Tensor_impl> get_deprecated(const std::vector<cytnx::Accessor> &accessors);
203 void set(const std::vector<cytnx::Accessor> &accessors, const boost::intrusive_ptr<Tensor_impl> &rhs);
204
205 template<class T>
206 void set(const std::vector<cytnx::Accessor> &accessors, const T& rc);
207
208 void set(const std::vector<cytnx::Accessor> &accessors, const Scalar::Sproxy& rc);
209
210 template<class Tx>
211 void fill(const Tx& val){
212 this->storage().fill(val);
213 }
214
215 boost::intrusive_ptr<Tensor_impl> contiguous(){
216 // return new instance if act on non-contiguous tensor
217 // return self if act on contiguous tensor
218 if(this->_contiguous){
219 boost::intrusive_ptr<Tensor_impl> out(this);
220 //out->_storage = this->_storage;
221 return out;
222 }else{
223 boost::intrusive_ptr<Tensor_impl> out(new Tensor_impl());
224 std::vector<cytnx_uint64> oldshape(this->_shape.size());
225 for(cytnx_uint64 i=0;i<this->_shape.size();i++){
226 oldshape[i] = this->_shape[this->_invmapper[i]];
227 }
228
229 out->_storage._impl = this->_storage._impl->Move_memory(oldshape,this->_mapper, this->_invmapper);
230 //std::cout << out->_storage << std::endl;
231 out->_invmapper = vec_range(this->_invmapper.size());
232 out->_mapper = out->_invmapper;
233 out->_shape = this->_shape;
234 out->_contiguous = true;
235 return out;
236 }
237 }
238
239 void contiguous_(){
240 // return new instance if act on non-contiguous tensor
241 // return self if act on contiguous tensor
242 if(!this->_contiguous){
243 std::vector<cytnx_uint64> oldshape(this->_shape.size());
244 for(cytnx_uint64 i=0;i<this->_shape.size();i++){
245 oldshape[i] = this->_shape[this->_invmapper[i]];
246 }
247
248 this->_storage._impl = this->_storage._impl->Move_memory(oldshape,this->_mapper, this->_invmapper);
249 this->_mapper = vec_range(this->_invmapper.size());
250 this->_invmapper = this->_mapper;
251 this->_contiguous = true;
252 }
253 }
254
255
256 void reshape_(const std::vector<cytnx_int64> &new_shape){
257 if(!this->_contiguous){
258 this->contiguous_();
259 }
260 std::vector<cytnx_uint64> result_shape(new_shape.size());
261 cytnx_uint64 new_N = 1;
262 bool has_undetermine = false;
263 unsigned int Udet_id = 0;
264 for(int i=0;i<new_shape.size();i++){
265 if(new_shape[i]<0){
266 if(new_shape[i]!=-1) cytnx_error_msg(new_shape[i]!=-1,"%s","[ERROR] reshape can only have dimension > 0 and one undetermine rank specify as -1");
267 if(has_undetermine) cytnx_error_msg(new_shape[i]!=-1,"%s","[ERROR] reshape can only have dimension > 0 and one undetermine rank specify as -1");
268 Udet_id = i;
269 has_undetermine = true;
270 }else{
271 new_N *= new_shape[i];
272 result_shape[i] = new_shape[i];
273 }
274 }
275
276
277 if(has_undetermine){
278 cytnx_error_msg(new_N > this->_storage.size(),"%s","[ERROR] new shape exceed the total number of elements.");
279 cytnx_error_msg(this->_storage.size()%new_N,"%s","[ERROR] unmatch size when reshape with undetermine dimension");
280 result_shape[Udet_id] = this->_storage.size()/new_N;
281 }else{
282 cytnx_error_msg(new_N != this->_storage.size(),"%s","[ERROR] new shape does not match the number of elements.");
283 }
284
285 this->_shape = result_shape;
286 this->_mapper = vec_range(result_shape.size());
287 this->_invmapper = this->_mapper;
288 }
289
290
291 boost::intrusive_ptr<Tensor_impl> reshape(const std::vector<cytnx_int64> &new_shape){
292 boost::intrusive_ptr<Tensor_impl> out(new Tensor_impl());
293 if(this->is_contiguous()){
294 out = this->_clone_meta_only();
295 out->_storage = this->_storage;
296 }else{
297 out = this->contiguous();
298 }
299 //out = this->clone();
300
301 out->reshape_(new_shape);
302 return out;
303 }
304
305
306
307 boost::intrusive_ptr<Tensor_impl> astype(const int& new_type){
308 //boost::intrusive_ptr<Tensor_impl> out(new Tensor_impl());
309 //out->_storage = this->_storage.astype(new_type);
310 if(this->dtype() == new_type){
311 return this;
312 }else{
313 boost::intrusive_ptr<Tensor_impl> out = this->_clone_meta_only();
314 out->_storage = this->_storage.astype(new_type);
315 return out;
316 }
317 }
318
319 };
321
322 class Tensor;
323
325 // [Note] these are fwd from linalg.hpp
326 template<class T> Tensor operator+(const Tensor &lhs, const T &rc);
327 template<class T> Tensor operator-(const Tensor &lhs, const T &rhs);
328 template<class T> Tensor operator*(const Tensor &lhs, const T &rhs);
329 template<class T> Tensor operator/(const Tensor &lhs, const T &rhs);
331
333 class Tensor{
334 private:
335 public:
336
338 // this is a proxy class to allow get/set element using [] as python!
339 struct Tproxy
340 {
341 boost::intrusive_ptr<Tensor_impl> _insimpl;
342 std::vector<cytnx::Accessor> _accs;
343 Tproxy(boost::intrusive_ptr<Tensor_impl> _ptr,const std::vector<cytnx::Accessor> &accs) : _insimpl(_ptr), _accs(accs){}
344
345 // when used to set elems:
346 const Tensor& operator=(const Tensor &rhs){
347 this->_insimpl->set(_accs,rhs._impl);
348 return rhs;
349 }
350
351 template<class T>
352 const T& operator=(const T &rc){
353 this->_insimpl->set(_accs,rc);
354 return rc;
355 }
356 const Tproxy& operator=(const Tproxy &rc){
357 Tensor tmp = Tensor(rc);
358 this->_insimpl->set(_accs,tmp._impl);
359 return rc;
360 }
361
362
363 template<class T>
364 Tensor operator+=(const T &rc){
365 Tensor self;
366 self._impl = _insimpl->get(_accs);
367 self += rc;
368 _insimpl->set(_accs,self._impl);
369 self._impl = this->_insimpl;
370 return self;
371 }
372 Tensor operator+=(const Tproxy &rc);
373
374 template<class T>
375 Tensor operator-=(const T &rc){
376 Tensor self;
377 self._impl = _insimpl->get(_accs);
378 self -= rc;
379 _insimpl->set(_accs,self._impl);
380 self._impl = this->_insimpl;
381 return self;
382 }
383 Tensor operator-=(const Tproxy &rc);
384
385 template<class T>
386 Tensor operator/=(const T &rc){
387 Tensor self;
388 self._impl = _insimpl->get(_accs);
389 self /= rc;
390 _insimpl->set(_accs,self._impl);
391 self._impl = this->_insimpl;
392 return self;
393 }
394 Tensor operator/=(const Tproxy &rc);
395
396 template<class T>
397 Tensor operator*=(const T &rc){
398 Tensor self;
399 self._impl = _insimpl->get(_accs);
400 self *= rc;
401 _insimpl->set(_accs,self._impl);
402 self._impl = this->_insimpl;
403 return self;
404 }
405 Tensor operator*=(const Tproxy &rc);
406
407
408 //alias to resolve conflict with op ovld for rc=Tensor
409 /*
410 template<class T>
411 Tensor _operatorADD(const T &rc) const{
412 Tensor out;
413 out._impl = _insimpl->get(_accs);
414 return out.Add(rc);
415 }
416 */
417 Tensor operator+(const cytnx_complex128 &rc) const;//{return this->_operatorADD(rc);};
418 Tensor operator+(const cytnx_complex64 &rc) const;//{return this->_operatorADD(rc);};
419 Tensor operator+(const cytnx_double &rc) const;//{return this->_operatorADD(rc);};
420 Tensor operator+(const cytnx_float &rc) const;//{return this->_operatorADD(rc);};
421 Tensor operator+(const cytnx_uint64 &rc) const;//{return this->_operatorADD(rc);};
422 Tensor operator+(const cytnx_int64 &rc) const;//{return this->_operatorADD(rc);};
423 Tensor operator+(const cytnx_uint32 &rc) const;//{return this->_operatorADD(rc);};
424 Tensor operator+(const cytnx_int32 &rc) const;//{return this->_operatorADD(rc);};
425 Tensor operator+(const cytnx_uint16 &rc) const;//{return this->_operatorADD(rc);};
426 Tensor operator+(const cytnx_int16 &rc) const;//{return this->_operatorADD(rc);};
427 Tensor operator+(const cytnx_bool &rc) const;//{return this->_operatorADD(rc);};
428 Tensor operator+(const Tproxy &rc) const;
429
430 /*
431 template<class T>
432 Tensor _operatorSUB(const T &rc) const{
433 Tensor out;
434 out._impl = _insimpl->get(_accs);
435 return out.Sub(rc);
436 }
437 */
438 Tensor operator-(const cytnx_complex128 &rc) const;//{return this->_operatorSUB(rc);};
439 Tensor operator-(const cytnx_complex64 &rc) const;//{return this->_operatorSUB(rc);};
440 Tensor operator-(const cytnx_double &rc) const;//{return this->_operatorSUB(rc);};
441 Tensor operator-(const cytnx_float &rc) const;//{return this->_operatorSUB(rc);};
442 Tensor operator-(const cytnx_uint64 &rc) const;//{return this->_operatorSUB(rc);};
443 Tensor operator-(const cytnx_int64 &rc) const;//{return this->_operatorSUB(rc);};
444 Tensor operator-(const cytnx_uint32 &rc) const;//{return this->_operatorSUB(rc);};
445 Tensor operator-(const cytnx_int32 &rc) const;//{return this->_operatorSUB(rc);};
446 Tensor operator-(const cytnx_uint16 &rc) const;//{return this->_operatorSUB(rc);};
447 Tensor operator-(const cytnx_int16 &rc) const;//{return this->_operatorSUB(rc);};
448 Tensor operator-(const cytnx_bool &rc) const;//{return this->_operatorSUB(rc);};
449 Tensor operator-(const Tproxy &rc) const;
450
451 Tensor operator-() const;
452
453 /*
454 template<class T>
455 Tensor _operatorMUL(const T &rc) const{
456 Tensor out;
457 out._impl = _insimpl->get(_accs);
458 return out.Mul(rc);
459 }
460 */
461 Tensor operator*(const cytnx_complex128 &rc) const;//{return this->_operatorMUL(rc);};
462 Tensor operator*(const cytnx_complex64 &rc) const;//{return this->_operatorMUL(rc);};
463 Tensor operator*(const cytnx_double &rc) const;//{return this->_operatorMUL(rc);};
464 Tensor operator*(const cytnx_float &rc) const;//{return this->_operatorMUL(rc);};
465 Tensor operator*(const cytnx_uint64 &rc) const;//{return this->_operatorMUL(rc);};
466 Tensor operator*(const cytnx_int64 &rc) const;//{return this->_operatorMUL(rc);};
467 Tensor operator*(const cytnx_uint32 &rc) const;//{return this->_operatorMUL(rc);};
468 Tensor operator*(const cytnx_int32 &rc) const;//{return this->_operatorMUL(rc);};
469 Tensor operator*(const cytnx_uint16 &rc) const;//{return this->_operatorMUL(rc);};
470 Tensor operator*(const cytnx_int16 &rc) const;//{return this->_operatorMUL(rc);};
471 Tensor operator*(const cytnx_bool &rc) const;//{return this->_operatorMUL(rc);};
472 Tensor operator*(const Tproxy &rc) const;
473
474 /*
475 template<class T>
476 Tensor _operatorDIV(const T &rc) const{
477 Tensor out;
478 out._impl = _insimpl->get(_accs);
479 return out.Div(rc);
480 }
481 */
482 Tensor operator/(const cytnx_complex128 &rc) const;//{return this->_operatorDIV(rc);};
483 Tensor operator/(const cytnx_complex64 &rc) const;//{return this->_operatorDIV(rc);};
484 Tensor operator/(const cytnx_double &rc) const;//{return this->_operatorDIV(rc);};
485 Tensor operator/(const cytnx_float &rc) const;//{return this->_operatorDIV(rc);};
486 Tensor operator/(const cytnx_uint64 &rc) const;//{return this->_operatorDIV(rc);};
487 Tensor operator/(const cytnx_int64 &rc) const;//{return this->_operatorDIV(rc);};
488 Tensor operator/(const cytnx_uint32 &rc) const;//{return this->_operatorDIV(rc);};
489 Tensor operator/(const cytnx_int32 &rc) const;//{return this->_operatorDIV(rc);};
490 Tensor operator/(const cytnx_uint16 &rc) const;//{return this->_operatorDIV(rc);};
491 Tensor operator/(const cytnx_int16 &rc) const;//{return this->_operatorDIV(rc);};
492 Tensor operator/(const cytnx_bool &rc) const;//{return this->_operatorDIV(rc);};
493 Tensor operator/(const Tproxy &rc) const;
494
495 template<class T>
496 T item() const{
497 Tensor out;
498 out._impl = _insimpl->get(_accs);
499 return out.item<T>();
500 }
501
502 Scalar::Sproxy item() const{
503 Tensor out;
504 out._impl = _insimpl->get(_accs);
505 return out.item();
506 }
507
508 // when used to get elems:
509 operator Tensor () const{
510 Tensor out;
511 out._impl = _insimpl->get(_accs);
512 return out;
513 }
514
515
516 Storage storage() const{
517 Tensor out;
518 out._impl = _insimpl->get(_accs);
519 return out.storage();
520 }
521
522 };// proxy class of Tensor.
523
525
527 // these two are using the python way!
528 //----------------------------------------
529 template<class ... Ts>
530 Tproxy operator()(const std::string &e1, const Ts&...elems){
531 //std::cout << e1 << std::endl;
532 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1,elems...);
533 return (*this)[tmp];
534 }
535 template<class ... Ts>
536 Tproxy operator()(const cytnx_int64 &e1, const Ts&...elems){
537 //std::cout << e1<< std::endl;
538 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1,elems...);
539 return (*this)[tmp];
540 }
541 template<class ... Ts>
542 Tproxy operator()(const cytnx::Accessor &e1, const Ts&...elems){
543 //std::cout << e1 << std::endl;
544 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1,elems...);
545 return (*this)[tmp];
546 }
547
548 template<class ... Ts>
549 const Tproxy operator()(const cytnx_int64 &e1, const Ts&...elems) const{
550 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1,elems...);
551 return (*this)[tmp];
552 }
553 template<class ... Ts>
554 const Tproxy operator()(const cytnx::Accessor &e1, const Ts&...elems) const{
555 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1,elems...);
556 return (*this)[tmp];
557 }
558 //-----------------------------------------
559
560
561 Tproxy operator[](const std::initializer_list<cytnx::Accessor> &accs){
562 std::vector<cytnx::Accessor> tmp = accs;
563 return (*this)[tmp];
564 }
565 Tproxy operator[](const std::vector<cytnx::Accessor> &accs){
566 return Tproxy(this->_impl,accs);
567 }
568
569
570 const Tproxy operator[](const std::vector<cytnx::Accessor> &accs) const{
571 return Tproxy(this->_impl,accs);
572 }
573 const Tproxy operator[](const std::initializer_list<cytnx::Accessor> &accs) const{
574 std::vector<cytnx::Accessor> tmp = accs;
575 return (*this)[tmp];
576 }
577
578 Tproxy operator[](const std::initializer_list<cytnx_int64> &accs){
579 std::vector<cytnx_int64> tmp = accs;
580 return (*this)[tmp];
581 }
582 Tproxy operator[](const std::vector<cytnx_int64> &accs){
583 std::vector<cytnx::Accessor> acc_in;
584 for(int i=0;i<accs.size();i++){
585 acc_in.push_back(cytnx::Accessor(accs[i]));
586 }
587 return Tproxy(this->_impl,acc_in);
588 }
589 const Tproxy operator[](const std::initializer_list<cytnx_int64> &accs) const{
590 std::vector<cytnx_int64> tmp = accs;
591 return (*this)[tmp];
592 }
593 const Tproxy operator[](const std::vector<cytnx_int64> &accs) const{
594 std::vector<cytnx::Accessor> acc_in;
595 for(int i=0;i<accs.size();i++){
596 acc_in.push_back(cytnx::Accessor(accs[i]));
597 }
598 return Tproxy(this->_impl,acc_in);
599 }
601 //-------------------------------------------
602
603
605 void _Save(std::fstream &f) const;
606 void _Load(std::fstream &f);
607
609
617 void Save(const std::string &fname) const;
618 void Save(const char* fname) const;
619 void Tofile(const std::string &fname) const;
620 void Tofile(const char* fname) const;
621 void Tofile(std::fstream &f) const;
630 static Tensor Load(const std::string &fname);
631 static Tensor Load(const char* fname);
632 static Tensor Fromfile(const std::string &fname, const unsigned int &dtype, const cytnx_int64 &count=-1);
633 static Tensor Fromfile(const char* fname, const unsigned int &dtype, const cytnx_int64 &count=-1);
634
635 //static Tensor Frombinary(const std::string &fname);
636
637
638
640 boost::intrusive_ptr<Tensor_impl> _impl;
641 Tensor(): _impl(new Tensor_impl()){};
642 Tensor(const Tensor &rhs){
643 _impl = rhs._impl;
644 }
645
646 /*
647 template<class Tp>
648 Tensor(const std::initializer_list<Tp> &rhs){
649 Storage stmp = std::vector<Tp>(rhs);
650 boost::intrusive_ptr<Tensor_impl> tmp(new Tensor_impl());
651 tmp->Init(stmp);
652 this->_impl = tmp;
653 }
654 */
655
656 Tensor& operator=(const Tensor &rhs){
657 _impl = rhs._impl;
658 return *this;
659 }
660
661 void operator=(const Tproxy &rhsp){ // this is used to handle proxy assignment
662 this->_impl = rhsp._insimpl->get(rhsp._accs);
663
664 }
666
668 //default device==Device.cpu (-1)
688 void Init(const std::vector<cytnx_uint64> &shape, const unsigned int &dtype=Type.Double, const int &device=-1){
689 boost::intrusive_ptr<Tensor_impl> tmp(new Tensor_impl());
690 this->_impl = tmp;
691 this->_impl->Init(shape,dtype,device);
692 }
693 Tensor(const std::vector<cytnx_uint64> &shape, const unsigned int &dtype=Type.Double, const int &device=-1):_impl(new Tensor_impl()){
694 this->Init(shape,dtype,device);
695 }
697
699 Tensor out;
700 boost::intrusive_ptr<Tensor_impl> tmp(new Tensor_impl());
701 out._impl = tmp;
702 out._impl->Init(in);
703 return out;
704 }
705
706
712 unsigned int dtype() const {return this->_impl->dtype();}
713
719 int device() const { return this->_impl->device();}
720
726 std::string dtype_str() const { return this->_impl->dtype_str();}
727
733 std::string device_str() const{ return this->_impl->device_str();}
734
740 const std::vector<cytnx_uint64>& shape() const{
741 return this->_impl->shape();
742 }
743
749 return this->_impl->shape().size();
750 }
751
770 Tensor clone() const{
771 Tensor out;
772 out._impl = this->_impl->clone();
773 return out;
774 }
775
796 Tensor to(const int &device) const{
797 Tensor out;
798 out._impl = this->_impl->to(device);
799 return out;
800 }
801
819 void to_(const int &device){
820 this->_impl->to_(device);
821 }
822
823 const bool& is_contiguous() const{
824 return this->_impl->is_contiguous();
825 }
826
827 Tensor permute_(const std::vector<cytnx_uint64> &rnks){
828 this->_impl->permute_(rnks);
829 return *this;
830 }
832 template<class ... Ts>
833 Tensor permute_(const cytnx_uint64 &e1, const Ts&... elems){
834 std::vector<cytnx_uint64> argv = dynamic_arg_uint64_resolver(e1,elems...);
835 this->_impl->permute_(argv);
836 return *this;
837 }
839
855 Tensor permute(const std::vector<cytnx_uint64> &rnks) const{
856 Tensor out;
857 out._impl = this->_impl->permute(rnks);
858 return out;
859 }
861 template<class ... Ts>
862 Tensor permute(const cytnx_uint64 &e1, const Ts&... elems) const{
863 std::vector<cytnx_uint64> argv = dynamic_arg_uint64_resolver(e1,elems...);
864 return this->permute(argv);
865 }
867
868
886 Tensor out;
887 out._impl = this->_impl->contiguous();
888 return out;
889 }
890
907 this->_impl->contiguous_();
908 return *this;
909 }
910
927 void reshape_(const std::vector<cytnx_int64> &new_shape){
928 this->_impl->reshape_(new_shape);
929 }
931 void reshape_(const std::vector<cytnx_uint64> &new_shape){
932 std::vector<cytnx_int64> shape(new_shape.begin(),new_shape.end());
933 this->_impl->reshape_(shape);
934 }
935 void reshape_(const std::initializer_list<cytnx_int64> &new_shape){
936 std::vector<cytnx_int64> shape = new_shape;
937 this->_impl->reshape_(shape);
938 }
939 template<class ...Ts>
940 void reshape_(const cytnx_int64 &e1, const Ts...elems){
941 std::vector<cytnx_int64> shape = dynamic_arg_int64_resolver(e1,elems...);
942 //std::cout << shape << std::endl;
943 this->_impl->reshape_(shape);
944 }
946
964 Tensor reshape(const std::vector<cytnx_int64> &new_shape) const {
965 Tensor out;
966 out._impl = this->_impl->reshape(new_shape);
967 return out;
968 }
970 template<class ... Ts>
971 Tensor reshape(const cytnx_int64 &e1, const Ts&...elems) const{
972 std::vector<cytnx_int64> argv = dynamic_arg_int64_resolver(e1,elems...);
973 return this->reshape(argv);
974 }
976
995 Tensor astype(const int &new_type) const{
996 Tensor out;
997 out._impl = this->_impl->astype(new_type);
998 return out;
999 }
1000
1001
1002
1003 //Tensor diagonal(){
1004 // for(unsigned int i=0;i<this->shape().size();i++){
1005 // if(this->shape()[i] != this->shape()[0],"[ERROR] Tensor.diagonal() can only be called when the subject has equal dimension in each rank.%s","\n");
1006 // }
1007 //
1008 //}
1009
1026 template<class T>
1027 T& at(const std::vector<cytnx_uint64> &locator){
1028 return this->_impl->at<T>(locator);
1029 }
1030 template<class T>
1031 const T& at(const std::vector<cytnx_uint64> &locator) const{
1032 return this->_impl->at<T>(locator);
1033 }
1035 template<class T, class...Ts>
1036 const T& at(const cytnx_uint64 &e1, const Ts&...elems) const{
1037 std::vector<cytnx_uint64> argv = dynamic_arg_uint64_resolver(e1,elems...);
1038 return this->at<T>(argv);
1039 }
1040 template<class T, class...Ts>
1041 T& at(const cytnx_uint64 &e1, const Ts&...elems){
1042 std::vector<cytnx_uint64> argv = dynamic_arg_uint64_resolver(e1,elems...);
1043 return this->at<T>(argv);
1044 }
1045
1046 const Scalar::Sproxy at(const std::vector<cytnx_uint64> &locator) const{
1047 return this->_impl->at(locator);
1048 }
1049
1050 Scalar::Sproxy at(const std::vector<cytnx_uint64> &locator){
1051 return this->_impl->at(locator);
1052 }
1054
1074 template<class T>
1076 cytnx_error_msg(this->_impl->storage().size()!=1,"[ERROR][Tensor.item<T>]%s","item can only be called from a Tensor with only one element\n");
1077 return this->_impl->storage().at<T>(0);
1078 }
1079
1081 template<class T>
1082 const T& item() const{
1083 cytnx_error_msg(this->_impl->storage().size()!=1,"[ERROR][Tensor.item<T>]%s","item can only be called from a Tensor with only one element\n");
1084 return this->_impl->storage().at<T>(0);
1085 }
1086
1087 const Scalar::Sproxy item() const{
1088 Scalar::Sproxy out(this->storage()._impl,0);
1089 return out;
1090 }
1091
1092 Scalar::Sproxy item(){
1093 Scalar::Sproxy out(this->storage()._impl,0);
1094 return out;
1095 }
1096
1098
1122 Tensor get(const std::vector<cytnx::Accessor> &accessors)const {
1123 Tensor out;
1124 out._impl = this->_impl->get(accessors);
1125 return out;
1126 }
1127
1128 /*
1129 Tensor get_v2(const std::vector<cytnx::Accessor> &accessors) const{
1130 Tensor out;
1131 out._impl = this->_impl->get_v2(accessors);
1132 return out;
1133 }
1134 */
1135
1155 void set(const std::vector<cytnx::Accessor> &accessors, const Tensor &rhs){
1156 this->_impl->set(accessors,rhs._impl);
1157 }
1158
1159
1160
1178 template<class T>
1179 void set(const std::vector<cytnx::Accessor> &accessors, const T &rc){
1180 this->_impl->set(accessors,rc);
1181 }
1183 template<class T>
1184 void set(const std::initializer_list<cytnx::Accessor> &accessors, const T &rc){
1185 std::vector<cytnx::Accessor> args = accessors;
1186 this->set(args,rc);
1187 }
1189
1199 return this->_impl->storage();
1200 }
1201
1217 template<class T>
1218 void fill(const T& val){
1219 this->_impl->fill(val);
1220 }
1221
1222
1223 bool equiv(const Tensor &rhs){
1224 if(this->shape() != rhs.shape()) return false;
1225 return true;
1226 }
1227
1228 Tensor real();
1229 Tensor imag();
1230
1231
1232
1233 // Arithmic:
1234 template<class T>
1236 template<class T>
1238 template<class T>
1240 template<class T>
1242
1243
1244 //Tensor &operator+=(const Tproxy &rc);
1245 //Tensor &operator-=(const Tproxy &rc);
1246 //Tensor &operator*=(const Tproxy &rc);
1247 //Tensor &operator/=(const Tproxy &rc);
1248 /*
1249 Tensor operator+(const Tproxy &rc){
1250 return *this + Tensor(rc);
1251 }
1252 Tensor operator-(const Tproxy &rc){
1253 return *this - Tensor(rc);
1254 }
1255 Tensor operator*(const Tproxy &rc){
1256 return *this * Tensor(rc);
1257 }
1258 Tensor operator/(const Tproxy &rc){
1259 return *this / Tensor(rc);
1260 }
1261 */
1262
1263 template<class T>
1264 Tensor Add(const T &rhs){
1265 return *this + rhs;
1266 }
1267 template<class T>
1268 Tensor& Add_(const T &rhs){
1269 return *this += rhs;
1270 }
1271
1272 template<class T>
1273 Tensor Sub(const T &rhs){
1274 return *this - rhs;
1275 }
1276 template<class T>
1277 Tensor& Sub_(const T &rhs){
1278 return *this -= rhs;
1279 }
1280
1281 template<class T>
1282 Tensor Mul(const T &rhs){
1283 return *this * rhs;
1284 }
1285 template<class T>
1286 Tensor& Mul_(const T &rhs){
1287 return *this *= rhs;
1288 }
1289
1290 template<class T>
1291 Tensor Div(const T &rhs){
1292 return *this / rhs;
1293 }
1294 template<class T>
1295 Tensor& Div_(const T &rhs){
1296 return *this /= rhs;
1297 }
1298
1299 template<class T>
1300 Tensor Cpr(const T &rhs){
1301 return *this == rhs;
1302 }
1303
1304 //template<class T>
1305 //Tensor& Cpr_(const T &rhs){
1306 //
1307 // return *this == rhs;
1308 //}
1309
1310 template<class T>
1311 Tensor Mod(const T &rhs){
1312 return *this % rhs;
1313 }
1314
1316 return this->Mul(-1.);
1317 }
1318
1320 Tensor out = this->clone();
1321 out.contiguous_();
1322 out.reshape_({-1});
1323 return out;
1324 }
1325
1326 void flatten_(){
1327 this->contiguous_();
1328 this->reshape_({-1});
1329
1330 }
1331
1332
1333 void append(const Tensor &rhs){
1334 //Tensor in;
1335 if(!this->is_contiguous())
1336 this->contiguous_();
1337
1338 // check Tensor in shape:
1339 cytnx_error_msg(rhs.shape().size()==0 || this->shape().size()==0,"[ERROR] try to append a null Tensor.%s","\n");
1340 cytnx_error_msg(rhs.shape().size()!=(this->shape().size()-1),"[ERROR] try to append a Tensor with rank not match.%s","\n");
1341 cytnx_uint64 Nelem = 1;
1342 for(unsigned int i=0;i<rhs.shape().size();i++){
1343 cytnx_error_msg(rhs.shape()[i]!=this->shape()[i+1],"[ERROR] dimension mismatch @ rhs.rank: [%d] this: [%d] rhs: [%d]\n",i,this->shape()[i+1],rhs.shape()[i]);
1344 Nelem*=rhs.shape()[i];
1345 }
1346
1347 //check type:
1348 Tensor in;
1349 if(rhs.dtype() != this->dtype()){
1350 in = rhs.astype(this->dtype());
1351 if(!in.is_contiguous())
1352 in.contiguous_();
1353 }else{
1354 if(!in.is_contiguous())
1355 in = rhs.contiguous();
1356 else
1357 in = rhs;
1358 }
1359 this->_impl->_shape[0]+=1;
1360 cytnx_uint64 oldsize = this->_impl->_storage.size();
1361 this->_impl->_storage.resize(oldsize+Nelem);
1362 memcpy(((char*)this->_impl->_storage.data()) + oldsize*Type.typeSize(this->dtype())/sizeof(char),
1363 in._impl->_storage.data(),
1364 Type.typeSize(in.dtype())*Nelem);
1365
1366 }
1367 void append(const Storage &srhs){
1368 if(!this->is_contiguous())
1369 this->contiguous_();
1370
1371 // check Tensor in shape:
1372 cytnx_error_msg(srhs.size()==0 || this->shape().size()==0,"[ERROR] try to append a null Tensor.%s","\n");
1373 cytnx_error_msg((this->shape().size()-1)!=1,"[ERROR] append a storage to Tensor can only accept rank-2 Tensor.%s","\n");
1374 cytnx_error_msg(this->shape().back()!=srhs.size(),"[ERROR] Tensor dmension mismatch!%s","\n");
1375
1376
1377 //check type:
1378 Storage in;
1379 if(srhs.dtype() != this->dtype()){
1380 in = srhs.astype(this->dtype());
1381 }else{
1382 in = srhs;
1383 }
1384 this->_impl->_shape[0]+=1;
1385 cytnx_uint64 oldsize = this->_impl->_storage.size();
1386 this->_impl->_storage.resize(oldsize+in.size());
1387 memcpy(((char*)this->_impl->_storage.data()) + oldsize*Type.typeSize(this->dtype())/sizeof(char),
1388 in._impl->Mem,
1389 Type.typeSize(in.dtype())*in.size());
1390
1391 }
1392 /*
1393 void append(const Tensor &rhs){
1394 // convert to the same type.
1395 Tensor in;
1396 if(rhs.dtype() != this->dtype()){
1397 in = rhs.astype(this->dtype());
1398 }else{
1399 in = rhs;
1400 }
1401
1402 // 1) check rank
1403 if(this->shape().size()==1){
1404 // check if rhs is a scalar tensor (only one element)
1405 cytnx_error_msg(!(rhs.shape().size()==1 && rhs.shape()[0]==1),"[ERROR] trying to append a scalar into multidimentional Tensor is not allow.\n Only rank-1 Tensor can accept scalar append.%s","\n");
1406 this->_impl->_shape[0]+=1;
1407 this->_impl->_storage.append(0);
1408
1409 }else{
1410 cytnx_error_msg(rhs.shape().size() != this->shape().size()-1,"[ERROR] try to append a Tensor with rank not match.%s","\n");
1411
1412 }
1413 cytnx_error_msg(!this->is_contiguous(),"[ERROR] append require the Tensor to be contiguous. suggestion: call contiguous() or contiguous_() first.","\n");
1414 }
1415 */
1416 template<class T>
1417 void append(const T &rhs){
1418 cytnx_error_msg(this->shape().size()!=1,"[ERROR] trying to append a scalar into multidimentional Tensor is not allow.\n Only rank-1 Tensor can accept scalar append.%s","\n");
1419 cytnx_error_msg(!this->is_contiguous(),"[ERROR] append require the Tensor to be contiguous. suggestion: call contiguous() or contiguous_() first.","\n");
1420 this->_impl->_shape[0]+=1;
1421 this->_impl->_storage.append(rhs);
1422 }
1423
1424 bool same_data(const Tensor &rhs) const;
1425
1426 // linalg:
1427 std::vector<Tensor> Svd(const bool &is_U=true, const bool &is_vT=true) const;
1428 std::vector<Tensor> Eigh(const bool &is_V=true,const bool &row_v=false) const;
1429 Tensor& InvM_();
1430 Tensor InvM() const;
1431 Tensor& Inv_(const double &clip);
1432 Tensor Inv(const double &clip) const;
1433
1434 Tensor& Conj_();
1435 Tensor Conj() const;
1436 Tensor& Exp_();
1437 Tensor Exp() const;
1438 Tensor Norm() const;
1439 Tensor Pow(const cytnx_double &p) const;
1440 Tensor& Pow_(const cytnx_double &p);
1441 Tensor Trace(const cytnx_uint64 &a=0, const cytnx_uint64 &b=1) const;
1442 Tensor Abs() const;
1443 Tensor& Abs_();
1444 Tensor Max() const;
1445 Tensor Min() const;
1446
1447 };// class Tensor
1448
1449 Tensor operator+(const Tensor &lhs, const Tensor::Tproxy &rhs);
1450 Tensor operator-(const Tensor &lhs, const Tensor::Tproxy &rhs);
1451 Tensor operator*(const Tensor &lhs, const Tensor::Tproxy &rhs);
1452 Tensor operator/(const Tensor &lhs, const Tensor::Tproxy &rhs);
1453
1454 Tensor operator+(const Tensor &lhs, const Scalar::Sproxy &rhs);
1455 Tensor operator-(const Tensor &lhs, const Scalar::Sproxy &rhs);
1456 Tensor operator*(const Tensor &lhs, const Scalar::Sproxy &rhs);
1457 Tensor operator/(const Tensor &lhs, const Scalar::Sproxy &rhs);
1458
1459 std::ostream& operator<<(std::ostream& os, const Tensor &in);
1460 std::ostream& operator<<(std::ostream& os, const Tensor::Tproxy &in);
1461 //{ os << Tensor(in);};
1462}
1463
1464#endif
Object that mimics the Python slice to access elements in C++ [this is for the C++ API only].
Definition Accessor.hpp:16
A memory storage with multi-type/multi-device support
Definition Storage.hpp:934
Storage astype(const unsigned int &new_type) const
cast the type of current Storage
Definition Storage.hpp:1052
const unsigned long long & size() const
the size ( no. of elements ) in the Storage
Definition Storage.hpp:1201
a tensor (multi-dimensional array)
Definition Tensor.hpp:333
void append(const Storage &srhs)
Definition Tensor.hpp:1367
Tensor & operator*=(const T &rc)
Tensor & Inv_(const double &clip)
Definition Tensor.cpp:1171
Tensor & operator/=(const T &rc)
Tensor operator-()
Definition Tensor.hpp:1315
void fill(const T &val)
fill all the element of current Tensor with the value.
Definition Tensor.hpp:1218
Tensor InvM() const
Definition Tensor.cpp:1168
bool same_data(const Tensor &rhs) const
Definition Tensor.cpp:1227
void to_(const int &device)
move the current Tensor to the device.
Definition Tensor.hpp:819
void append(const T &rhs)
Definition Tensor.hpp:1417
Tensor & operator-=(const T &rc)
Tensor & Add_(const T &rhs)
Definition Tensor.hpp:1268
Tensor Abs() const
Definition Tensor.cpp:1212
std::string device_str() const
the device (in string) of the Tensor
Definition Tensor.hpp:733
void reshape_(const std::vector< cytnx_int64 > &new_shape)
reshape the Tensor in place
Definition Tensor.hpp:927
Tensor contiguous_()
Make the Tensor contiguous by coalescing the memory (storage), in place.
Definition Tensor.hpp:906
static Tensor Load(const std::string &fname)
Load a Tensor from a file.
Definition Tensor.cpp:847
Tensor permute_(const std::vector< cytnx_uint64 > &rnks)
Definition Tensor.hpp:827
Tensor Mul(const T &rhs)
Definition Tensor.hpp:1282
unsigned int dtype() const
the dtype-id of the Tensor
Definition Tensor.hpp:712
Tensor Sub(const T &rhs)
Definition Tensor.hpp:1273
Tensor Inv(const double &clip) const
Definition Tensor.cpp:1175
Tensor contiguous() const
Make the Tensor contiguous by coalescing the memory (storage).
Definition Tensor.hpp:885
void Tofile(const std::string &fname) const
Definition Tensor.cpp:772
T & at(const std::vector< cytnx_uint64 > &locator)
[C++ only] get an element at a specific location.
Definition Tensor.hpp:1027
Tensor reshape(const std::vector< cytnx_int64 > &new_shape) const
return a new Tensor that is reshaped.
Definition Tensor.hpp:964
T & item()
get an element from a rank-0 Tensor
Definition Tensor.hpp:1075
Tensor clone() const
return a clone of the current Tensor.
Definition Tensor.hpp:770
std::vector< Tensor > Eigh(const bool &is_V=true, const bool &row_v=false) const
Definition Tensor.cpp:1159
void append(const Tensor &rhs)
Definition Tensor.hpp:1333
void set(const std::vector< cytnx::Accessor > &accessors, const Tensor &rhs)
set elements with the input Tensor using Accessor (C++ API) / slices (python API)
Definition Tensor.hpp:1155
Tensor Norm() const
Definition Tensor.cpp:1195
Tensor astype(const int &new_type) const
return a new Tensor that cast to different dtype.
Definition Tensor.hpp:995
Tensor & Div_(const T &rhs)
Definition Tensor.hpp:1295
Tensor & operator+=(const T &rc)
Tensor Conj() const
Definition Tensor.cpp:1184
Tensor Trace(const cytnx_uint64 &a=0, const cytnx_uint64 &b=1) const
Definition Tensor.cpp:1222
Tensor & Pow_(const cytnx_double &p)
Definition Tensor.cpp:1203
std::string dtype_str() const
the dtype (in string) of the Tensor
Definition Tensor.hpp:726
Tensor & Mul_(const T &rhs)
Definition Tensor.hpp:1286
cytnx_uint64 rank() const
the rank of the Tensor
Definition Tensor.hpp:748
const bool & is_contiguous() const
Definition Tensor.hpp:823
Tensor Exp() const
Definition Tensor.cpp:1192
Tensor & Abs_()
Definition Tensor.cpp:1208
Tensor Add(const T &rhs)
Definition Tensor.hpp:1264
void flatten_()
Definition Tensor.hpp:1326
void Save(const std::string &fname) const
Save current Tensor to file.
Definition Tensor.cpp:798
Tensor flatten() const
Definition Tensor.hpp:1319
Tensor & Conj_()
Definition Tensor.cpp:1180
Tensor Pow(const cytnx_double &p) const
Definition Tensor.cpp:1199
int device() const
the device-id of the Tensor
Definition Tensor.hpp:719
Tensor real()
Definition Tensor.cpp:898
Tensor imag()
Definition Tensor.cpp:905
Tensor to(const int &device) const
copy a tensor to new device
Definition Tensor.hpp:796
void Tofile(std::fstream &f) const
Tensor get(const std::vector< cytnx::Accessor > &accessors) const
get elements using Accessor (C++ API) / slices (python API)
Definition Tensor.hpp:1122
void set(const std::vector< cytnx::Accessor > &accessors, const T &rc)
set elements with the input constant using Accessor (C++ API) / slices (python API)
Definition Tensor.hpp:1179
Tensor Max() const
Definition Tensor.cpp:1215
Tensor permute(const std::vector< cytnx_uint64 > &rnks) const
perform tensor permute on the cytnx::Tensor and return a new instance.
Definition Tensor.hpp:855
Tensor Div(const T &rhs)
Definition Tensor.hpp:1291
Tensor Mod(const T &rhs)
Definition Tensor.hpp:1311
bool equiv(const Tensor &rhs)
Definition Tensor.hpp:1223
Tensor Cpr(const T &rhs)
Definition Tensor.hpp:1300
Tensor & Exp_()
Definition Tensor.cpp:1188
Tensor & InvM_()
Definition Tensor.cpp:1164
std::vector< Tensor > Svd(const bool &is_U=true, const bool &is_vT=true) const
Definition Tensor.cpp:1156
const std::vector< cytnx_uint64 > & shape() const
the shape of the Tensor
Definition Tensor.hpp:740
Tensor Min() const
Definition Tensor.cpp:1218
const T & at(const std::vector< cytnx_uint64 > &locator) const
Definition Tensor.hpp:1031
Storage & storage() const
return the storage of current Tensor.
Definition Tensor.hpp:1198
static Tensor from_storage(const Storage &in)
Definition Tensor.hpp:698
static Tensor Fromfile(const std::string &fname, const unsigned int &dtype, const cytnx_int64 &count=-1)
Definition Tensor.cpp:841
void Init(const std::vector< cytnx_uint64 > &shape, const unsigned int &dtype=Type.Double, const int &device=-1)
initialize a Tensor
Definition Tensor.hpp:688
Tensor(const std::vector< cytnx_uint64 > &shape, const unsigned int &dtype=Type.Double, const int &device=-1)
Definition Tensor.hpp:693
Tensor & Sub_(const T &rhs)
Definition Tensor.hpp:1277
#define cytnx_error_msg(is_true, format,...)
Definition cytnx_error.hpp:18
Definition Accessor.hpp:12
Device_class Device
Definition Device.cpp:105
cytnx::UniTensor operator*(const cytnx::UniTensor &Lt, const cytnx::UniTensor &Rt)
double cytnx_double
Definition Type.hpp:20
uint32_t cytnx_uint32
Definition Type.hpp:23
bool cytnx_bool
Definition Type.hpp:31
std::complex< double > cytnx_complex128
Definition Type.hpp:30
float cytnx_float
Definition Type.hpp:21
std::ostream & operator<<(std::ostream &os, const Scalar &in)
Definition Scalar.cpp:14
int16_t cytnx_int16
Definition Type.hpp:27
std::complex< float > cytnx_complex64
Definition Type.hpp:29
cytnx::UniTensor operator-(const cytnx::UniTensor &Lt, const cytnx::UniTensor &Rt)
int32_t cytnx_int32
Definition Type.hpp:26
uint16_t cytnx_uint16
Definition Type.hpp:24
uint64_t cytnx_uint64
Definition Type.hpp:22
int64_t cytnx_int64
Definition Type.hpp:25
Storage_init_interface __SII
Definition Storage.cpp:13
Type_class Type
Definition Type.cpp:137
cytnx::UniTensor operator+(const cytnx::UniTensor &Lt, const cytnx::UniTensor &Rt)
cytnx::UniTensor operator/(const cytnx::UniTensor &Lt, const cytnx::UniTensor &Rt)