Cytnx v0.7.3
Tensor.hpp
1#ifndef _H_Tensor_
2#define _H_Tensor_
3
4#include "Type.hpp"
5#include "cytnx_error.hpp"
6#include "Storage.hpp"
7#include "Device.hpp"
9#include <iostream>
10#include <fstream>
11#include "utils/vec_range.hpp"
12#include "utils/dynamic_arg_resolver.hpp"
13//#include "linalg.hpp"
14#include "Accessor.hpp"
15#include <vector>
16#include <initializer_list>
17#include <string>
18#include "Scalar.hpp"
19
20namespace cytnx{
21
22
24 // real implementation
25 class Tensor_impl: public intrusive_ptr_base<Tensor_impl>{
26 private:
27
28 //Interface:
29 Storage_init_interface __SII;
30
31
32 //Memory:
33 Storage _storage;
34
35 //tensor shape
36 std::vector<cytnx_uint64> _shape;
37
38 // pseudo-perm info
39 std::vector<cytnx_uint64> _mapper;
40 std::vector<cytnx_uint64> _invmapper;
41 bool _contiguous;
42
43 public:
44
45 friend class Tensor;
46 boost::intrusive_ptr<Tensor_impl> _clone_meta_only() const{
47 boost::intrusive_ptr<Tensor_impl> out(new Tensor_impl());
48 out->_mapper = this->_mapper;
49 out->_invmapper = this->_invmapper;
50 out->_shape = this->_shape;
51 out->_contiguous = this->_contiguous;
52 return out;
53 }
54 Tensor_impl(): _contiguous(true){};
55
56 void Init(const std::vector<cytnx_uint64> &shape, const unsigned int &dtype=Type.Double, int device=-1);
57 void Init(const Storage &in);
58 /*
59 template<class T>
60 void From_vec(const T &ndvec){
61 cytnx_error_msg(std::string(typeid(T).name()).find("vector") == std::string::npos,"[ERROR][Tensor][From_vec] the input argument should be a nd vector.%s","\n");
62 //dispatch the rank!:
63
64
65
66 }
67 */
68 //clone&assignment constr., use intrusive_ptr's
69 Tensor_impl(const Tensor_impl &rhs);
70 Tensor_impl& operator=(const Tensor_impl &rhs); // add const
71
72 unsigned int dtype() const{
73 return this->_storage.dtype();
74 }
75 int device() const {
76 return this->_storage.device();
77 }
78
79 std::string dtype_str() const {
80 return Type.getname(this->_storage.dtype());
81 }
82 std::string device_str() const{
83 return Device.getname(this->_storage.device());
84 }
85
86 const std::vector<cytnx_uint64>& shape() const{
87 return _shape;
88 }
89
90 const bool& is_contiguous() const{
91 return this->_contiguous;
92 }
93
94 const std::vector<cytnx_uint64>& mapper() const{
95 return this->_mapper;
96 }
97 const std::vector<cytnx_uint64>& invmapper() const{
98 return this->_invmapper;
99 }
100 Storage& storage(){
101 return _storage;
102 }
103
104 const Storage& storage() const{
105 return _storage;
106 }
107
108 boost::intrusive_ptr<Tensor_impl> clone() const{
109 boost::intrusive_ptr<Tensor_impl> out = this->_clone_meta_only();
110 out->_storage = this->_storage.clone();
111 return out;
112 }
113
114
115 void to_(const int &device){
116 this->_storage.to_(device);
117 }
118 boost::intrusive_ptr<Tensor_impl> to(const int &device){
119 if(this->device()==device){
120 //boost::intrusive_ptr<Tensor_impl> out(this);
121 return this;
122 }else{
123
124 boost::intrusive_ptr<Tensor_impl> out = this->_clone_meta_only();
125 out->_storage = this->_storage.to(device);
126 return out;
127 }
128 }
129
130 void permute_(const std::vector<cytnx_uint64> &rnks);
131
132 boost::intrusive_ptr<Tensor_impl> permute(const std::vector<cytnx_uint64> &rnks);
133
134 template<class T>
135 T& at(const std::vector<cytnx_uint64> &locator) const {
136 cytnx_error_msg(locator.size() != this->_shape.size(), "%s", "The input index does not match Tensor's rank.");
137
138 cytnx_uint64 RealRank,mtplyr;
139 std::vector<cytnx_uint64> c_shape(this->_shape.size());
140 std::vector<cytnx_uint64> c_loc(this->_shape.size());
141
142 RealRank=0;
143 mtplyr = 1;
144
145 for(cytnx_int64 i=this->_shape.size()-1; i>=0; i--){
146 if(locator[i]>=this->_shape[i]){
147 cytnx_error_msg(true, "%s", "Attempting to access out-of-bound index in Tensor.");
148 }
149 c_shape[i] = this->_shape[this->_invmapper[i]];
150 c_loc[i] = locator[this->_invmapper[i]];
151 RealRank += mtplyr*c_loc[i];
152 mtplyr *= c_shape[i];
153 }
154 return this->_storage.at<T>(RealRank);
155 }
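// Illustrative sketch (not part of the original header) of the offset arithmetic in
// at<T>() above, assuming permute_ fills _mapper/_invmapper in the convention implied
// by contiguous() below; names and values are examples only:
//
//   Tensor A({2,3});               // contiguous, _mapper == _invmapper == {0,1}
//   A.permute_({1,0});             // view shape {3,2}; memory still laid out as {2,3}
//   A.at<double>({2,1});           // maps back to memory coords {1,2} -> offset 1*3 + 2 = 5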
156
157 boost::intrusive_ptr<Tensor_impl> get(const std::vector<cytnx::Accessor> &accessors);
158 boost::intrusive_ptr<Tensor_impl> get_deprecated(const std::vector<cytnx::Accessor> &accessors);
159 void set(const std::vector<cytnx::Accessor> &accessors, const boost::intrusive_ptr<Tensor_impl> &rhs);
160
161 template<class T>
162 void set(const std::vector<cytnx::Accessor> &accessors, const T& rc);
163
164 void set(const std::vector<cytnx::Accessor> &accessors, const Scalar::Sproxy& rc);
165
166 template<class Tx>
167 void fill(const Tx& val){
168 this->storage().fill(val);
169 }
170
171 boost::intrusive_ptr<Tensor_impl> contiguous(){
172 // returns a new instance when acting on a non-contiguous tensor
173 // returns self when acting on a contiguous tensor
174 if(this->_contiguous){
175 boost::intrusive_ptr<Tensor_impl> out(this);
176 //out->_storage = this->_storage;
177 return out;
178 }else{
179 boost::intrusive_ptr<Tensor_impl> out(new Tensor_impl());
180 std::vector<cytnx_uint64> oldshape(this->_shape.size());
181 for(cytnx_uint64 i=0;i<this->_shape.size();i++){
182 oldshape[i] = this->_shape[this->_invmapper[i]];
183 }
184
185 out->_storage._impl = this->_storage._impl->Move_memory(oldshape,this->_mapper, this->_invmapper);
186 //std::cout << out->_storage << std::endl;
187 out->_invmapper = vec_range(this->_invmapper.size());
188 out->_mapper = out->_invmapper;
189 out->_shape = this->_shape;
190 out->_contiguous = true;
191 return out;
192 }
193 }
194
195 void contiguous_(){
196 // coalesces the memory in place if the tensor is non-contiguous
197 // no-op if the tensor is already contiguous
198 if(!this->_contiguous){
199 std::vector<cytnx_uint64> oldshape(this->_shape.size());
200 for(cytnx_uint64 i=0;i<this->_shape.size();i++){
201 oldshape[i] = this->_shape[this->_invmapper[i]];
202 }
203
204 this->_storage._impl = this->_storage._impl->Move_memory(oldshape,this->_mapper, this->_invmapper);
205 this->_mapper = vec_range(this->_invmapper.size());
206 this->_invmapper = this->_mapper;
207 this->_contiguous = true;
208 }
209 }
210
211
212 void reshape_(const std::vector<cytnx_int64> &new_shape){
213 if(!this->_contiguous){
214 this->contiguous_();
215 }
216 std::vector<cytnx_uint64> result_shape(new_shape.size());
217 cytnx_uint64 new_N = 1;
218 bool has_undetermine = false;
219 unsigned int Udet_id = 0;
220 for(int i=0;i<new_shape.size();i++){
221 if(new_shape[i]<0){
222 if(new_shape[i]!=-1) cytnx_error_msg(true,"%s","[ERROR] reshape dimensions must be > 0, with at most one undetermined rank specified as -1");
223 if(has_undetermine) cytnx_error_msg(true,"%s","[ERROR] reshape can specify at most one undetermined rank as -1");
224 Udet_id = i;
225 has_undetermine = true;
226 }else{
227 new_N *= new_shape[i];
228 result_shape[i] = new_shape[i];
229 }
230 }
231
232
233 if(has_undetermine){
234 cytnx_error_msg(new_N > this->_storage.size(),"%s","[ERROR] new shape exceeds the total number of elements.");
235 cytnx_error_msg(this->_storage.size()%new_N,"%s","[ERROR] size mismatch when reshaping with an undetermined dimension");
236 result_shape[Udet_id] = this->_storage.size()/new_N;
237 }else{
238 cytnx_error_msg(new_N != this->_storage.size(),"%s","[ERROR] new shape does not match the number of elements.");
239 }
240
241 this->_shape = result_shape;
242 this->_mapper = vec_range(result_shape.size());
243 this->_invmapper = this->_mapper;
244 }
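// Illustrative sketch (not part of the original header) of the -1 inference above;
// names and sizes are examples only:
//
//   // for a tensor holding 24 elements:
//   t.reshape_({2, -1, 3});        // new_N = 2*3 = 6, Udet_id = 1
//                                  // result_shape[1] = 24/6 = 4  ->  shape {2,4,3}
//   // more than one -1, or a new_N that does not divide 24, triggers the errors above.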
245
246
247 boost::intrusive_ptr<Tensor_impl> reshape(const std::vector<cytnx_int64> &new_shape){
248 boost::intrusive_ptr<Tensor_impl> out(new Tensor_impl());
249 if(this->is_contiguous()){
250 out = this->_clone_meta_only();
251 out->_storage = this->_storage;
252 }else{
253 out = this->contiguous();
254 }
255 //out = this->clone();
256
257 out->reshape_(new_shape);
258 return out;
259 }
260
261
262
263 boost::intrusive_ptr<Tensor_impl> astype(const int& new_type){
264 //boost::intrusive_ptr<Tensor_impl> out(new Tensor_impl());
265 //out->_storage = this->_storage.astype(new_type);
266 if(this->dtype() == new_type){
267 return this;
268 }else{
269 boost::intrusive_ptr<Tensor_impl> out = this->_clone_meta_only();
270 out->_storage = this->_storage.astype(new_type);
271 return out;
272 }
273 }
274
275 };
277
278 class Tensor;
279
281 // [Note] these are fwd from linalg.hpp
282 template<class T> Tensor operator+(const Tensor &lhs, const T &rc);
283 template<class T> Tensor operator-(const Tensor &lhs, const T &rhs);
284 template<class T> Tensor operator*(const Tensor &lhs, const T &rhs);
285 template<class T> Tensor operator/(const Tensor &lhs, const T &rhs);
287
289 class Tensor{
290 private:
291 public:
292
294 // this is a proxy class that allows get/set of elements using [], as in Python
295 struct Tproxy
296 {
297 boost::intrusive_ptr<Tensor_impl> _insimpl;
298 std::vector<cytnx::Accessor> _accs;
299 Tproxy(boost::intrusive_ptr<Tensor_impl> _ptr,const std::vector<cytnx::Accessor> &accs) : _insimpl(_ptr), _accs(accs){}
300
301 // when used to set elems:
302 const Tensor& operator=(const Tensor &rhs){
303 this->_insimpl->set(_accs,rhs._impl);
304 return rhs;
305 }
306
307 template<class T>
308 const T& operator=(const T &rc){
309 this->_insimpl->set(_accs,rc);
310 return rc;
311 }
312 const Tproxy& operator=(const Tproxy &rc){
313 Tensor tmp = Tensor(rc);
314 this->_insimpl->set(_accs,tmp._impl);
315 return rc;
316 }
317
318
319 template<class T>
320 Tensor operator+=(const T &rc){
321 Tensor self;
322 self._impl = _insimpl->get(_accs);
323 self += rc;
324 _insimpl->set(_accs,self._impl);
325 self._impl = this->_insimpl;
326 return self;
327 }
328 Tensor operator+=(const Tproxy &rc);
329
330 template<class T>
331 Tensor operator-=(const T &rc){
332 Tensor self;
333 self._impl = _insimpl->get(_accs);
334 self -= rc;
335 _insimpl->set(_accs,self._impl);
336 self._impl = this->_insimpl;
337 return self;
338 }
339 Tensor operator-=(const Tproxy &rc);
340
341 template<class T>
342 Tensor operator/=(const T &rc){
343 Tensor self;
344 self._impl = _insimpl->get(_accs);
345 self /= rc;
346 _insimpl->set(_accs,self._impl);
347 self._impl = this->_insimpl;
348 return self;
349 }
350 Tensor operator/=(const Tproxy &rc);
351
352 template<class T>
353 Tensor operator*=(const T &rc){
354 Tensor self;
355 self._impl = _insimpl->get(_accs);
356 self *= rc;
357 _insimpl->set(_accs,self._impl);
358 self._impl = this->_insimpl;
359 return self;
360 }
361 Tensor operator*=(const Tproxy &rc);
362
363
364 // alias to resolve conflict with operator overload for rc=Tensor
365 /*
366 template<class T>
367 Tensor _operatorADD(const T &rc) const{
368 Tensor out;
369 out._impl = _insimpl->get(_accs);
370 return out.Add(rc);
371 }
372 */
373 Tensor operator+(const cytnx_complex128 &rc) const;//{return this->_operatorADD(rc);};
374 Tensor operator+(const cytnx_complex64 &rc) const;//{return this->_operatorADD(rc);};
375 Tensor operator+(const cytnx_double &rc) const;//{return this->_operatorADD(rc);};
376 Tensor operator+(const cytnx_float &rc) const;//{return this->_operatorADD(rc);};
377 Tensor operator+(const cytnx_uint64 &rc) const;//{return this->_operatorADD(rc);};
378 Tensor operator+(const cytnx_int64 &rc) const;//{return this->_operatorADD(rc);};
379 Tensor operator+(const cytnx_uint32 &rc) const;//{return this->_operatorADD(rc);};
380 Tensor operator+(const cytnx_int32 &rc) const;//{return this->_operatorADD(rc);};
381 Tensor operator+(const cytnx_uint16 &rc) const;//{return this->_operatorADD(rc);};
382 Tensor operator+(const cytnx_int16 &rc) const;//{return this->_operatorADD(rc);};
383 Tensor operator+(const cytnx_bool &rc) const;//{return this->_operatorADD(rc);};
384 Tensor operator+(const Tproxy &rc) const;
385
386 /*
387 template<class T>
388 Tensor _operatorSUB(const T &rc) const{
389 Tensor out;
390 out._impl = _insimpl->get(_accs);
391 return out.Sub(rc);
392 }
393 */
394 Tensor operator-(const cytnx_complex128 &rc) const;//{return this->_operatorSUB(rc);};
395 Tensor operator-(const cytnx_complex64 &rc) const;//{return this->_operatorSUB(rc);};
396 Tensor operator-(const cytnx_double &rc) const;//{return this->_operatorSUB(rc);};
397 Tensor operator-(const cytnx_float &rc) const;//{return this->_operatorSUB(rc);};
398 Tensor operator-(const cytnx_uint64 &rc) const;//{return this->_operatorSUB(rc);};
399 Tensor operator-(const cytnx_int64 &rc) const;//{return this->_operatorSUB(rc);};
400 Tensor operator-(const cytnx_uint32 &rc) const;//{return this->_operatorSUB(rc);};
401 Tensor operator-(const cytnx_int32 &rc) const;//{return this->_operatorSUB(rc);};
402 Tensor operator-(const cytnx_uint16 &rc) const;//{return this->_operatorSUB(rc);};
403 Tensor operator-(const cytnx_int16 &rc) const;//{return this->_operatorSUB(rc);};
404 Tensor operator-(const cytnx_bool &rc) const;//{return this->_operatorSUB(rc);};
405 Tensor operator-(const Tproxy &rc) const;
406
407 Tensor operator-() const;
408
409 /*
410 template<class T>
411 Tensor _operatorMUL(const T &rc) const{
412 Tensor out;
413 out._impl = _insimpl->get(_accs);
414 return out.Mul(rc);
415 }
416 */
417 Tensor operator*(const cytnx_complex128 &rc) const;//{return this->_operatorMUL(rc);};
418 Tensor operator*(const cytnx_complex64 &rc) const;//{return this->_operatorMUL(rc);};
419 Tensor operator*(const cytnx_double &rc) const;//{return this->_operatorMUL(rc);};
420 Tensor operator*(const cytnx_float &rc) const;//{return this->_operatorMUL(rc);};
421 Tensor operator*(const cytnx_uint64 &rc) const;//{return this->_operatorMUL(rc);};
422 Tensor operator*(const cytnx_int64 &rc) const;//{return this->_operatorMUL(rc);};
423 Tensor operator*(const cytnx_uint32 &rc) const;//{return this->_operatorMUL(rc);};
424 Tensor operator*(const cytnx_int32 &rc) const;//{return this->_operatorMUL(rc);};
425 Tensor operator*(const cytnx_uint16 &rc) const;//{return this->_operatorMUL(rc);};
426 Tensor operator*(const cytnx_int16 &rc) const;//{return this->_operatorMUL(rc);};
427 Tensor operator*(const cytnx_bool &rc) const;//{return this->_operatorMUL(rc);};
428 Tensor operator*(const Tproxy &rc) const;
429
430 /*
431 template<class T>
432 Tensor _operatorDIV(const T &rc) const{
433 Tensor out;
434 out._impl = _insimpl->get(_accs);
435 return out.Div(rc);
436 }
437 */
438 Tensor operator/(const cytnx_complex128 &rc) const;//{return this->_operatorDIV(rc);};
439 Tensor operator/(const cytnx_complex64 &rc) const;//{return this->_operatorDIV(rc);};
440 Tensor operator/(const cytnx_double &rc) const;//{return this->_operatorDIV(rc);};
441 Tensor operator/(const cytnx_float &rc) const;//{return this->_operatorDIV(rc);};
442 Tensor operator/(const cytnx_uint64 &rc) const;//{return this->_operatorDIV(rc);};
443 Tensor operator/(const cytnx_int64 &rc) const;//{return this->_operatorDIV(rc);};
444 Tensor operator/(const cytnx_uint32 &rc) const;//{return this->_operatorDIV(rc);};
445 Tensor operator/(const cytnx_int32 &rc) const;//{return this->_operatorDIV(rc);};
446 Tensor operator/(const cytnx_uint16 &rc) const;//{return this->_operatorDIV(rc);};
447 Tensor operator/(const cytnx_int16 &rc) const;//{return this->_operatorDIV(rc);};
448 Tensor operator/(const cytnx_bool &rc) const;//{return this->_operatorDIV(rc);};
449 Tensor operator/(const Tproxy &rc) const;
450
451 template<class T>
452 T item() const{
453 Tensor out;
454 out._impl = _insimpl->get(_accs);
455 return out.item<T>();
456 }
457
458 Scalar::Sproxy item() const{
459 Tensor out;
460 out._impl = _insimpl;
461 return out.item();
462 }
463
464 // when used to get elems:
465 operator Tensor () const{
466 Tensor out;
467 out._impl = _insimpl->get(_accs);
468 return out;
469 }
470
471 };// proxy class of Tensor.
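// Usage sketch of the proxy (illustrative, not part of the original header); assumes a
// rank-2 double Tensor and the Python-style operator() defined just below:
//
//   Tensor A({3,3});
//   A(0,0) = 1.5;                        // Tproxy::operator= -> Tensor_impl::set()
//   A(1,":") += 2;                       // read-modify-write via get()/set()
//   Tensor row = A(1,":");               // conversion operator calls get()
//   double v  = A(0,0).item<double>();   // single-element read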
472
474
476 // these overloads follow the Python-style indexing
477 //----------------------------------------
478 template<class ... Ts>
479 Tproxy operator()(const std::string &e1, const Ts&...elems){
480 //std::cout << e1 << std::endl;
481 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1,elems...);
482 return (*this)[tmp];
483 }
484 template<class ... Ts>
485 Tproxy operator()(const cytnx_int64 &e1, const Ts&...elems){
486 //std::cout << e1<< std::endl;
487 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1,elems...);
488 return (*this)[tmp];
489 }
490 template<class ... Ts>
491 Tproxy operator()(const cytnx::Accessor &e1, const Ts&...elems){
492 //std::cout << e1 << std::endl;
493 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1,elems...);
494 return (*this)[tmp];
495 }
496
497 template<class ... Ts>
498 const Tproxy operator()(const cytnx_int64 &e1, const Ts&...elems) const{
499 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1,elems...);
500 return (*this)[tmp];
501 }
502 template<class ... Ts>
503 const Tproxy operator()(const cytnx::Accessor &e1, const Ts&...elems) const{
504 std::vector<cytnx::Accessor> tmp = Indices_resolver(e1,elems...);
505 return (*this)[tmp];
506 }
507 //-----------------------------------------
508
509
510 Tproxy operator[](const std::initializer_list<cytnx::Accessor> &accs){
511 std::vector<cytnx::Accessor> tmp = accs;
512 return (*this)[tmp];
513 }
514 Tproxy operator[](const std::vector<cytnx::Accessor> &accs){
515 return Tproxy(this->_impl,accs);
516 }
517
518
519 const Tproxy operator[](const std::vector<cytnx::Accessor> &accs) const{
520 return Tproxy(this->_impl,accs);
521 }
522 const Tproxy operator[](const std::initializer_list<cytnx::Accessor> &accs) const{
523 std::vector<cytnx::Accessor> tmp = accs;
524 return (*this)[tmp];
525 }
526
527 Tproxy operator[](const std::initializer_list<cytnx_int64> &accs){
528 std::vector<cytnx_int64> tmp = accs;
529 return (*this)[tmp];
530 }
531 Tproxy operator[](const std::vector<cytnx_int64> &accs){
532 std::vector<cytnx::Accessor> acc_in;
533 for(int i=0;i<accs.size();i++){
534 acc_in.push_back(cytnx::Accessor(accs[i]));
535 }
536 return Tproxy(this->_impl,acc_in);
537 }
538 const Tproxy operator[](const std::initializer_list<cytnx_int64> &accs) const{
539 std::vector<cytnx_int64> tmp = accs;
540 return (*this)[tmp];
541 }
542 const Tproxy operator[](const std::vector<cytnx_int64> &accs) const{
543 std::vector<cytnx::Accessor> acc_in;
544 for(int i=0;i<accs.size();i++){
545 acc_in.push_back(cytnx::Accessor(accs[i]));
546 }
547 return Tproxy(this->_impl,acc_in);
548 }
550 //-------------------------------------------
551
552
554 void _Save(std::fstream &f) const;
555 void _Load(std::fstream &f);
556
558
566 void Save(const std::string &fname) const;
567 void Save(const char* fname) const;
568 void Tofile(const std::string &fname) const;
569 void Tofile(const char* fname) const;
570 void Tofile(std::fstream &f) const;
579 static Tensor Load(const std::string &fname);
580 static Tensor Load(const char* fname);
581 static Tensor Fromfile(const std::string &fname, const unsigned int &dtype, const cytnx_int64 &count=-1);
582 static Tensor Fromfile(const char* fname, const unsigned int &dtype, const cytnx_int64 &count=-1);
583
584 //static Tensor Frombinary(const std::string &fname);
585
586
587
589 boost::intrusive_ptr<Tensor_impl> _impl;
590 Tensor(): _impl(new Tensor_impl()){};
591 Tensor(const Tensor &rhs){
592 _impl = rhs._impl;
593 }
594
595 /*
596 template<class Tp>
597 Tensor(const std::initializer_list<Tp> &rhs){
598 Storage stmp = std::vector<Tp>(rhs);
599 boost::intrusive_ptr<Tensor_impl> tmp(new Tensor_impl());
600 tmp->Init(stmp);
601 this->_impl = tmp;
602 }
603 */
604
605 Tensor& operator=(const Tensor &rhs){
606 _impl = rhs._impl;
607 return *this;
608 }
609
610 void operator=(const Tproxy &rhsp){ // this is used to handle proxy assignment
611 this->_impl = rhsp._insimpl->get(rhsp._accs);
612
613 }
615
617 //default device==Device.cpu (-1)
637 void Init(const std::vector<cytnx_uint64> &shape, const unsigned int &dtype=Type.Double, const int &device=-1){
638 boost::intrusive_ptr<Tensor_impl> tmp(new Tensor_impl());
639 this->_impl = tmp;
640 this->_impl->Init(shape,dtype,device);
641 }
642 Tensor(const std::vector<cytnx_uint64> &shape, const unsigned int &dtype=Type.Double, const int &device=-1):_impl(new Tensor_impl()){
643 this->Init(shape,dtype,device);
644 }
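// Construction sketch (illustrative, not part of the original header); Type.Double and
// device = Device.cpu (-1) are the defaults declared above:
//
//   Tensor A({2,3});                         // rank-2, double, on CPU
//   Tensor B({4,4}, Type.ComplexDouble);     // assumes Type.ComplexDouble from Type.hpp
//   A.fill(0.);                              // see fill() further below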
646
647 static Tensor from_storage(const Storage &in){
648 Tensor out;
649 boost::intrusive_ptr<Tensor_impl> tmp(new Tensor_impl());
650 out._impl = tmp;
651 out._impl->Init(in);
652 return out;
653 }
654
655
661 unsigned int dtype() const {return this->_impl->dtype();}
662
668 int device() const { return this->_impl->device();}
669
675 std::string dtype_str() const { return this->_impl->dtype_str();}
676
682 std::string device_str() const{ return this->_impl->device_str();}
683
689 const std::vector<cytnx_uint64>& shape() const{
690 return this->_impl->shape();
691 }
692
697 cytnx_uint64 rank() const{
698 return this->_impl->shape().size();
699 }
700
719 Tensor clone() const{
720 Tensor out;
721 out._impl = this->_impl->clone();
722 return out;
723 }
724
745 Tensor to(const int &device) const{
746 Tensor out;
747 out._impl = this->_impl->to(device);
748 return out;
749 }
750
768 void to_(const int &device){
769 this->_impl->to_(device);
770 }
771
772 const bool& is_contiguous() const{
773 return this->_impl->is_contiguous();
774 }
775
776 Tensor permute_(const std::vector<cytnx_uint64> &rnks){
777 this->_impl->permute_(rnks);
778 return *this;
779 }
781 template<class ... Ts>
782 Tensor permute_(const cytnx_uint64 &e1, const Ts&... elems){
783 std::vector<cytnx_uint64> argv = dynamic_arg_uint64_resolver(e1,elems...);
784 this->_impl->permute_(argv);
785 return *this;
786 }
788
804 Tensor permute(const std::vector<cytnx_uint64> &rnks) const{
805 Tensor out;
806 out._impl = this->_impl->permute(rnks);
807 return out;
808 }
810 template<class ... Ts>
811 Tensor permute(const cytnx_uint64 &e1, const Ts&... elems) const{
812 std::vector<cytnx_uint64> argv = dynamic_arg_uint64_resolver(e1,elems...);
813 return this->permute(argv);
814 }
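// Permute sketch (illustrative, not part of the original header); names are examples only:
//
//   Tensor A({2,3,4});
//   Tensor B = A.permute({2,0,1});   // new instance with shape {4,2,3}; call contiguous()
//                                    // afterwards if coalesced memory is needed
//   A.permute_(1,2,0);               // variadic, in-place meta update -> shape {3,4,2}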
816
817
834 Tensor contiguous() const{
835 Tensor out;
836 out._impl = this->_impl->contiguous();
837 return out;
838 }
839
855 Tensor contiguous_(){
856 this->_impl->contiguous_();
857 return *this;
858 }
859
876 void reshape_(const std::vector<cytnx_int64> &new_shape){
877 this->_impl->reshape_(new_shape);
878 }
880 void reshape_(const std::vector<cytnx_uint64> &new_shape){
881 std::vector<cytnx_int64> shape(new_shape.begin(),new_shape.end());
882 this->_impl->reshape_(shape);
883 }
884 void reshape_(const std::initializer_list<cytnx_int64> &new_shape){
885 std::vector<cytnx_int64> shape = new_shape;
886 this->_impl->reshape_(shape);
887 }
888 template<class ...Ts>
889 void reshape_(const cytnx_int64 &e1, const Ts...elems){
890 std::vector<cytnx_int64> shape = dynamic_arg_int64_resolver(e1,elems...);
891 //std::cout << shape << std::endl;
892 this->_impl->reshape_(shape);
893 }
895
913 Tensor reshape(const std::vector<cytnx_int64> &new_shape) const {
914 Tensor out;
915 out._impl = this->_impl->reshape(new_shape);
916 return out;
917 }
919 template<class ... Ts>
920 Tensor reshape(const cytnx_int64 &e1, const Ts&...elems) const{
921 std::vector<cytnx_int64> argv = dynamic_arg_int64_resolver(e1,elems...);
922 return this->reshape(argv);
923 }
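// Reshape sketch (illustrative, not part of the original header); a 24-element tensor
// is assumed:
//
//   Tensor A({2,3,4});
//   Tensor B = A.reshape(6,4);       // new Tensor with shape {6,4}
//   A.reshape_({4,-1});              // in place; the -1 is inferred as 6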
925
944 Tensor astype(const int &new_type) const{
945 Tensor out;
946 out._impl = this->_impl->astype(new_type);
947 return out;
948 }
949
950
951
952 //Tensor diagonal(){
953 // for(unsigned int i=0;i<this->shape().size();i++){
954 // if(this->shape()[i] != this->shape()[0],"[ERROR] Tensor.diagonal() can only be called when the subject has equal dimension in each rank.%s","\n");
955 // }
956 //
957 //}
958
975 template<class T>
976 T& at(const std::vector<cytnx_uint64> &locator){
977 return this->_impl->at<T>(locator);
978 }
979 template<class T>
980 const T& at(const std::vector<cytnx_uint64> &locator) const{
981 return this->_impl->at<T>(locator);
982 }
984 template<class T, class...Ts>
985 const T& at(const cytnx_uint64 &e1, const Ts&...elems) const{
986 std::vector<cytnx_uint64> argv = dynamic_arg_uint64_resolver(e1,elems...);
987 return this->at<T>(argv);
988 }
989 template<class T, class...Ts>
990 T& at(const cytnx_uint64 &e1, const Ts&...elems){
991 std::vector<cytnx_uint64> argv = dynamic_arg_uint64_resolver(e1,elems...);
992 return this->at<T>(argv);
993 }
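// Element-access sketch (illustrative, not part of the original header); the template
// argument must match the Tensor dtype (double for Type.Double):
//
//   Tensor A({2,3});
//   A.at<double>(1,2) = 3.14;        // variadic form
//   double x = A.at<double>({1,2});  // vector form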
995
1015 template<class T>
1016 T& item(){
1017 cytnx_error_msg(this->_impl->storage().size()!=1,"[ERROR][Tensor.item<T>]%s","item can only be called from a Tensor with only one element\n");
1018 return this->_impl->storage().at<T>(0);
1019 }
1020
1022 template<class T>
1023 const T& item() const{
1024 cytnx_error_msg(this->_impl->storage().size()!=1,"[ERROR][Tensor.item<T>]%s","item can only be called from a Tensor with only one element\n");
1025 return this->_impl->storage().at<T>(0);
1026 }
1027
1028 const Scalar::Sproxy item() const{
1029 Scalar::Sproxy out(this->storage()._impl,0);
1030 return out;
1031 }
1032
1033 Scalar::Sproxy item(){
1034 Scalar::Sproxy out(this->storage()._impl,0);
1035 return out;
1036 }
1037
1039
1063 Tensor get(const std::vector<cytnx::Accessor> &accessors)const {
1064 Tensor out;
1065 out._impl = this->_impl->get(accessors);
1066 return out;
1067 }
1068
1069 /*
1070 Tensor get_v2(const std::vector<cytnx::Accessor> &accessors) const{
1071 Tensor out;
1072 out._impl = this->_impl->get_v2(accessors);
1073 return out;
1074 }
1075 */
1076
1096 void set(const std::vector<cytnx::Accessor> &accessors, const Tensor &rhs){
1097 this->_impl->set(accessors,rhs._impl);
1098 }
1099
1100
1101
1119 template<class T>
1120 void set(const std::vector<cytnx::Accessor> &accessors, const T &rc){
1121 this->_impl->set(accessors,rc);
1122 }
1124 template<class T>
1125 void set(const std::initializer_list<cytnx::Accessor> &accessors, const T &rc){
1126 std::vector<cytnx::Accessor> args = accessors;
1127 this->set(args,rc);
1128 }
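// get/set sketch (illustrative, not part of the original header); assumes Accessor::all()
// and Accessor::range() from Accessor.hpp:
//
//   Tensor A({4,4});
//   Tensor sub = A.get({Accessor::all(), Accessor::range(0,2)});   // 4x2 slice
//   A.set({Accessor(0), Accessor::all()}, 1.0);                    // set row 0 to 1.0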
1130
1139 Storage& storage() const{
1140 return this->_impl->storage();
1141 }
1142
1158 template<class T>
1159 void fill(const T& val){
1160 this->_impl->fill(val);
1161 }
1162
1163
1164 bool equiv(const Tensor &rhs){
1165 if(this->shape() != rhs.shape()) return false;
1166 return true;
1167 }
1168
1169 Tensor real();
1170 Tensor imag();
1171
1172
1173
1174 // Arithmic:
1175 template<class T>
1176 Tensor& operator+=(const T &rc);
1177 template<class T>
1178 Tensor& operator-=(const T &rc);
1179 template<class T>
1180 Tensor& operator*=(const T &rc);
1181 template<class T>
1182 Tensor& operator/=(const T &rc);
1183
1184
1185 //Tensor &operator+=(const Tproxy &rc);
1186 //Tensor &operator-=(const Tproxy &rc);
1187 //Tensor &operator*=(const Tproxy &rc);
1188 //Tensor &operator/=(const Tproxy &rc);
1189 /*
1190 Tensor operator+(const Tproxy &rc){
1191 return *this + Tensor(rc);
1192 }
1193 Tensor operator-(const Tproxy &rc){
1194 return *this - Tensor(rc);
1195 }
1196 Tensor operator*(const Tproxy &rc){
1197 return *this * Tensor(rc);
1198 }
1199 Tensor operator/(const Tproxy &rc){
1200 return *this / Tensor(rc);
1201 }
1202 */
1203
1204 template<class T>
1205 Tensor Add(const T &rhs){
1206 return *this + rhs;
1207 }
1208 template<class T>
1209 Tensor& Add_(const T &rhs){
1210 return *this += rhs;
1211 }
1212
1213 template<class T>
1214 Tensor Sub(const T &rhs){
1215 return *this - rhs;
1216 }
1217 template<class T>
1218 Tensor& Sub_(const T &rhs){
1219 return *this -= rhs;
1220 }
1221
1222 template<class T>
1223 Tensor Mul(const T &rhs){
1224 return *this * rhs;
1225 }
1226 template<class T>
1227 Tensor& Mul_(const T &rhs){
1228 return *this *= rhs;
1229 }
1230
1231 template<class T>
1232 Tensor Div(const T &rhs){
1233 return *this / rhs;
1234 }
1235 template<class T>
1236 Tensor& Div_(const T &rhs){
1237 return *this /= rhs;
1238 }
1239
1240 template<class T>
1241 Tensor Cpr(const T &rhs){
1242 return *this == rhs;
1243 }
1244
1245 //template<class T>
1246 //Tensor& Cpr_(const T &rhs){
1247 //
1248 // return *this == rhs;
1249 //}
1250
1251 template<class T>
1252 Tensor Mod(const T &rhs){
1253 return *this % rhs;
1254 }
1255
1256 Tensor operator-(){
1257 return this->Mul(-1.);
1258 }
1259
1260 Tensor flatten() const{
1261 Tensor out = this->clone();
1262 out.contiguous_();
1263 out.reshape_({-1});
1264 return out;
1265 }
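// Flatten sketch (illustrative, not part of the original header): clone, force contiguity,
// then collapse to rank-1 via reshape_({-1}); e.g. a {2,3,4} tensor becomes {24}.
// flatten_() below does the same in place on the current Tensor.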
1266
1267 void flatten_(){
1268 this->contiguous_();
1269 this->reshape_({-1});
1270
1271 }
1272
1273
1274 void append(const Tensor &rhs){
1275 //Tensor in;
1276 if(!this->is_contiguous())
1277 this->contiguous_();
1278
1279 // check Tensor in shape:
1280 cytnx_error_msg(rhs.shape().size()==0 || this->shape().size()==0,"[ERROR] trying to append a null Tensor.%s","\n");
1281 cytnx_error_msg(rhs.shape().size()!=(this->shape().size()-1),"[ERROR] trying to append a Tensor whose rank does not match.%s","\n");
1282 cytnx_uint64 Nelem = 1;
1283 for(unsigned int i=0;i<rhs.shape().size();i++){
1284 cytnx_error_msg(rhs.shape()[i]!=this->shape()[i+1],"[ERROR] dimension mismatch @ rhs.rank: [%d] this: [%d] rhs: [%d]\n",i,this->shape()[i+1],rhs.shape()[i]);
1285 Nelem*=rhs.shape()[i];
1286 }
1287
1288 //check type:
1289 Tensor in;
1290 if(rhs.dtype() != this->dtype()){
1291 in = rhs.astype(this->dtype());
1292 if(!in.is_contiguous())
1293 in.contiguous_();
1294 }else{
1295 if(!rhs.is_contiguous())
1296 in = rhs.contiguous();
1297 else
1298 in = rhs;
1299 }
1300 this->_impl->_shape[0]+=1;
1301 cytnx_uint64 oldsize = this->_impl->_storage.size();
1302 this->_impl->_storage.resize(oldsize+Nelem);
1303 memcpy(((char*)this->_impl->_storage.data()) + oldsize*Type.typeSize(this->dtype())/sizeof(char),
1304 in._impl->_storage.data(),
1305 Type.typeSize(in.dtype())*Nelem);
1306
1307 }
1308 void append(const Storage &srhs){
1309 if(!this->is_contiguous())
1310 this->contiguous_();
1311
1312 // check Tensor in shape:
1313 cytnx_error_msg(srhs.size()==0 || this->shape().size()==0,"[ERROR] trying to append a null Storage/Tensor.%s","\n");
1314 cytnx_error_msg((this->shape().size()-1)!=1,"[ERROR] appending a Storage to a Tensor is only supported for a rank-2 Tensor.%s","\n");
1315 cytnx_error_msg(this->shape().back()!=srhs.size(),"[ERROR] Tensor dimension mismatch!%s","\n");
1316
1317
1318 //check type:
1319 Storage in;
1320 if(srhs.dtype() != this->dtype()){
1321 in = srhs.astype(this->dtype());
1322 }else{
1323 in = srhs;
1324 }
1325 this->_impl->_shape[0]+=1;
1326 cytnx_uint64 oldsize = this->_impl->_storage.size();
1327 this->_impl->_storage.resize(oldsize+in.size());
1328 memcpy(((char*)this->_impl->_storage.data()) + oldsize*Type.typeSize(this->dtype())/sizeof(char),
1329 in._impl->Mem,
1330 Type.typeSize(in.dtype())*in.size());
1331
1332 }
1333 /*
1334 void append(const Tensor &rhs){
1335 // convert to the same type.
1336 Tensor in;
1337 if(rhs.dtype() != this->dtype()){
1338 in = rhs.astype(this->dtype());
1339 }else{
1340 in = rhs;
1341 }
1342
1343 // 1) check rank
1344 if(this->shape().size()==1){
1345 // check if rhs is a scalar tensor (only one element)
1346 cytnx_error_msg(!(rhs.shape().size()==1 && rhs.shape()[0]==1),"[ERROR] trying to append a scalar into multidimentional Tensor is not allow.\n Only rank-1 Tensor can accept scalar append.%s","\n");
1347 this->_impl->_shape[0]+=1;
1348 this->_impl->_storage.append(0);
1349
1350 }else{
1351 cytnx_error_msg(rhs.shape().size() != this->shape().size()-1,"[ERROR] try to append a Tensor with rank not match.%s","\n");
1352
1353 }
1354 cytnx_error_msg(!this->is_contiguous(),"[ERROR] append require the Tensor to be contiguous. suggestion: call contiguous() or contiguous_() first.","\n");
1355 }
1356 */
1357 template<class T>
1358 void append(const T &rhs){
1359 cytnx_error_msg(this->shape().size()!=1,"[ERROR] appending a scalar to a multi-dimensional Tensor is not allowed.\n Only a rank-1 Tensor accepts scalar append.%s","\n");
1360 cytnx_error_msg(!this->is_contiguous(),"[ERROR] append requires the Tensor to be contiguous. Suggestion: call contiguous() or contiguous_() first.","\n");
1361 this->_impl->_shape[0]+=1;
1362 this->_impl->_storage.append(rhs);
1363 }
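// Append sketch (illustrative, not part of the original header); shapes and names are
// examples only:
//
//   Tensor A({3,4});
//   Tensor row({4});
//   A.append(row);              // rank-1 rhs appended along axis 0 -> shape {4,4}
//   A.append(row.storage());    // Storage overload, rank-2 Tensor only
//   Tensor v({5});
//   v.append(1.0);              // scalar append on a rank-1 Tensor -> shape {6}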
1364
1365 bool same_data(const Tensor &rhs) const;
1366
1367 // linalg:
1368 std::vector<Tensor> Svd(const bool &is_U=true, const bool &is_vT=true) const;
1369 std::vector<Tensor> Eigh(const bool &is_V=true,const bool &row_v=false) const;
1370 Tensor& InvM_();
1371 Tensor InvM() const;
1372 Tensor& Inv_(const double &clip);
1373 Tensor Inv(const double &clip) const;
1374
1375 Tensor& Conj_();
1376 Tensor Conj() const;
1377 Tensor& Exp_();
1378 Tensor Exp() const;
1379 Tensor Norm() const;
1380 Tensor Pow(const cytnx_double &p) const;
1381 Tensor& Pow_(const cytnx_double &p);
1382 Tensor Trace(const cytnx_uint64 &a, const cytnx_uint64 &b) const;
1383 Tensor Abs() const;
1384 Tensor& Abs_();
1385 Tensor Max() const;
1386 Tensor Min() const;
1387
1388 };// class Tensor
1389
1390 Tensor operator+(const Tensor &lhs, const Tensor::Tproxy &rhs);
1391 Tensor operator-(const Tensor &lhs, const Tensor::Tproxy &rhs);
1392 Tensor operator*(const Tensor &lhs, const Tensor::Tproxy &rhs);
1393 Tensor operator/(const Tensor &lhs, const Tensor::Tproxy &rhs);
1394
1395 Tensor operator+(const Tensor &lhs, const Scalar::Sproxy &rhs);
1396 Tensor operator-(const Tensor &lhs, const Scalar::Sproxy &rhs);
1397 Tensor operator*(const Tensor &lhs, const Scalar::Sproxy &rhs);
1398 Tensor operator/(const Tensor &lhs, const Scalar::Sproxy &rhs);
1399
1400 std::ostream& operator<<(std::ostream& os, const Tensor &in);
1401 std::ostream& operator<<(std::ostream& os, const Tensor::Tproxy &in);
1402 //{ os << Tensor(in);};
1403}
1404
1405#endif
An object that mimics the Python slice for accessing elements in C++ [C++ API only].
Definition Accessor.hpp:16
A memory storage with multi-type/multi-device support
Definition Storage.hpp:918
Storage astype(const unsigned int &new_type) const
cast the type of current Storage
Definition Storage.hpp:1036
const unsigned long long & size() const
the size (no. of elements) in the Storage
Definition Storage.hpp:1185
A tensor (multi-dimensional array)
Definition Tensor.hpp:289
void append(const Storage &srhs)
Definition Tensor.hpp:1308
Tensor & operator*=(const T &rc)
Tensor & Inv_(const double &clip)
Definition Tensor.cpp:1165
Tensor & operator/=(const T &rc)
Tensor operator-()
Definition Tensor.hpp:1256
void fill(const T &val)
fill all elements of the current Tensor with the value.
Definition Tensor.hpp:1159
Tensor InvM() const
Definition Tensor.cpp:1162
bool same_data(const Tensor &rhs) const
Definition Tensor.cpp:1221
void to_(const int &device)
move the current Tensor to the device.
Definition Tensor.hpp:768
void append(const T &rhs)
Definition Tensor.hpp:1358
Tensor & operator-=(const T &rc)
Tensor & Add_(const T &rhs)
Definition Tensor.hpp:1209
Tensor Abs() const
Definition Tensor.cpp:1206
std::string device_str() const
the device (in string) of the Tensor
Definition Tensor.hpp:682
void reshape_(const std::vector< cytnx_int64 > &new_shape)
reshape the Tensor, in place
Definition Tensor.hpp:876
Tensor contiguous_()
Make the Tensor contiguous by coalescing the memory (storage), in place.
Definition Tensor.hpp:855
static Tensor Load(const std::string &fname)
Load a Tensor from file.
Definition Tensor.cpp:841
Tensor permute_(const std::vector< cytnx_uint64 > &rnks)
Definition Tensor.hpp:776
Tensor Mul(const T &rhs)
Definition Tensor.hpp:1223
unsigned int dtype() const
the dtype-id of the Tensor
Definition Tensor.hpp:661
Tensor Sub(const T &rhs)
Definition Tensor.hpp:1214
Tensor Inv(const double &clip) const
Definition Tensor.cpp:1169
Tensor contiguous() const
Make the Tensor contiguous by coalescing the memory (storage).
Definition Tensor.hpp:834
void Tofile(const std::string &fname) const
Definition Tensor.cpp:766
T & at(const std::vector< cytnx_uint64 > &locator)
[C++ only] get an element at a specific location.
Definition Tensor.hpp:976
Tensor reshape(const std::vector< cytnx_int64 > &new_shape) const
return a new Tensor that is reshaped.
Definition Tensor.hpp:913
T & item()
get an element from a rank-0 Tensor
Definition Tensor.hpp:1016
Tensor clone() const
return a clone of the current Tensor.
Definition Tensor.hpp:719
std::vector< Tensor > Eigh(const bool &is_V=true, const bool &row_v=false) const
Definition Tensor.cpp:1153
void append(const Tensor &rhs)
Definition Tensor.hpp:1274
void set(const std::vector< cytnx::Accessor > &accessors, const Tensor &rhs)
set elements with the input Tensor using Accessor (C++ API) / slices (python API)
Definition Tensor.hpp:1096
Tensor Norm() const
Definition Tensor.cpp:1189
Tensor astype(const int &new_type) const
return a new Tensor that cast to different dtype.
Definition Tensor.hpp:944
Tensor & Div_(const T &rhs)
Definition Tensor.hpp:1236
Tensor & operator+=(const T &rc)
Tensor Conj() const
Definition Tensor.cpp:1178
Tensor & Pow_(const cytnx_double &p)
Definition Tensor.cpp:1197
std::string dtype_str() const
the dtype (in string) of the Tensor
Definition Tensor.hpp:675
Tensor & Mul_(const T &rhs)
Definition Tensor.hpp:1227
cytnx_uint64 rank() const
the rank of the Tensor
Definition Tensor.hpp:697
Tensor Trace(const cytnx_uint64 &a, const cytnx_uint64 &b) const
Definition Tensor.cpp:1216
const bool & is_contiguous() const
Definition Tensor.hpp:772
Tensor Exp() const
Definition Tensor.cpp:1186
Tensor & Abs_()
Definition Tensor.cpp:1202
Tensor Add(const T &rhs)
Definition Tensor.hpp:1205
void flatten_()
Definition Tensor.hpp:1267
void Save(const std::string &fname) const
Save current Tensor to file.
Definition Tensor.cpp:792
Tensor flatten() const
Definition Tensor.hpp:1260
Tensor & Conj_()
Definition Tensor.cpp:1174
Tensor Pow(const cytnx_double &p) const
Definition Tensor.cpp:1193
int device() const
the device-id of the Tensor
Definition Tensor.hpp:668
Tensor real()
Definition Tensor.cpp:892
Tensor imag()
Definition Tensor.cpp:899
Tensor to(const int &device) const
copy a tensor to a new device
Definition Tensor.hpp:745
void Tofile(std::fstream &f) const
Tensor get(const std::vector< cytnx::Accessor > &accessors) const
get elements using Accessor (C++ API) / slices (python API)
Definition Tensor.hpp:1063
void set(const std::vector< cytnx::Accessor > &accessors, const T &rc)
set elements with the input constant using Accessor (C++ API) / slices (python API)
Definition Tensor.hpp:1120
Tensor Max() const
Definition Tensor.cpp:1209
Tensor permute(const std::vector< cytnx_uint64 > &rnks) const
perform tensor permute on the cytnx::Tensor and return a new instance.
Definition Tensor.hpp:804
Tensor Div(const T &rhs)
Definition Tensor.hpp:1232
Tensor Mod(const T &rhs)
Definition Tensor.hpp:1252
bool equiv(const Tensor &rhs)
Definition Tensor.hpp:1164
Tensor Cpr(const T &rhs)
Definition Tensor.hpp:1241
Tensor & Exp_()
Definition Tensor.cpp:1182
Tensor & InvM_()
Definition Tensor.cpp:1158
std::vector< Tensor > Svd(const bool &is_U=true, const bool &is_vT=true) const
Definition Tensor.cpp:1150
const std::vector< cytnx_uint64 > & shape() const
the shape of the Tensor
Definition Tensor.hpp:689
Tensor Min() const
Definition Tensor.cpp:1212
const T & at(const std::vector< cytnx_uint64 > &locator) const
Definition Tensor.hpp:980
Storage & storage() const
return the storage of current Tensor.
Definition Tensor.hpp:1139
static Tensor from_storage(const Storage &in)
Definition Tensor.hpp:647
static Tensor Fromfile(const std::string &fname, const unsigned int &dtype, const cytnx_int64 &count=-1)
Definition Tensor.cpp:835
void Init(const std::vector< cytnx_uint64 > &shape, const unsigned int &dtype=Type.Double, const int &device=-1)
initialize a Tensor
Definition Tensor.hpp:637
Tensor(const std::vector< cytnx_uint64 > &shape, const unsigned int &dtype=Type.Double, const int &device=-1)
Definition Tensor.hpp:642
Tensor & Sub_(const T &rhs)
Definition Tensor.hpp:1218
#define cytnx_error_msg(is_true, format,...)
Definition cytnx_error.hpp:18
Definition Accessor.hpp:12
Device_class Device
Definition Device.cpp:105
cytnx::UniTensor operator*(const cytnx::UniTensor &Lt, const cytnx::UniTensor &Rt)
double cytnx_double
Definition Type.hpp:20
uint32_t cytnx_uint32
Definition Type.hpp:23
bool cytnx_bool
Definition Type.hpp:31
std::complex< double > cytnx_complex128
Definition Type.hpp:30
float cytnx_float
Definition Type.hpp:21
std::ostream & operator<<(std::ostream &os, const Scalar &in)
Definition Scalar.cpp:14
int16_t cytnx_int16
Definition Type.hpp:27
std::complex< float > cytnx_complex64
Definition Type.hpp:29
cytnx::UniTensor operator-(const cytnx::UniTensor &Lt, const cytnx::UniTensor &Rt)
int32_t cytnx_int32
Definition Type.hpp:26
uint16_t cytnx_uint16
Definition Type.hpp:24
uint64_t cytnx_uint64
Definition Type.hpp:22
int64_t cytnx_int64
Definition Type.hpp:25
Storage_init_interface __SII
Definition Storage.cpp:13
Type_class Type
Definition Type.cpp:137
cytnx::UniTensor operator+(const cytnx::UniTensor &Lt, const cytnx::UniTensor &Rt)
cytnx::UniTensor operator/(const cytnx::UniTensor &Lt, const cytnx::UniTensor &Rt)