Cytnx v1.0.0
UniTensor.hpp
1#ifndef CYTNX_UNITENSOR_H_
2#define CYTNX_UNITENSOR_H_
3
4#include "Type.hpp"
5#include "cytnx_error.hpp"
6#include "Device.hpp"
7#include "Tensor.hpp"
8#include "utils/utils.hpp"
10#include <iostream>
11#include <vector>
12#include <map>
13#include <utility>
14#include <initializer_list>
15#include <fstream>
16#include <algorithm>
17#include "Symmetry.hpp"
18#include "Bond.hpp"
19#include "Generator.hpp"
20#include <random>
21
22#ifdef BACKEND_TORCH
23#else
24 #include "backend/Scalar.hpp"
25
26// namespace cytnx{
27namespace cytnx {
28 namespace random {
29 extern std::random_device __static_random_device;
30 }
31
32 using namespace cytnx;
34 class UniTensorType_class {
35 public:
36 enum : int { Void = -99, Dense = 0, Sparse = 1, Block = 2, BlockFermionic = 3 };
37 std::string getname(const int &ut_type) const;
38 };
40
57 extern UniTensorType_class UTenType;
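  // Usage sketch (illustrative; `ut` stands for any UniTensor_base-derived object,
  // not a name defined in this header):
  //   if (ut.uten_type() == UTenType.Block) { /* block-sparse (symmetric) storage */ }
  //   std::string tname = UTenType.getname(ut.uten_type());  // e.g. "Dense" or "Block"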
58
60 // class DenseUniTensor;
61 // class SparseUniTensor;
62 class UniTensor_base : public intrusive_ptr_base<UniTensor_base> {
63 public:
64 int uten_type_id; // the unitensor type id.
65 bool _is_braket_form;
66 bool _is_tag;
67 bool _is_diag;
68 cytnx_int64 _rowrank;
69 std::string _name;
70 std::vector<std::string> _labels;
71 std::vector<Bond> _bonds;
72
73 bool _update_braket() {
74 if (_bonds.size() == 0) return false;
75
76 if (this->_bonds[0].type() != bondType::BD_REG) {
77 // check:
78 for (unsigned int i = 0; i < this->_bonds.size(); i++) {
79 if (i < this->_rowrank) {
80 if (this->_bonds[i].type() != bondType::BD_KET) return false;
81 } else {
82 if (this->_bonds[i].type() != bondType::BD_BRA) return false;
83 }
84 }
85 return true;
86 } else {
87 return false;
88 }
89 }
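  // Usage sketch: with _rowrank = 2 and tagged bonds {BD_KET, BD_KET, BD_BRA},
  // _update_braket() returns true (every bond before _rowrank is a ket, every bond
  // after it is a bra); if bond 0 is BD_REG (untagged), it returns false.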
90
91 friend class UniTensor; // allow wrapper to access the private elems
92 friend class DenseUniTensor;
93 // friend class SparseUniTensor;
94 friend class BlockUniTensor;
95 friend class BlockFermionicUniTensor;
96
97 UniTensor_base()
98 : _is_tag(false),
99 _name(std::string("")),
100 _is_braket_form(false),
101 _rowrank(0),
102 _is_diag(false),
103 uten_type_id(UTenType.Void){};
104
105 // copy&assignment constr., use intrusive_ptr's !!
106 UniTensor_base(const UniTensor_base &rhs);
107 UniTensor_base &operator=(UniTensor_base &rhs);
108
109 cytnx_uint64 rowrank() const { return this->_rowrank; }
110 bool is_diag() const { return this->_is_diag; }
111 const bool &is_braket_form() const { return this->_is_braket_form; }
112 const bool &is_tag() const { return this->_is_tag; }
113 const std::vector<std::string> &labels() const { return this->_labels; }
120 cytnx_int64 get_index(std::string label) const {
121 const std::vector<std::string> &labels = this->_labels;
122 for (cytnx_uint64 i = 0; i < labels.size(); i++) {
123 if (labels[i] == label) return i;
124 }
125 return -1;
126 }
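  // Usage sketch: with _labels = {"a", "b", "c"}, get_index("b") returns 1,
  // while an unknown label such as get_index("x") returns -1.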
127 const std::vector<Bond> &bonds() const { return this->_bonds; }
128 std::vector<Bond> &bonds() { return this->_bonds; }
129
130 Bond &bond_(const cytnx_uint64 &idx) {
131 cytnx_error_msg(idx >= this->_bonds.size(), "[ERROR][bond] index %d out of bound, total %d\n",
132 idx, this->_bonds.size());
133 return this->_bonds[idx];
134 }
135
136 Bond &bond_(const std::string &label) {
137 auto res = std::find(this->_labels.begin(), this->_labels.end(), label);
138 cytnx_error_msg(res == this->_labels.end(), "[ERROR] label %s does not exist.\n", label.c_str());
139 cytnx_uint64 idx = std::distance(this->_labels.begin(), res);
140
141 return this->bond_(idx);
142 }
143
144 const std::string &name() const { return this->_name; }
145 cytnx_uint64 rank() const { return this->_labels.size(); }
146 void set_name(const std::string &in) { this->_name = in; }
147
159 void set_label(const std::string &oldlabel, const std::string &new_label) {
160 cytnx_int64 idx;
161 auto res = std::find(this->_labels.begin(), this->_labels.end(), oldlabel);
162 cytnx_error_msg(res == this->_labels.end(), "[ERROR] label %s does not exist.\n",
163 oldlabel.c_str());
164 idx = std::distance(this->_labels.begin(), res);
165
166 cytnx_error_msg(idx >= this->_labels.size(), "[ERROR] index exceeds the rank of the UniTensor%s",
167 "\n");
168 // check in:
169 bool is_dup = false;
170 for (cytnx_uint64 i = 0; i < this->_labels.size(); i++) {
171 if (i == idx) continue;
172 if (new_label == this->_labels[i]) {
173 is_dup = true;
174 break;
175 }
176 }
177 cytnx_error_msg(is_dup, "[ERROR] already has a label that is the same as the input label%s",
178 "\n");
179 this->_labels[idx] = new_label;
180 }
181 void set_label(const cytnx_int64 &inx, const std::string &new_label) {
182 cytnx_error_msg(inx < 0, "[ERROR] index is negative%s", "\n");
183 cytnx_error_msg(inx >= this->_labels.size(), "[ERROR] index exceeds the rank of the UniTensor%s",
184 "\n");
185 // check in:
186 bool is_dup = false;
187 for (cytnx_uint64 i = 0; i < this->_labels.size(); i++) {
188 if (i == inx) continue;
189 if (new_label == this->_labels[i]) {
190 is_dup = true;
191 break;
192 }
193 }
194 cytnx_error_msg(is_dup, "[ERROR] already has a label that is the same as the input label%s",
195 "\n");
196 this->_labels[inx] = new_label;
197 }
198
199 void set_labels(const std::vector<std::string> &new_labels);
200 void relabel_(const std::vector<std::string> &new_labels); // implemented
201 void relabels_(const std::vector<std::string> &new_labels); // implemented
202 void relabel_(const std::vector<std::string> &old_labels,
203 const std::vector<std::string> &new_labels); // implemented
204 void relabels_(const std::vector<std::string> &old_labels,
205 const std::vector<std::string> &new_labels); // implemented
206 void relabel_(const std::string &old_label, const std::string &new_label) {
207 this->set_label(old_label, new_label);
208 }
209 void relabel_(const cytnx_int64 &inx, const std::string &new_label) {
210 this->set_label(inx, new_label);
211 }
212
213 int uten_type() { return this->uten_type_id; }
214 std::string uten_type_str() const { return UTenType.getname(this->uten_type_id); }
215
217
218 // string labels!
219 virtual void Init(const std::vector<Bond> &bonds,
220 const std::vector<std::string> &in_labels = {},
221 const cytnx_int64 &rowrank = -1, const unsigned int &dtype = Type.Double,
222 const int &device = Device.cpu, const bool &is_diag = false,
223 const bool &no_alloc = false, const std::string &name = "");
224
225 virtual void Init_by_Tensor(const Tensor &in, const bool &is_diag = false,
226 const cytnx_int64 &rowrank = -1, const std::string &name = "");
227 virtual std::vector<cytnx_uint64> shape() const;
228 virtual std::vector<bool> signflip() const;
229 virtual std::vector<bool> &signflip_();
230 virtual bool is_blockform() const;
231 virtual bool is_contiguous() const;
232 virtual void to_(const int &device);
233 virtual boost::intrusive_ptr<UniTensor_base> to(const int &device);
234 virtual boost::intrusive_ptr<UniTensor_base> clone() const;
235 virtual unsigned int dtype() const;
236 virtual int device() const;
237 virtual std::string dtype_str() const;
238 virtual std::string device_str() const;
239 virtual void set_rowrank_(const cytnx_uint64 &new_rowrank);
240 virtual boost::intrusive_ptr<UniTensor_base> set_rowrank(const cytnx_uint64 &new_rowrank) const;
241
242 virtual boost::intrusive_ptr<UniTensor_base> permute(const std::vector<cytnx_int64> &mapper,
243 const cytnx_int64 &rowrank = -1);
244 virtual boost::intrusive_ptr<UniTensor_base> permute(const std::vector<std::string> &mapper,
245 const cytnx_int64 &rowrank = -1);
246 // virtual boost::intrusive_ptr<UniTensor_base> permute(const std::vector<cytnx_int64> &mapper,
247 // const cytnx_int64 &rowrank = -1);
248
249 virtual void permute_(const std::vector<cytnx_int64> &mapper, const cytnx_int64 &rowrank = -1);
250 virtual void permute_(const std::vector<std::string> &mapper, const cytnx_int64 &rowrank = -1);
251
252 virtual boost::intrusive_ptr<UniTensor_base> permute_nosignflip(
253 const std::vector<cytnx_int64> &mapper, const cytnx_int64 &rowrank = -1);
254 virtual boost::intrusive_ptr<UniTensor_base> permute_nosignflip(
255 const std::vector<std::string> &mapper, const cytnx_int64 &rowrank = -1);
256 virtual void permute_nosignflip_(const std::vector<cytnx_int64> &mapper,
257 const cytnx_int64 &rowrank = -1);
258 virtual void permute_nosignflip_(const std::vector<std::string> &mapper,
259 const cytnx_int64 &rowrank = -1);
260
261 // virtual void permute_(const std::vector<cytnx_int64> &mapper, const cytnx_int64 &rowrank =
262 // -1);
263 virtual boost::intrusive_ptr<UniTensor_base> contiguous_();
264 virtual boost::intrusive_ptr<UniTensor_base> contiguous();
265 virtual void print_diagram(const bool &bond_info = false) const;
266 virtual void print_blocks(const bool &full_info = true) const;
267 virtual void print_block(const cytnx_int64 &idx, const bool &full_info = true) const;
268
269 virtual boost::intrusive_ptr<UniTensor_base> astype(const unsigned int &dtype) const;
270
271 virtual cytnx_uint64 Nblocks() const { return 0; };
272 virtual Tensor get_block(const cytnx_uint64 &idx = 0) const; // return a copy of block
273 virtual Tensor get_block(const std::vector<cytnx_int64> &qnum,
274 const bool &force) const; // return a copy of block
275
276 virtual const Tensor &get_block_(const cytnx_uint64 &idx = 0)
277 const; // return a shared view of the block; this only works for non-symmetric tensors.
278 virtual const Tensor &get_block_(const std::vector<cytnx_int64> &qnum,
279 const bool &force) const; // return a shared view of the block
280 virtual Tensor &get_block_(const cytnx_uint64 &idx = 0); // return a shared view of the block;
281 // this only works for non-symmetric tensors.
282 virtual Tensor &get_block_(const std::vector<cytnx_int64> &qnum,
283 const bool &force); // return a shared view of the block
284 virtual bool same_data(const boost::intrusive_ptr<UniTensor_base> &rhs) const;
285
286 virtual std::vector<Tensor> get_blocks() const;
287 virtual const std::vector<Tensor> &get_blocks_(const bool &) const;
288 virtual std::vector<Tensor> &get_blocks_(const bool &);
289
290 virtual void put_block(const Tensor &in, const cytnx_uint64 &idx = 0);
291 virtual void put_block_(Tensor &in, const cytnx_uint64 &idx = 0);
292 virtual void put_block(const Tensor &in, const std::vector<cytnx_int64> &qnum,
293 const bool &force);
294 virtual void put_block_(Tensor &in, const std::vector<cytnx_int64> &qnum, const bool &force);
295
296 // this will only work on non-symm tensor (DenseUniTensor)
297 virtual boost::intrusive_ptr<UniTensor_base> get(const std::vector<Accessor> &accessors);
298
299 // this will only work on non-symm tensor (DenseUniTensor)
300 virtual void set(const std::vector<Accessor> &accessors, const Tensor &rhs);
301
302 virtual void reshape_(const std::vector<cytnx_int64> &new_shape,
303 const cytnx_uint64 &rowrank = 0);
304 virtual boost::intrusive_ptr<UniTensor_base> reshape(const std::vector<cytnx_int64> &new_shape,
305 const cytnx_uint64 &rowrank = 0);
306 virtual boost::intrusive_ptr<UniTensor_base> to_dense();
307 virtual void to_dense_();
308 virtual void combineBond(const std::vector<std::string> &indicators, const bool &force = false);
309 virtual void combineBonds(const std::vector<cytnx_int64> &indicators, const bool &force,
310 const bool &by_label);
311 virtual void combineBonds(const std::vector<std::string> &indicators,
312 const bool &force = false);
313 virtual void combineBonds(const std::vector<cytnx_int64> &indicators,
314 const bool &force = false);
315 virtual boost::intrusive_ptr<UniTensor_base> contract(
316 const boost::intrusive_ptr<UniTensor_base> &rhs, const bool &mv_elem_self = false,
317 const bool &mv_elem_rhs = false);
318 virtual std::vector<Bond> getTotalQnums(const bool &physical = false);
319 virtual std::vector<std::vector<cytnx_int64>> get_blocks_qnums() const;
320 virtual void Trace_(const std::string &a, const std::string &b);
321 virtual void Trace_(const cytnx_int64 &a, const cytnx_int64 &b);
322
323 virtual boost::intrusive_ptr<UniTensor_base> Trace(const std::string &a, const std::string &b);
324 virtual boost::intrusive_ptr<UniTensor_base> Trace(const cytnx_int64 &a, const cytnx_int64 &b);
325
326 virtual boost::intrusive_ptr<UniTensor_base> relabel(
327 const std::vector<std::string> &new_labels);
328 virtual boost::intrusive_ptr<UniTensor_base> relabels(
329 const std::vector<std::string> &new_labels);
330
331 virtual boost::intrusive_ptr<UniTensor_base> relabel(
332 const std::vector<std::string> &old_labels, const std::vector<std::string> &new_labels);
333 virtual boost::intrusive_ptr<UniTensor_base> relabels(
334 const std::vector<std::string> &old_labels, const std::vector<std::string> &new_labels);
335
336 virtual boost::intrusive_ptr<UniTensor_base> relabel(const std::string &old_label,
337 const std::string &new_label);
338
339 virtual boost::intrusive_ptr<UniTensor_base> relabel(const cytnx_int64 &inx,
340 const std::string &new_label);
341
342 virtual std::vector<Symmetry> syms() const;
343
344 // arithmetic
345 virtual void Add_(const boost::intrusive_ptr<UniTensor_base> &rhs);
346 virtual void Add_(const Scalar &rhs);
347
348 virtual void Mul_(const boost::intrusive_ptr<UniTensor_base> &rhs);
349 virtual void Mul_(const Scalar &rhs);
350
351 virtual void Sub_(const boost::intrusive_ptr<UniTensor_base> &rhs);
352 virtual void Sub_(const Scalar &rhs);
353 virtual void lSub_(const Scalar &lhs);
354
355 virtual void Div_(const boost::intrusive_ptr<UniTensor_base> &rhs);
356 virtual void Div_(const Scalar &rhs);
357 virtual void lDiv_(const Scalar &lhs);
358
359 virtual Tensor Norm() const;
360 virtual boost::intrusive_ptr<UniTensor_base> normalize();
361 virtual void normalize_();
362
363 virtual boost::intrusive_ptr<UniTensor_base> Conj();
364 virtual void Conj_();
365
366 virtual boost::intrusive_ptr<UniTensor_base> Transpose();
367 virtual void Transpose_();
368
369 virtual boost::intrusive_ptr<UniTensor_base> Dagger();
370 virtual void Dagger_();
371
372 virtual void tag();
373
374 virtual void truncate_(const std::string &label, const cytnx_uint64 &dim);
375 virtual void truncate_(const cytnx_int64 &bond_idx, const cytnx_uint64 &dim);
376
377 virtual bool elem_exists(const std::vector<cytnx_uint64> &locator) const;
378
379 // this is a workaround, as virtual functions cannot be templated.
380 virtual Scalar::Sproxy at_for_sparse(const std::vector<cytnx_uint64> &locator);
381 virtual const Scalar::Sproxy at_for_sparse(const std::vector<cytnx_uint64> &locator) const;
382
383 virtual cytnx_complex128 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
384 const cytnx_complex128 &aux);
385 virtual cytnx_complex64 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
386 const cytnx_complex64 &aux);
387 virtual cytnx_double &at_for_sparse(const std::vector<cytnx_uint64> &locator,
388 const cytnx_double &aux);
389 virtual cytnx_float &at_for_sparse(const std::vector<cytnx_uint64> &locator,
390 const cytnx_float &aux);
391 virtual cytnx_uint64 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
392 const cytnx_uint64 &aux);
393 virtual cytnx_int64 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
394 const cytnx_int64 &aux);
395 virtual cytnx_uint32 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
396 const cytnx_uint32 &aux);
397 virtual cytnx_int32 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
398 const cytnx_int32 &aux);
399 virtual cytnx_uint16 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
400 const cytnx_uint16 &aux);
401 virtual cytnx_int16 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
402 const cytnx_int16 &aux);
403
404 virtual const cytnx_complex128 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
405 const cytnx_complex128 &aux) const;
406 virtual const cytnx_complex64 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
407 const cytnx_complex64 &aux) const;
408 virtual const cytnx_double &at_for_sparse(const std::vector<cytnx_uint64> &locator,
409 const cytnx_double &aux) const;
410 virtual const cytnx_float &at_for_sparse(const std::vector<cytnx_uint64> &locator,
411 const cytnx_float &aux) const;
412 virtual const cytnx_uint64 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
413 const cytnx_uint64 &aux) const;
414 virtual const cytnx_int64 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
415 const cytnx_int64 &aux) const;
416 virtual const cytnx_uint32 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
417 const cytnx_uint32 &aux) const;
418 virtual const cytnx_int32 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
419 const cytnx_int32 &aux) const;
420 virtual const cytnx_uint16 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
421 const cytnx_uint16 &aux) const;
422 virtual const cytnx_int16 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
423 const cytnx_int16 &aux) const;
424
425 virtual void from_(const boost::intrusive_ptr<UniTensor_base> &rhs, const bool &force,
426 const cytnx_double &tol);
427 virtual void from_(const boost::intrusive_ptr<UniTensor_base> &rhs, const bool &force);
428
429 virtual void group_basis_();
430 virtual const std::vector<cytnx_uint64> &get_qindices(const cytnx_uint64 &bidx) const;
431 virtual std::vector<cytnx_uint64> &get_qindices(const cytnx_uint64 &bidx);
432 virtual const vec2d<cytnx_uint64> &get_itoi() const;
433 virtual vec2d<cytnx_uint64> &get_itoi();
434
435 virtual void _save_dispatch(std::fstream &f) const;
436 virtual void _load_dispatch(std::fstream &f);
437
438 virtual ~UniTensor_base(){};
439 };
441
442 //======================================================================
444 class DenseUniTensor : public UniTensor_base {
445 protected:
446 public:
447 Tensor _block;
448 std::vector<Tensor> _interface_block; // this serves as the interface for get_blocks_();
449 DenseUniTensor *clone_meta() const {
450 DenseUniTensor *tmp = new DenseUniTensor();
451 tmp->_bonds = vec_clone(this->_bonds);
452 tmp->_labels = this->_labels;
453 tmp->_is_braket_form = this->_is_braket_form;
454 tmp->_rowrank = this->_rowrank;
455 tmp->_is_diag = this->_is_diag;
456 tmp->_name = this->_name;
457 tmp->_is_tag = this->_is_tag;
458 return tmp;
459 }
460 //------------------------------------------
461
462 DenseUniTensor() { this->uten_type_id = UTenType.Dense; };
463 friend class UniTensor; // allow wrapper to access the private elems
464 // virtual functions
465
466 // void Init(const std::vector<Bond> &bonds, const std::vector<cytnx_int64> &in_labels = {},
467 // const cytnx_int64 &rowrank = -1, const unsigned int &dtype = Type.Double,
468 // const int &device = Device.cpu, const bool &is_diag = false,
469 // const bool &no_alloc = false);
470
471 void Init(const std::vector<Bond> &bonds, const std::vector<std::string> &in_labels = {},
472 const cytnx_int64 &rowrank = -1, const unsigned int &dtype = Type.Double,
473 const int &device = Device.cpu, const bool &is_diag = false,
474 const bool &no_alloc = false, const std::string &name = "");
475 // this only works for non-symmetric tensors
476 void Init_by_Tensor(const Tensor &in_tensor, const bool &is_diag = false,
477 const cytnx_int64 &rowrank = -1, const std::string &name = "");
478 std::vector<cytnx_uint64> shape() const {
479 if (this->_is_diag) {
480 std::vector<cytnx_uint64> shape = this->_block.shape();
481 shape.push_back(shape[0]);
482 return shape;
483 } else {
484 return this->_block.shape();
485 }
486 }
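    // Usage sketch: for a diagonal DenseUniTensor whose stored block has shape {d},
    // shape() reports the logical shape {d, d}; otherwise it simply forwards
    // _block.shape().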
487 bool is_blockform() const { return false; }
488 void to_(const int &device) { this->_block.to_(device); }
489 boost::intrusive_ptr<UniTensor_base> to(const int &device) {
490 if (this->device() == device) {
492 return this;
493 } else {
494 boost::intrusive_ptr<UniTensor_base> out = this->clone();
495 out->to_(device);
496 return out;
497 }
498 }
499 void set_rowrank_(const cytnx_uint64 &new_rowrank) {
500 cytnx_error_msg(new_rowrank > this->_labels.size(),
501 "[ERROR] rowrank cannot exceed the rank of UniTensor.%s", "\n");
502 if (this->is_diag()) {
503 cytnx_error_msg(new_rowrank != 1, "[ERROR] rowrank should be [==1] when is_diag =true!.%s",
504 "\n");
505 }
506
507 this->_rowrank = new_rowrank;
508 }
509
510 boost::intrusive_ptr<UniTensor_base> set_rowrank(const cytnx_uint64 &new_rowrank) const {
511 DenseUniTensor *out_raw = this->clone_meta();
512 out_raw->_block = this->_block;
513 out_raw->set_rowrank_(new_rowrank);
514 boost::intrusive_ptr<UniTensor_base> out(out_raw);
515 return out;
516 }
517
518 boost::intrusive_ptr<UniTensor_base> clone() const {
519 DenseUniTensor *tmp = this->clone_meta();
520 tmp->_block = this->_block.clone();
521 boost::intrusive_ptr<UniTensor_base> out(tmp);
522 return out;
523 };
524 bool is_contiguous() const { return this->_block.is_contiguous(); }
525 unsigned int dtype() const { return this->_block.dtype(); }
526 int device() const { return this->_block.device(); }
527 std::string dtype_str() const { return Type.getname(this->_block.dtype()); }
528 std::string device_str() const { return Device.getname(this->_block.device()); }
538 boost::intrusive_ptr<UniTensor_base> permute(const std::vector<cytnx_int64> &mapper,
539 const cytnx_int64 &rowrank = -1);
540 boost::intrusive_ptr<UniTensor_base> permute(const std::vector<std::string> &mapper,
541 const cytnx_int64 &rowrank = -1);
542
551 void permute_(const std::vector<cytnx_int64> &mapper, const cytnx_int64 &rowrank = -1);
552 void permute_(const std::vector<std::string> &mapper, const cytnx_int64 &rowrank = -1);
553
554 boost::intrusive_ptr<UniTensor_base> relabel(const std::vector<std::string> &new_labels);
555 boost::intrusive_ptr<UniTensor_base> relabels(const std::vector<std::string> &new_labels);
556
557 boost::intrusive_ptr<UniTensor_base> relabel(const std::vector<std::string> &old_labels,
558 const std::vector<std::string> &new_labels);
559 boost::intrusive_ptr<UniTensor_base> relabels(const std::vector<std::string> &old_labels,
560 const std::vector<std::string> &new_labels);
561
572 boost::intrusive_ptr<UniTensor_base> relabel(const std::string &old_label,
573 const std::string &new_label);
574 boost::intrusive_ptr<UniTensor_base> relabel(const cytnx_int64 &inx,
575 const std::string &new_label);
576
577 boost::intrusive_ptr<UniTensor_base> astype(const unsigned int &dtype) const {
578 DenseUniTensor *tmp = this->clone_meta();
579 tmp->_block = this->_block.astype(dtype);
580 boost::intrusive_ptr<UniTensor_base> out(tmp);
581 return out;
582 }
583
584 std::vector<Symmetry> syms() const {
585 cytnx_error_msg(true, "[ERROR][DenseUniTensor] dense unitensor does not have symmetry.%s",
586 "\n");
587 return std::vector<Symmetry>();
588 }
589
590 boost::intrusive_ptr<UniTensor_base> contiguous_() {
591 this->_block.contiguous_();
592 return boost::intrusive_ptr<UniTensor_base>(this);
593 }
594 boost::intrusive_ptr<UniTensor_base> contiguous() {
595 // if contiguous then return self!
596 if (this->is_contiguous()) {
597 boost::intrusive_ptr<UniTensor_base> out(this);
598 return out;
599 } else {
600 DenseUniTensor *tmp = this->clone_meta();
601 tmp->_block = this->_block.contiguous();
602 boost::intrusive_ptr<UniTensor_base> out(tmp);
603 return out;
604 }
605 }
606 void print_diagram(const bool &bond_info = false) const;
607 void print_blocks(const bool &full_info = true) const;
608 void print_block(const cytnx_int64 &idx, const bool &full_info = true) const;
609 Tensor get_block(const cytnx_uint64 &idx = 0) const { return this->_block.clone(); }
610
611 Tensor get_block(const std::vector<cytnx_int64> &qnum, const bool &force) const {
612 cytnx_error_msg(
613 true, "[ERROR][DenseUniTensor] try to get_block() using qnum on a non-symmetric UniTensor%s",
614 "\n");
615 return Tensor();
616 }
617 // return a shared view of the block; this only works for non-symmetric tensors.
618 const Tensor &get_block_(const std::vector<cytnx_int64> &qnum, const bool &force) const {
619 cytnx_error_msg(
620 true,
621 "[ERROR][DenseUniTensor] try to get_block_() using qnum on a non-symmetry UniTensor%s",
622 "\n");
623 return this->_block;
624 }
625 Tensor &get_block_(const std::vector<cytnx_int64> &qnum, const bool &force) {
626 cytnx_error_msg(
627 true,
628 "[ERROR][DenseUniTensor] try to get_block_() using qnum on a non-symmetry UniTensor%s",
629 "\n");
630 return this->_block;
631 }
632
633 // return a shared view of the block; this only works for non-symmetric tensors.
634 Tensor &get_block_(const cytnx_uint64 &idx = 0) { return this->_block; }
635 // return a shared view of the block; this only works for non-symmetric tensors.
636 const Tensor &get_block_(const cytnx_uint64 &idx = 0) const { return this->_block; }
637
638 cytnx_uint64 Nblocks() const { return 1; };
639 std::vector<Tensor> get_blocks() const {
640 std::vector<Tensor> out;
641 cytnx_error_msg(
642 true, "[ERROR][DenseUniTensor] cannot use get_blocks(), use get_block() instead!%s", "\n");
643 return out; // this will not share memory!!
644 }
645 const std::vector<Tensor> &get_blocks_(const bool &silent = false) const {
646 cytnx_error_msg(
647 true, "[ERROR][DenseUniTensor] cannot use get_blocks_(), use get_block_() instead!%s",
648 "\n");
649 return this->_interface_block; // this will not share memory!!
650 }
651 std::vector<Tensor> &get_blocks_(const bool &silent = false) {
652 cytnx_error_msg(
653 true, "[ERROR][DenseUniTensor] cannot use get_blocks_(), use get_block_() instead!%s",
654 "\n");
655 return this->_interface_block; // this will not share memory!!
656 }
657
658 void put_block(const Tensor &in, const cytnx_uint64 &idx = 0) {
659 // We don't check the dtype for DenseUniTensor, since it'll be more convenient to change
660 // DenseUniTensor's dtype
661
662 // cytnx_error_msg(in.dtype() != this->dtype(),
663 // "[ERROR][DenseUniTensor][put_block] The input tensor dtype does not
664 // match.%s",
665 // "\n");
666 cytnx_error_msg(in.device() != this->device(),
667 "[ERROR][DenseUniTensor][put_block] The input tensor device does not "
668 "match.%s",
669 "\n");
670 // We shouldn't check contiguity here
671 // cytnx_error_msg(!in.contiguous());
672 if (this->is_diag()) {
673 cytnx_error_msg(
674 in.shape() != this->_block.shape(),
675 "[ERROR][DenseUniTensor] put_block, the input tensor shape does not match.%s", "\n");
676 this->_block = in.clone();
677 } else {
678 cytnx_error_msg(
679 in.shape() != this->shape(),
680 "[ERROR][DenseUniTensor] put_block, the input tensor shape does not match.%s", "\n");
681 this->_block = in.clone();
682 }
683 }
684 // shared view of the block
685 void put_block_(Tensor &in, const cytnx_uint64 &idx = 0) {
686 // We don't check the dtype for DenseUniTensor, since it'll be more convenient to change
687 // DenseUniTensor's dtype
688
689 // cytnx_error_msg(in.dtype() != this->dtype(),
690 // "[ERROR][DenseUniTensor][put_block] The input tensor dtype does not
691 // match.%s",
692 // "\n");
693 cytnx_error_msg(in.device() != this->device(),
694 "[ERROR][DenseUniTensor][put_block] The input tensor device does not "
695 "match.%s",
696 "\n");
697 // We shouldn't check contiguity here
698 // cytnx_error_msg(!in.contiguous());
699 if (this->is_diag()) {
700 cytnx_error_msg(
701 in.shape() != this->_block.shape(),
702 "[ERROR][DenseUniTensor] put_block, the input tensor shape does not match.%s", "\n");
703 this->_block = in;
704 } else {
705 cytnx_error_msg(
706 in.shape() != this->shape(),
707 "[ERROR][DenseUniTensor] put_block, the input tensor shape does not match.%s", "\n");
708 this->_block = in;
709 }
710 }
711
712 void put_block(const Tensor &in, const std::vector<cytnx_int64> &qnum, const bool &force) {
713 cytnx_error_msg(
714 true, "[ERROR][DenseUniTensor] try to put_block using qnum on a non-symmetric UniTensor%s",
715 "\n");
716 }
717 void put_block_(Tensor &in, const std::vector<cytnx_int64> &qnum, const bool &force) {
718 cytnx_error_msg(
719 true, "[ERROR][DenseUniTensor] try to put_block using qnum on a non-symmetric UniTensor%s",
720 "\n");
721 }
722 // this will only work on non-symm tensor (DenseUniTensor)
723 boost::intrusive_ptr<UniTensor_base> get(const std::vector<Accessor> &accessors) {
724 boost::intrusive_ptr<UniTensor_base> out(new DenseUniTensor());
725 out->Init_by_Tensor(this->_block.get(accessors), false, 0); // wrapping around.
726 return out;
727 }
728 // this will only work on non-symm tensor (DenseUniTensor)
729 void set(const std::vector<Accessor> &accessors, const Tensor &rhs) {
730 this->_block.set(accessors, rhs);
731 }
732
733 void reshape_(const std::vector<cytnx_int64> &new_shape, const cytnx_uint64 &rowrank = 0);
734 boost::intrusive_ptr<UniTensor_base> reshape(const std::vector<cytnx_int64> &new_shape,
735 const cytnx_uint64 &rowrank = 0);
736 boost::intrusive_ptr<UniTensor_base> to_dense();
737 void to_dense_();
738 void combineBond(const std::vector<std::string> &indicators, const bool &force = true);
739 void combineBonds(const std::vector<cytnx_int64> &indicators, const bool &force,
740 const bool &by_label);
741 void combineBonds(const std::vector<std::string> &indicators, const bool &force = true);
742 void combineBonds(const std::vector<cytnx_int64> &indicators, const bool &force = true);
743 boost::intrusive_ptr<UniTensor_base> contract(const boost::intrusive_ptr<UniTensor_base> &rhs,
744 const bool &mv_elem_self = false,
745 const bool &mv_elem_rhs = false);
746 std::vector<Bond> getTotalQnums(const bool &physical = false) {
747 cytnx_error_msg(true, "[ERROR][DenseUniTensor] %s",
748 "getTotalQnums can only operate on UniTensor with symmetry.\n");
749 return std::vector<Bond>();
750 }
751
752 std::vector<std::vector<cytnx_int64>> get_blocks_qnums() const {
753 cytnx_error_msg(true, "[ERROR][DenseUniTensor] %s",
754 "get_blocks_qnums can only operate on UniTensor with symmetry.\n");
755 return std::vector<std::vector<cytnx_int64>>();
756 }
757
758 bool same_data(const boost::intrusive_ptr<UniTensor_base> &rhs) const {
759 if (rhs->uten_type() != UTenType.Dense) return false;
760
761 return this->get_block_().same_data(rhs->get_block_());
762 }
763
764 ~DenseUniTensor(){};
765
766 // arithmetic
767 void Add_(const boost::intrusive_ptr<UniTensor_base> &rhs);
768 void Add_(const Scalar &rhs);
769
770 void Mul_(const boost::intrusive_ptr<UniTensor_base> &rhs);
771 void Mul_(const Scalar &rhs);
772
773 void Sub_(const boost::intrusive_ptr<UniTensor_base> &rhs);
774 void Sub_(const Scalar &rhs);
775 void lSub_(const Scalar &lhs);
776
777 void Div_(const boost::intrusive_ptr<UniTensor_base> &rhs);
778 void Div_(const Scalar &rhs);
779 void lDiv_(const Scalar &lhs);
780
781 void Conj_() { this->_block.Conj_(); };
782
783 boost::intrusive_ptr<UniTensor_base> Conj() {
784 boost::intrusive_ptr<UniTensor_base> out = this->clone();
785 out->Conj_();
786 return out;
787 }
788
789 boost::intrusive_ptr<UniTensor_base> Transpose() {
790 boost::intrusive_ptr<UniTensor_base> out = this->clone();
791 out->Transpose_();
792 return out;
793 }
794 void Transpose_();
795
796 boost::intrusive_ptr<UniTensor_base> normalize() {
797 boost::intrusive_ptr<UniTensor_base> out = this->clone();
798 out->normalize_();
799 return out;
800 }
801 void normalize_();
802
803 boost::intrusive_ptr<UniTensor_base> Dagger() {
804 boost::intrusive_ptr<UniTensor_base> out = this->Conj();
805 out->Transpose_();
806 return out;
807 }
808 void Dagger_() {
809 this->Conj_();
810 this->Transpose_();
811 }
821 void Trace_(const cytnx_int64 &a, const cytnx_int64 &b);
822 void Trace_(const std::string &a, const std::string &b);
823 boost::intrusive_ptr<UniTensor_base> Trace(const std::string &a, const std::string &b) {
824 boost::intrusive_ptr<UniTensor_base> out = this->clone();
825 out->Trace_(a, b);
826 return out;
827 }
828 boost::intrusive_ptr<UniTensor_base> Trace(const cytnx_int64 &a, const cytnx_int64 &b) {
829 boost::intrusive_ptr<UniTensor_base> out = this->clone();
830 out->Trace_(a, b);
831 return out;
832 }
833
834 Tensor Norm() const;
835
836 const Scalar::Sproxy at_for_sparse(const std::vector<cytnx_uint64> &locator) const {
837 cytnx_error_msg(
838 true, "[ERROR][Internal] This shouldn't be called by DenseUniTensor, something is wrong.%s",
839 "\n");
840 return Scalar::Sproxy();
841 }
842 const cytnx_complex128 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
843 const cytnx_complex128 &aux) const {
844 cytnx_error_msg(
845 true, "[ERROR][Internal] This shouldn't be called by DenseUniTensor, something is wrong.%s",
846 "\n");
847 return cytnx_complex128(0, 0);
848 }
849 const cytnx_complex64 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
850 const cytnx_complex64 &aux) const {
851 cytnx_error_msg(
852 true, "[ERROR][Internal] This shouldn't be called by DenseUniTensor, something is wrong.%s",
853 "\n");
854 return cytnx_complex64(0, 0);
855 }
856 const cytnx_double &at_for_sparse(const std::vector<cytnx_uint64> &locator,
857 const cytnx_double &aux) const {
858 cytnx_error_msg(
859 true, "[ERROR][Internal] This shouldn't be called by DenseUniTensor, something is wrong.%s",
860 "\n");
861 return 0;
862 }
863 const cytnx_float &at_for_sparse(const std::vector<cytnx_uint64> &locator,
864 const cytnx_float &aux) const {
865 cytnx_error_msg(
866 true, "[ERROR][Internal] This shouldn't be called by DenseUniTensor, something is wrong.%s",
867 "\n");
868 return 0;
869 }
870 const cytnx_uint64 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
871 const cytnx_uint64 &aux) const {
872 cytnx_error_msg(
873 true, "[ERROR][Internal] This shouldn't be called by DenseUniTensor, something is wrong.%s",
874 "\n");
875 return 0;
876 }
877 const cytnx_int64 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
878 const cytnx_int64 &aux) const {
879 cytnx_error_msg(
880 true, "[ERROR][Internal] This shouldn't be called by DenseUniTensor, something is wrong.%s",
881 "\n");
882 return 0;
883 }
884 const cytnx_uint32 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
885 const cytnx_uint32 &aux) const {
886 cytnx_error_msg(
887 true, "[ERROR][Internal] This shouldn't be called by DenseUniTensor, something is wrong.%s",
888 "\n");
889 return 0;
890 }
891 const cytnx_int32 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
892 const cytnx_int32 &aux) const {
893 cytnx_error_msg(
894 true, "[ERROR][Internal] This shouldn't be called by DenseUniTensor, something is wrong.%s",
895 "\n");
896 return 0;
897 }
898 const cytnx_uint16 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
899 const cytnx_uint16 &aux) const {
900 cytnx_error_msg(
901 true, "[ERROR][Internal] This shouldn't be called by DenseUniTensor, something is wrong.%s",
902 "\n");
903 return 0;
904 }
905 const cytnx_int16 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
906 const cytnx_int16 &aux) const {
907 cytnx_error_msg(
908 true, "[ERROR][Internal] This shouldn't be called by DenseUniTensor, something is wrong.%s",
909 "\n");
910 return 0;
911 }
912
913 Scalar::Sproxy at_for_sparse(const std::vector<cytnx_uint64> &locator) {
914 cytnx_error_msg(
915 true, "[ERROR][Internal] This shouldn't be called by DenseUniTensor, something is wrong.%s",
916 "\n");
917 return Scalar::Sproxy();
918 }
919 cytnx_complex128 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
920 const cytnx_complex128 &aux) {
921 cytnx_error_msg(
922 true, "[ERROR][Internal] This shouldn't be called by DenseUniTensor, something is wrong.%s",
923 "\n");
924 return *(cytnx_complex128 *)nullptr;
925 }
926 cytnx_complex64 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
927 const cytnx_complex64 &aux) {
928 cytnx_error_msg(
929 true, "[ERROR][Internal] This shouldn't be called by DenseUniTensor, something is wrong.%s",
930 "\n");
931 return *(cytnx_complex64 *)nullptr;
932 }
933 cytnx_double &at_for_sparse(const std::vector<cytnx_uint64> &locator, const cytnx_double &aux) {
934 cytnx_error_msg(
935 true, "[ERROR][Internal] This shouldn't be called by DenseUniTensor, something is wrong.%s",
936 "\n");
937 return *(cytnx_double *)nullptr;
938 }
939 cytnx_float &at_for_sparse(const std::vector<cytnx_uint64> &locator, const cytnx_float &aux) {
940 cytnx_error_msg(
941 true, "[ERROR][Internal] This shouldn't be called by DenseUniTensor, something is wrong.%s",
942 "\n");
943 return *(cytnx_float *)nullptr;
944 }
945 cytnx_uint64 &at_for_sparse(const std::vector<cytnx_uint64> &locator, const cytnx_uint64 &aux) {
946 cytnx_error_msg(
947 true, "[ERROR][Internal] This shouldn't be called by DenseUniTensor, something is wrong.%s",
948 "\n");
949 return *(cytnx_uint64 *)nullptr;
950 }
951 cytnx_int64 &at_for_sparse(const std::vector<cytnx_uint64> &locator, const cytnx_int64 &aux) {
952 cytnx_error_msg(
953 true, "[ERROR][Internal] This shouldn't be called by DenseUniTensor, something is wrong.%s",
954 "\n");
955 return *(cytnx_int64 *)nullptr;
956 }
957 cytnx_uint32 &at_for_sparse(const std::vector<cytnx_uint64> &locator, const cytnx_uint32 &aux) {
958 cytnx_error_msg(
959 true, "[ERROR][Internal] This shouldn't be called by DenseUniTensor, something is wrong.%s",
960 "\n");
961 return *(cytnx_uint32 *)nullptr;
962 }
963 cytnx_int32 &at_for_sparse(const std::vector<cytnx_uint64> &locator, const cytnx_int32 &aux) {
964 cytnx_error_msg(
965 true, "[ERROR][Internal] This shouldn't be called by DenseUniTensor, something is wrong.%s",
966 "\n");
967 return *(cytnx_int32 *)nullptr;
968 }
969 cytnx_uint16 &at_for_sparse(const std::vector<cytnx_uint64> &locator, const cytnx_uint16 &aux) {
970 cytnx_error_msg(
971 true, "[ERROR][Internal] This shouldn't be called by DenseUniTensor, something is wrong.%s",
972 "\n");
973 return *(cytnx_uint16 *)nullptr;
974 }
975 cytnx_int16 &at_for_sparse(const std::vector<cytnx_uint64> &locator, const cytnx_int16 &aux) {
976 cytnx_error_msg(
977 true, "[ERROR][Internal] This shouldn't be called by DenseUniTensor, something is wrong.%s",
978 "\n");
979 return *(cytnx_int16 *)nullptr;
980 }
981
982 bool elem_exists(const std::vector<cytnx_uint64> &locator) const {
983 cytnx_error_msg(
984 true, "[ERROR][DenseUniTensor] elem_exists can only be used on a UniTensor with Symmetry.%s",
985 "\n");
986 }
987 void tag() {
988 if (!this->is_tag()) {
989 for (int i = 0; i < this->_rowrank; i++) {
990 this->_bonds[i].set_type(BD_KET);
991 }
992 for (int i = this->_rowrank; i < this->_bonds.size(); i++) {
993 this->_bonds[i].set_type(BD_BRA);
994 }
995 this->_is_tag = true;
996 this->_is_braket_form = this->_update_braket();
997 }
998 }
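    // Usage sketch: calling tag() on an untagged rank-3 tensor with _rowrank = 1
    // sets bond 0 to BD_KET and bonds 1 and 2 to BD_BRA, then refreshes
    // _is_braket_form via _update_braket().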
1008 void truncate_(const cytnx_int64 &bond_idx, const cytnx_uint64 &dim);
1009 void truncate_(const std::string &label, const cytnx_uint64 &dim);
1010
1011 void from_(const boost::intrusive_ptr<UniTensor_base> &rhs, const bool &force);
1012
1013 void group_basis_() {
1014 cytnx_warning_msg(true, "[WARNING] group basis will not have any effect on a DenseUniTensor.%s",
1015 "\n");
1016 }
1017
1018 void _save_dispatch(std::fstream &f) const;
1019 void _load_dispatch(std::fstream &f);
1020
1021 const std::vector<cytnx_uint64> &get_qindices(const cytnx_uint64 &bidx) const {
1022 cytnx_error_msg(true, "[ERROR] get_qindices can only be used on a UniTensor with Symmetry.%s",
1023 "\n");
1024 }
1025 std::vector<cytnx_uint64> &get_qindices(const cytnx_uint64 &bidx) {
1026 cytnx_error_msg(true, "[ERROR] get_qindices can only be used on a UniTensor with Symmetry.%s",
1027 "\n");
1028 }
1029
1030 const vec2d<cytnx_uint64> &get_itoi() const {
1031 cytnx_error_msg(true, "[ERROR] get_itoi can only be used on a UniTensor with Symmetry.%s",
1032 "\n");
1033 }
1034 vec2d<cytnx_uint64> &get_itoi() {
1035 cytnx_error_msg(true, "[ERROR] get_itoi can only be used on a UniTensor with Symmetry.%s",
1036 "\n");
1037 }
1038
1039 // end virtual function
1040 };
1042
1043 //======================================================================
1045 class BlockUniTensor : public UniTensor_base {
1046 protected:
1047 public:
1048 std::vector<std::vector<cytnx_uint64>>
1049 _inner_to_outer_idx; // stores the qindices for each block
1050 std::vector<Tensor> _blocks;
1051 Tensor NullRefTensor; // this is returned when the accessed block does not exist!
1052
1053 // given an index list [loc], get the qnums from this->_bonds[loc] and return the combined qnums
1054 // calculated from the Symmetry objects! this assumes 1. the symmetries are the same for each bond!
1055 // 2. total_qns is fed in with size len(symmetry)
1056 void _fx_get_total_fluxs(std::vector<cytnx_uint64> &loc, const std::vector<Symmetry> &syms,
1057 std::vector<cytnx_int64> &total_qns) {
1058 memset(&total_qns[0], 0, sizeof(cytnx_int64) * total_qns.size());
1059
1060 for (cytnx_int32 i = 0; i < syms.size(); i++) {
1061 if (this->_bonds[0].type() == BD_BRA)
1062 total_qns[i] = syms[0].reverse_rule(this->_bonds[0]._impl->_qnums[loc[0]][i]);
1063 else
1064 total_qns[i] = this->_bonds[0]._impl->_qnums[loc[0]][i];
1065
1066 for (auto j = 1; j < loc.size(); j++) {
1067 if (this->_bonds[j].type() == BD_BRA)
1068 total_qns[i] = syms[i].combine_rule(
1069 total_qns[i], syms[i].reverse_rule(this->_bonds[j]._impl->_qnums[loc[j]][i]));
1070 else {
1071 total_qns[i] =
1072 syms[i].combine_rule(total_qns[i], this->_bonds[j]._impl->_qnums[loc[j]][i]);
1073 }
1074 }
1075 }
1076 }
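    // Usage sketch (assuming a single U1 symmetry, where combine_rule adds charges and
    // reverse_rule negates them): for loc = {i, j}, the total flux is the charge of
    // qnum i on bond 0 plus that of qnum j on bond 1, with bra bonds entering with a
    // flipped sign, so a ket charge +1 against a bra charge +1 gives total flux 0.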
1077
1078 void _fx_locate_elem(cytnx_int64 &bidx, std::vector<cytnx_uint64> &loc_in_T,
1079 const std::vector<cytnx_uint64> &locator) const;
1080
1081 // internal function, grouping all duplicate qnums in all bonds
1082 void _fx_group_duplicates(const std::vector<cytnx_uint64> &dup_bond_idxs,
1083 const std::vector<std::vector<cytnx_uint64>> &idx_mappers);
1084
1085 void set_meta(BlockUniTensor *tmp, const bool &inner, const bool &outer) const {
1086 // outer meta
1087 if (outer) {
1088 tmp->_bonds = vec_clone(this->_bonds);
1089 tmp->_labels = this->_labels;
1090 tmp->_is_braket_form = this->_is_braket_form;
1091 tmp->_rowrank = this->_rowrank;
1092 tmp->_name = this->_name;
1093 }
1094
1095 tmp->_is_diag = this->_is_diag;
1096
1097 // inner meta
1098 if (inner) {
1099 tmp->_inner_to_outer_idx = this->_inner_to_outer_idx;
1100 }
1101 }
1102
1103 BlockUniTensor *clone_meta(const bool &inner, const bool &outer) const {
1104 BlockUniTensor *tmp = new BlockUniTensor();
1105 this->set_meta(tmp, inner, outer);
1106 return tmp;
1107 };
1108
1109 friend class UniTensor;
1110 BlockUniTensor() {
1111 this->uten_type_id = UTenType.Block;
1112 this->_is_tag = true;
1113 }
1114
1115 // virtual functions:
1116 // void Init(const std::vector<Bond> &bonds, const std::vector<cytnx_int64> &in_labels = {},
1117 // const cytnx_int64 &rowrank = -1, const unsigned int &dtype = Type.Double,
1118 // const int &device = Device.cpu, const bool &is_diag = false,
1119 // const bool &no_alloc = false);
1120
1121 void Init(const std::vector<Bond> &bonds, const std::vector<std::string> &in_labels = {},
1122 const cytnx_int64 &rowrank = -1, const unsigned int &dtype = Type.Double,
1123 const int &device = Device.cpu, const bool &is_diag = false,
1124 const bool &no_alloc = false, const std::string &name = "");
1125
1126 void Init_by_Tensor(const Tensor &in_tensor, const bool &is_diag = false,
1127 const cytnx_int64 &rowrank = -1, const std::string &name = "") {
1128 cytnx_error_msg(
1129 true, "[ERROR][BlockUniTensor] cannot use Init_by_Tensor() on a BlockUniTensor.%s", "\n");
1130 }
1131
1132 std::vector<cytnx_uint64> shape() const {
1133 std::vector<cytnx_uint64> out(this->_bonds.size());
1134 for (cytnx_uint64 i = 0; i < out.size(); i++) {
1135 out[i] = this->_bonds[i].dim();
1136 }
1137 return out;
1138 }
1139
1140 bool is_blockform() const { return true; }
1141 bool is_contiguous() const {
1142 bool out = true;
1143 for (int i = 0; i < this->_blocks.size(); i++) {
1144 out &= this->_blocks[i].is_contiguous();
1145 }
1146 return out;
1147 };
1148
1149 cytnx_uint64 Nblocks() const { return this->_blocks.size(); };
1150
1151 void to_(const int &device) {
1152 for (cytnx_uint64 i = 0; i < this->_blocks.size(); i++) {
1153 this->_blocks[i].to_(device);
1154 }
1155 };
1156
1157 boost::intrusive_ptr<UniTensor_base> to(const int &device) {
1158 if (this->device() == device) {
1159 return this;
1160 } else {
1161 boost::intrusive_ptr<UniTensor_base> out = this->clone();
1162 out->to_(device);
1163 return out;
1164 }
1165 };
1166
1167 boost::intrusive_ptr<UniTensor_base> clone() const {
1168 BlockUniTensor *tmp = this->clone_meta(true, true);
1169 tmp->_blocks = vec_clone(this->_blocks);
1170 boost::intrusive_ptr<UniTensor_base> out(tmp);
1171 return out;
1172 };
1173
1174 unsigned int dtype() const {
1175 //[21 Aug 2024] This is a copy from BlockUniTensor;
1176 #ifdef UNI_DEBUG
1177 cytnx_error_msg(this->_blocks.size() == 0, "[ERROR][internal] empty blocks for blockform.%s",
1178 "\n");
1179 #endif
1180 return this->_blocks.size() < 1 ? Type.Void : this->_blocks[0].dtype();
1181 };
1182 int device() const {
1183 //[21 Aug 2024] This is a copy from BlockUniTensor;
1184 #ifdef UNI_DEBUG
1185 cytnx_error_msg(this->_blocks.size() == 0, "[ERROR][internal] empty blocks for blockform.%s",
1186 "\n");
1187 #endif
1188 return this->_blocks.size() < 1 ? -404 : this->_blocks[0].device();
1189 };
1190 std::string dtype_str() const {
1191 //[21 Aug 2024] This is a copy from BlockUniTensor;
1192 #ifdef UNI_DEBUG
1193 cytnx_error_msg(this->_blocks.size() == 0, "[ERROR][internal] empty blocks for blockform.%s",
1194 "\n");
1195 #endif
1196 return this->_blocks.size() < 1 ? "Void, no valid blocks" : this->_blocks[0].dtype_str();
1197 };
1198 std::string device_str() const {
1199 //[21 Aug 2024] This is a copy from BlockUniTensor;
1200 #ifdef UNI_DEBUG
1201 cytnx_error_msg(this->_blocks.size() == 0, "[ERROR][internal] empty blocks for blockform.%s",
1202 "\n");
1203 #endif
1204 return this->_blocks.size() < 1 ? "None, no valid blocks" : this->_blocks[0].device_str();
1205 };
1206
1207 Tensor get_block(const cytnx_uint64 &idx = 0) const {
1208 cytnx_error_msg(idx >= this->_blocks.size(), "[ERROR][BlockUniTensor] index out of range%s",
1209 "\n");
1210 return this->_blocks[idx].clone();
1211 };
1212
1213 // this overload for Block takes the block indices (qindices), not qnums!!
1214 Tensor get_block(const std::vector<cytnx_int64> &indices, const bool &force_return) const {
1215 cytnx_error_msg(indices.size() != this->rank(),
1216 "[ERROR][get_block][BlockUniTensor] len(indices) must be the same as the "
1217 "Tensor rank (number of legs).%s",
1218 "\n");
1219
1220 std::vector<cytnx_uint64> inds(indices.begin(), indices.end());
1221
1222 // check whether a block with the specified indices exists!
1223 cytnx_int64 b = -1;
1224 for (cytnx_uint64 i = 0; i < this->_inner_to_outer_idx.size(); i++) {
1225 if (inds == this->_inner_to_outer_idx[i]) {
1226 b = i;
1227 break;
1228 }
1229 }
1230
1231 if (b < 0) {
1232 if (force_return) {
1233 return NullRefTensor;
1234 } else {
1235 cytnx_error_msg(true,
1236 "[ERROR][get_block][BlockUniTensor] no avaliable block exists, "
1237 "force_return=false, so "
1238 "error throws. \n If you want to return an empty block without "
1239 "error when block is "
1240 "not avaliable, set force_return=True.%s",
1241 "\n");
1242 }
1243 } else {
1244 return this->_blocks[b].clone();
1245 }
1246 }
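    // Usage sketch (rank-2 tensor): get_block({0, 1}, false) returns a clone of the
    // block whose qindices are {0, 1}; with force_return = true, a missing block
    // returns the empty NullRefTensor instead of raising an error.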
1247
1248 const Tensor &get_block_(const cytnx_uint64 &idx = 0) const {
1249 cytnx_error_msg(idx >= this->_blocks.size(), "[ERROR][BlockUniTensor] index out of range%s",
1250 "\n");
1251 return this->_blocks[idx];
1252 };
1253
1254 Tensor &get_block_(const cytnx_uint64 &idx = 0) {
1255 cytnx_error_msg(idx >= this->_blocks.size(), "[ERROR][BlockUniTensor] index out of range%s",
1256 "\n");
1257 return this->_blocks[idx];
1258 };
1259
1260 const Tensor &get_block_(const std::vector<cytnx_int64> &indices,
1261 const bool &force_return) const {
1262 cytnx_error_msg(indices.size() != this->rank(),
1263 "[ERROR][get_block][BlockUniTensor] len(indices) must be the same as the "
1264 "Tensor rank (number of legs).%s",
1265 "\n");
1266
1267 std::vector<cytnx_uint64> inds(indices.begin(), indices.end());
1268
1269 // check whether a block with the specified indices exists!
1270 cytnx_int64 b = -1;
1271 for (cytnx_uint64 i = 0; i < this->_inner_to_outer_idx.size(); i++) {
1272 if (inds == this->_inner_to_outer_idx[i]) {
1273 b = i;
1274 break;
1275 }
1276 }
1277
1278 if (b < 0) {
1279 if (force_return) {
1280 return this->NullRefTensor;
1281 } else {
1282 cytnx_error_msg(true,
1283 "[ERROR][get_block][BlockUniTensor] no avaliable block exists, "
1284 "force_return=false, so "
1285 "error throws. \n If you want to return an empty block without "
1286 "error when block is "
1287 "not avaliable, set force_return=True.%s",
1288 "\n");
1289 }
1290 } else {
1291 return this->_blocks[b];
1292 }
1293 }
1294
1295 Tensor &get_block_(const std::vector<cytnx_int64> &indices, const bool &force_return) {
1296 cytnx_error_msg(indices.size() != this->rank(),
1297 "[ERROR][get_block][BlockUniTensor] len(indices) must be the same as the "
1298 "Tensor rank (number of legs).%s",
1299 "\n");
1300
1301 std::vector<cytnx_uint64> inds(indices.begin(), indices.end());
1302
1303 // check whether a block with the specified indices exists!
1304 cytnx_int64 b = -1;
1305 for (cytnx_uint64 i = 0; i < this->_inner_to_outer_idx.size(); i++) {
1306 if (inds == this->_inner_to_outer_idx[i]) {
1307 b = i;
1308 break;
1309 }
1310 }
1311
1312 if (b < 0) {
1313 if (force_return) {
1314 return this->NullRefTensor;
1315 } else {
1316 cytnx_error_msg(true,
1317 "[ERROR][get_block][BlockUniTensor] no avaliable block exists, "
1318 "force_return=false, so "
1319 "error throws. \n If you want to return an empty block without "
1320 "error when block is "
1321 "not avaliable, set force_return=True.%s",
1322 "\n");
1323 }
1324 } else {
1325 return this->_blocks[b];
1326 }
1327 }
1328
1329 std::vector<Tensor> get_blocks() const { return vec_clone(this->_blocks); }
1330 const std::vector<Tensor> &get_blocks_(const bool &) const { return this->_blocks; }
1331 std::vector<Tensor> &get_blocks_(const bool &) { return this->_blocks; }
1332
1333 bool same_data(const boost::intrusive_ptr<UniTensor_base> &rhs) const {
1334 if (rhs->uten_type() != UTenType.Block) return false;
1335 if (rhs->get_blocks_(1).size() != this->get_blocks_(1).size()) return false;
1336
1337 for (int i = 0; i < rhs->get_blocks_(1).size(); i++)
1338 if (this->get_blocks_(1)[i].same_data(rhs->get_blocks_(1)[i]) == false) return false;
1339
1340 return true;
1341 }
1342
1343 void set_rowrank_(const cytnx_uint64 &new_rowrank) {
1344 cytnx_error_msg(new_rowrank > this->rank(),
1345 "[ERROR][BlockUniTensor] rowrank should be [>=0] and [<=UniTensor.rank].%s",
1346 "\n");
1347 if (this->is_diag()) {
1348 cytnx_error_msg(new_rowrank != 1,
1349 "[ERROR][BlockUniTensor] rowrank should be [==1] when is_diag =true!.%s",
1350 "\n");
1351 }
1352 this->_rowrank = new_rowrank;
1353 this->_is_braket_form = this->_update_braket();
1354 }
1355
1356 boost::intrusive_ptr<UniTensor_base> set_rowrank(const cytnx_uint64 &new_rowrank) const {
1357 BlockUniTensor *tmp = this->clone_meta(true, true);
1358 tmp->_blocks = this->_blocks;
1359 tmp->set_rowrank_(new_rowrank);
1360 boost::intrusive_ptr<UniTensor_base> out(tmp);
1361 return out;
1362 }
1363
1364 boost::intrusive_ptr<UniTensor_base> permute(const std::vector<cytnx_int64> &mapper,
1365 const cytnx_int64 &rowrank = -1);
1366 boost::intrusive_ptr<UniTensor_base> permute(const std::vector<std::string> &mapper,
1367 const cytnx_int64 &rowrank = -1);
1368
1369 void permute_(const std::vector<cytnx_int64> &mapper, const cytnx_int64 &rowrank = -1);
1370 void permute_(const std::vector<std::string> &mapper, const cytnx_int64 &rowrank = -1);
1371
1372 boost::intrusive_ptr<UniTensor_base> contiguous_() {
1373 for (unsigned int b = 0; b < this->_blocks.size(); b++) this->_blocks[b].contiguous_();
1374 return boost::intrusive_ptr<UniTensor_base>(this);
1375 }
1376
1377 boost::intrusive_ptr<UniTensor_base> contiguous();
1378
1379 void print_diagram(const bool &bond_info = false) const;
1380 void print_blocks(const bool &full_info = true) const;
1381 void print_block(const cytnx_int64 &idx, const bool &full_info = true) const;
1382
1383 boost::intrusive_ptr<UniTensor_base> contract(const boost::intrusive_ptr<UniTensor_base> &rhs,
1384 const bool &mv_elem_self = false,
1385 const bool &mv_elem_rhs = false);
1386
1387 boost::intrusive_ptr<UniTensor_base> relabel(const std::vector<std::string> &new_labels);
1388 boost::intrusive_ptr<UniTensor_base> relabels(const std::vector<std::string> &new_labels);
1389
1390 boost::intrusive_ptr<UniTensor_base> relabel(const std::vector<std::string> &old_labels,
1391 const std::vector<std::string> &new_labels);
1392 boost::intrusive_ptr<UniTensor_base> relabels(const std::vector<std::string> &old_labels,
1393 const std::vector<std::string> &new_labels);
1394
1395 boost::intrusive_ptr<UniTensor_base> relabel(const std::string &old_label,
1396 const std::string &new_label);
1397 boost::intrusive_ptr<UniTensor_base> relabel(const cytnx_int64 &inx,
1398 const std::string &new_label);
1399
1400 std::vector<Symmetry> syms() const;
1401
1402 void reshape_(const std::vector<cytnx_int64> &new_shape, const cytnx_uint64 &rowrank = 0) {
1403 cytnx_error_msg(true, "[ERROR] cannot reshape a UniTensor with symmetry.%s", "\n");
1404 }
1405 boost::intrusive_ptr<UniTensor_base> reshape(const std::vector<cytnx_int64> &new_shape,
1406 const cytnx_uint64 &rowrank = 0) {
1407 cytnx_error_msg(true, "[ERROR] cannot reshape a UniTensor with symmetry.%s", "\n");
1408 return nullptr;
1409 }
1410
1411 boost::intrusive_ptr<UniTensor_base> astype(const unsigned int &dtype) const {
1412 BlockUniTensor *tmp = this->clone_meta(true, true);
1413 tmp->_blocks.resize(this->_blocks.size());
1414 for (cytnx_int64 blk = 0; blk < this->_blocks.size(); blk++) {
1415 tmp->_blocks[blk] = this->_blocks[blk].astype(dtype);
1416 }
1417 boost::intrusive_ptr<UniTensor_base> out(tmp);
1418 return out;
1419 };
1420
1421 // this will only work on non-symm tensor (DenseUniTensor)
1422 boost::intrusive_ptr<UniTensor_base> get(const std::vector<Accessor> &accessors) {
1423 cytnx_error_msg(
1424 true,
1425 "[ERROR][BlockUniTensor][get] cannot use get on a UniTensor with "
1426 "Symmetry.\n suggestion: try get_block/get_block_/get_blocks/get_blocks_ first.%s",
1427 "\n");
1428 return nullptr;
1429 }
1430
1431 // this will only work on non-symm tensor (DenseUniTensor)
1432 void set(const std::vector<Accessor> &accessors, const Tensor &rhs) {
1433 cytnx_error_msg(
1434 true,
1435 "[ERROR][BlockUniTensor][set] cannot use set on a UniTensor with "
1436 "Symmetry.\n suggestion: try put_block/put_block_ first.%s",
1437 "\n");
1438 }
1439
1440 void put_block(const Tensor &in, const cytnx_uint64 &idx = 0) {
1441 cytnx_error_msg(in.dtype() != this->dtype(),
1442 "[ERROR][DenseUniTensor][put_block] The input tensor dtype does not match.%s",
1443 "\n");
1444 cytnx_error_msg(in.device() != this->device(),
1445 "[ERROR][DenseUniTensor][put_block] The input tensor device does not "
1446 "match.%s",
1447 "\n");
1448 // We shouldn't check the contiguous
1449 // cytnx_error_msg(!in.contiguous());
1450 cytnx_error_msg(idx >= this->_blocks.size(), "[ERROR][BlockUniTensor] index out of range%s",
1451 "\n");
1452 cytnx_error_msg(in.shape() != this->_blocks[idx].shape(),
1453 "[ERROR][BlockUniTensor] the shape of input tensor does not match the shape "
1454 "of block @ idx=%d\n",
1455 idx);
1456
1457 this->_blocks[idx] = in.clone();
1458 }
1459 void put_block_(Tensor &in, const cytnx_uint64 &idx = 0) {
1460 cytnx_error_msg(in.dtype() != this->dtype(),
1461 "[ERROR][DenseUniTensor][put_block] The input tensor dtype does not match.%s",
1462 "\n");
1463 cytnx_error_msg(in.device() != this->device(),
1464 "[ERROR][DenseUniTensor][put_block] The input tensor device does not "
1465 "match.%s",
1466 "\n");
1467 // We shouldn't check the contiguous
1468 // cytnx_error_msg(!in.contiguous());
1469 cytnx_error_msg(idx >= this->_blocks.size(), "[ERROR][BlockUniTensor] index out of range%s",
1470 "\n");
1471 cytnx_error_msg(in.shape() != this->_blocks[idx].shape(),
1472 "[ERROR][BlockUniTensor] the shape of input tensor does not match the shape "
1473 "of block @ idx=%d\n",
1474 idx);
1475
1476 this->_blocks[idx] = in;
1477 }
1478 void put_block(const Tensor &in, const std::vector<cytnx_int64> &indices, const bool &check) {
1479 cytnx_error_msg(in.dtype() != this->dtype(),
1480 "[ERROR][DenseUniTensor][put_block] The input tensor dtype does not match.%s",
1481 "\n");
1482 cytnx_error_msg(in.device() != this->device(),
1483 "[ERROR][DenseUniTensor][put_block] The input tensor device does not "
1484 "match.%s",
1485 "\n");
1486 // We shouldn't check the contiguous
1487 // cytnx_error_msg(!in.contiguous());
1488 cytnx_error_msg(indices.size() != this->rank(),
1489 "[ERROR][put_block][BlockUniTensor] len(indices) must be the same as the "
1490 "Tensor rank (number of legs).%s",
1491 "\n");
1492
1493 std::vector<cytnx_uint64> inds(indices.begin(), indices.end());
1494
1495 // check whether a block with the specified indices exists!
1496 cytnx_int64 b = -1;
1497 for (cytnx_uint64 i = 0; i < this->_inner_to_outer_idx.size(); i++) {
1498 if (inds == this->_inner_to_outer_idx[i]) {
1499 b = i;
1500 break;
1501 }
1502 }
1503
1504 if (b < 0) {
1505 if (check) {
1506 cytnx_error_msg(true,
1507 "[ERROR][put_block][BlockUniTensor] no avaliable block exists, "
1508 "check=true, so error throws. \n If you want without error when block "
1509 "is not avaliable, set check=false.%s",
1510 "\n");
1511 }
1512 } else {
1513 cytnx_error_msg(
1514 in.shape() != this->_blocks[b].shape(),
1515 "[ERROR][BlockUniTensor] the shape of input tensor does not match the shape "
1516 "of block @ idx=%d\n",
1517 b);
1518
1519 this->_blocks[b] = in.clone();
1520 }
1521 }
1522 void put_block_(Tensor &in, const std::vector<cytnx_int64> &indices, const bool &check) {
1523 cytnx_error_msg(in.dtype() != this->dtype(),
1524 "[ERROR][DenseUniTensor][put_block] The input tensor dtype does not match.%s",
1525 "\n");
1526 cytnx_error_msg(in.device() != this->device(),
1527 "[ERROR][DenseUniTensor][put_block] The input tensor device does not "
1528 "match.%s",
1529 "\n");
1530 // We shouldn't check contiguity here
1531 // cytnx_error_msg(!in.contiguous());
1532 cytnx_error_msg(indices.size() != this->rank(),
1533 "[ERROR][put_block][BlockUniTensor] len(indices) must be the same as the "
1534 "Tensor rank (number of legs).%s",
1535 "\n");
1536
1537 std::vector<cytnx_uint64> inds(indices.begin(), indices.end());
1538
1539 // find whether a block with the specified indices exists!
1540 cytnx_int64 b = -1;
1541 for (cytnx_uint64 i = 0; i < this->_inner_to_outer_idx.size(); i++) {
1542 if (inds == this->_inner_to_outer_idx[i]) {
1543 b = i;
1544 break;
1545 }
1546 }
1547
1548 if (b < 0) {
1549 if (check) {
1550 cytnx_error_msg(true,
1551 "[ERROR][put_block][BlockUniTensor] no avaliable block exists, "
1552 "check=true, so error throws. \n If you want without error when block "
1553 "is not avaliable, set check=false.%s",
1554 "\n");
1555 }
1556 } else {
1557 cytnx_error_msg(
1558 in.shape() != this->_blocks[b].shape(),
1559 "[ERROR][BlockUniTensor] the shape of input tensor does not match the shape "
1560 "of block @ idx=%d\n",
1561 b);
1562 this->_blocks[b] = in;
1563 }
1564 }
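// Usage sketch for the four put_block overloads above: put_block() deep-copies
// (clone()) the input Tensor into the target block, while put_block_() stores a shared
// reference; the target can be addressed either by its flat block index or by its
// per-bond quantum-number indices (qindices). A minimal sketch, assuming `ut` is the
// public UniTensor wrapper around a BlockUniTensor and that the wrapper forwards these
// calls (only part of the wrapper API appears later in this listing), and `T` is a
// Tensor whose dtype, device and shape match the target block:
//
//   ut.put_block(T, 0);                       // copy T into block 0
//   ut.put_block(T, {0, 1}, /*check=*/true);  // copy T into the block whose qindices on
//                                             // bond 0 / bond 1 are 0 / 1; throws if no
//                                             // such block exists and check==true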
1565
1566 void tag() {
1567 // no-op: not used for this UniTensor type!
1568 }
1569
1570 boost::intrusive_ptr<UniTensor_base> Conj() {
1571 boost::intrusive_ptr<UniTensor_base> out = this->clone();
1572 out->Conj_();
1573 return out;
1574 }
1575
1576 void Conj_() {
1577 for (int i = 0; i < this->_blocks.size(); i++) {
1578 this->_blocks[i].Conj_();
1579 }
1580 };
1581
1582 void Transpose_();
1583 boost::intrusive_ptr<UniTensor_base> Transpose() {
1584 boost::intrusive_ptr<UniTensor_base> out = this->clone();
1585 out->Transpose_();
1586 return out;
1587 }
1588
1589 void normalize_();
1590 boost::intrusive_ptr<UniTensor_base> normalize() {
1591 boost::intrusive_ptr<UniTensor_base> out = this->clone();
1592 out->normalize_();
1593 return out;
1594 }
1595
1596 boost::intrusive_ptr<UniTensor_base> Dagger() {
1597 boost::intrusive_ptr<UniTensor_base> out = this->Conj();
1598 out->Transpose_();
1599 return out;
1600 }
1601 void Dagger_() {
1602 this->Conj_();
1603 this->Transpose_();
1604 }
1605
1606 void Trace_(const std::string &a, const std::string &b);
1607 void Trace_(const cytnx_int64 &a, const cytnx_int64 &b);
1608
1609 boost::intrusive_ptr<UniTensor_base> Trace(const std::string &a, const std::string &b) {
1610 boost::intrusive_ptr<UniTensor_base> out = this->clone();
1611 out->Trace_(a, b);
1612 if (out->rank() == 0) {
1613 DenseUniTensor *tmp = new DenseUniTensor();
1614 tmp->_block = ((BlockUniTensor *)out.get())->_blocks[0];
1615 out = boost::intrusive_ptr<UniTensor_base>(tmp);
1616 }
1617 return out;
1618 }
1619 boost::intrusive_ptr<UniTensor_base> Trace(const cytnx_int64 &a, const cytnx_int64 &b) {
1620 boost::intrusive_ptr<UniTensor_base> out = this->clone();
1621 out->Trace_(a, b);
1622 if (out->rank() == 0) {
1623 DenseUniTensor *tmp = new DenseUniTensor();
1624 tmp->_block = ((BlockUniTensor *)out.get())->_blocks[0];
1625 out = boost::intrusive_ptr<UniTensor_base>(tmp);
1626 }
1627 return out;
1628 }
1629
1630 Tensor Norm() const;
1631
1632 bool elem_exists(const std::vector<cytnx_uint64> &locator) const;
1633
1634 const Scalar::Sproxy at_for_sparse(const std::vector<cytnx_uint64> &locator) const;
1635 const cytnx_complex128 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
1636 const cytnx_complex128 &aux) const;
1637 const cytnx_complex64 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
1638 const cytnx_complex64 &aux) const;
1639 const cytnx_double &at_for_sparse(const std::vector<cytnx_uint64> &locator,
1640 const cytnx_double &aux) const;
1641 const cytnx_float &at_for_sparse(const std::vector<cytnx_uint64> &locator,
1642 const cytnx_float &aux) const;
1643 const cytnx_uint64 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
1644 const cytnx_uint64 &aux) const;
1645 const cytnx_int64 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
1646 const cytnx_int64 &aux) const;
1647 const cytnx_uint32 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
1648 const cytnx_uint32 &aux) const;
1649 const cytnx_int32 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
1650 const cytnx_int32 &aux) const;
1651 const cytnx_uint16 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
1652 const cytnx_uint16 &aux) const;
1653 const cytnx_int16 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
1654 const cytnx_int16 &aux) const;
1655
1656 Scalar::Sproxy at_for_sparse(const std::vector<cytnx_uint64> &locator);
1657 cytnx_complex128 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
1658 const cytnx_complex128 &aux);
1659 cytnx_complex64 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
1660 const cytnx_complex64 &aux);
1661 cytnx_double &at_for_sparse(const std::vector<cytnx_uint64> &locator, const cytnx_double &aux);
1662 cytnx_float &at_for_sparse(const std::vector<cytnx_uint64> &locator, const cytnx_float &aux);
1663 cytnx_uint64 &at_for_sparse(const std::vector<cytnx_uint64> &locator, const cytnx_uint64 &aux);
1664 cytnx_int64 &at_for_sparse(const std::vector<cytnx_uint64> &locator, const cytnx_int64 &aux);
1665 cytnx_uint32 &at_for_sparse(const std::vector<cytnx_uint64> &locator, const cytnx_uint32 &aux);
1666 cytnx_int32 &at_for_sparse(const std::vector<cytnx_uint64> &locator, const cytnx_int32 &aux);
1667 cytnx_uint16 &at_for_sparse(const std::vector<cytnx_uint64> &locator, const cytnx_uint16 &aux);
1668 cytnx_int16 &at_for_sparse(const std::vector<cytnx_uint64> &locator, const cytnx_int16 &aux);
1669
1670 void _save_dispatch(std::fstream &f) const;
1671 void _load_dispatch(std::fstream &f);
1672
1673 // this will remove the [q_index]-th qnum at [bond_idx]-th Bond!
1674 void truncate_(const std::string &label, const cytnx_uint64 &q_index);
1675 void truncate_(const cytnx_int64 &bond_idx, const cytnx_uint64 &q_index);
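// Usage sketch for truncate_: it removes a single quantum-number entry from one bond,
// addressed either by the bond's label or by its index. Assuming the public UniTensor
// wrapper forwards the call (an assumption; only part of the wrapper is shown here) and
// `ut` has a bond labelled "vL":
//
//   ut.truncate_("vL", 2);   // drop the 3rd qnum entry of the bond labelled "vL"
//   ut.truncate_(0, 2);      // same, addressing the bond by its index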
1676
1677 void Add_(const boost::intrusive_ptr<UniTensor_base> &rhs);
1678 void Add_(const Scalar &rhs) {
1679 cytnx_error_msg(
1680 true,
1681 "[ERROR] cannot perform elementwise arithmetic '+' between Scalar and BlockUniTensor.\n %s "
1682 "\n",
1683 "This operation would destroy the block structure. [Suggest] Avoid or use get/set_block(s) "
1684 "to do operation on blocks.");
1685 }
1686
1687 void Mul_(const boost::intrusive_ptr<UniTensor_base> &rhs);
1688 void Mul_(const Scalar &rhs);
1689
1690 void Sub_(const boost::intrusive_ptr<UniTensor_base> &rhs);
1691 void Sub_(const Scalar &rhs) {
1692 cytnx_error_msg(
1693 true,
1694 "[ERROR] cannot perform elementwise arithmetic '-' between Scalar and BlockUniTensor.\n %s "
1695 "\n",
1696 "This operation would destroy the block structure. [Suggest] Avoid or use get/set_block(s) "
1697 "to do operation on blocks.");
1698 }
1699 void lSub_(const Scalar &lhs) {
1700 cytnx_error_msg(
1701 true,
1702 "[ERROR] cannot perform elementwise arithmetic '-' between Scalar and BlockUniTensor.\n %s "
1703 "\n",
1704 "This operation would destroy the block structure. [Suggest] Avoid or use get/set_block(s) "
1705 "to do operation on blocks.");
1706 }
1707
1708 void Div_(const boost::intrusive_ptr<UniTensor_base> &rhs);
1709 void Div_(const Scalar &rhs);
1710 void lDiv_(const Scalar &lhs) {
1711 cytnx_error_msg(
1712 true,
1713 "[ERROR] cannot perform elementwise arithmetic '/' between Scalar and BlockUniTensor.\n %s "
1714 "\n",
1715 "This operation would cause division by zero on non-block elements. [Suggest] Avoid or use "
1716 "get/set_block(s) to do operation on blocks.");
1717 }
1718 void from_(const boost::intrusive_ptr<UniTensor_base> &rhs, const bool &force,
1719 const cytnx_double &tol);
1720
1721 void group_basis_();
1722
1723 void combineBond(const std::vector<std::string> &indicators, const bool &force = false);
1724 void combineBonds(const std::vector<cytnx_int64> &indicators, const bool &force = false);
1725 void combineBonds(const std::vector<cytnx_int64> &indicators, const bool &force,
1726 const bool &by_label);
1727 void combineBonds(const std::vector<std::string> &indicators, const bool &force = false);
1728
1729 const std::vector<cytnx_uint64> &get_qindices(const cytnx_uint64 &bidx) const {
1730 cytnx_error_msg(
1731 bidx >= this->Nblocks(),
1732 "[ERROR][BlockUniTensor] bidx out of bound! only %d blocks in current UTen.\n",
1733 this->Nblocks());
1734 return this->_inner_to_outer_idx[bidx];
1735 }
1736 std::vector<cytnx_uint64> &get_qindices(const cytnx_uint64 &bidx) {
1737 cytnx_error_msg(
1738 bidx >= this->Nblocks(),
1739 "[ERROR][BlockUniTensor] bidx out of bound! only %d blocks in current UTen.\n",
1740 this->Nblocks());
1741 return this->_inner_to_outer_idx[bidx];
1742 }
1743
1744 const vec2d<cytnx_uint64> &get_itoi() const { return this->_inner_to_outer_idx; }
1745 vec2d<cytnx_uint64> &get_itoi() { return this->_inner_to_outer_idx; }
1746 };
1748
1749 //======================================================================
1751 class BlockFermionicUniTensor : public UniTensor_base {
1752 //[21 Aug 2024] This is a copy from BlockUniTensor; additionally sign flips are stored as
1753 //_signflip and taken care of in all the methods
1754 public:
1755 std::vector<std::vector<cytnx_uint64>>
1756 _inner_to_outer_idx; // stores the qindices for each block
1757 std::vector<Tensor> _blocks;
1758 Tensor NullRefTensor; // this is returned when the accessed block does not exist!
1759 // additional information for fermions:
1760 std::vector<cytnx_bool>
1761 _signflip; // if true, the sign of the corresponding block needs to be flipped
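// Reading of the member above: _signflip keeps one flag per block, and a block whose
// flag is set is understood to carry an extra factor (-1) that still has to be applied.
// Conceptually (a sketch only; the actual bookkeeping lives in the .cpp implementation):
//
//   // for (size_t b = 0; b < _blocks.size(); ++b)
//   //   if (_signflip[b]) { /* multiply _blocks[b] by -1 before using it */ }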
1762
1763 // given an index list [loc], get the qnums from this->_bonds[loc] and return the combined qnums
1764 // calculated from the Symmetry objects! This assumes 1. the symmetries are the same for each bond,
1765 // and 2. total_qns is fed in with size len(symmetry)
1766 void _fx_get_total_fluxs(std::vector<cytnx_uint64> &loc, const std::vector<Symmetry> &syms,
1767 std::vector<cytnx_int64> &total_qns) {
1768 //[21 Aug 2024] This is a copy from BlockUniTensor;
1769 memset(&total_qns[0], 0, sizeof(cytnx_int64) * total_qns.size());
1770
1771 for (cytnx_int32 i = 0; i < syms.size(); i++) {
1772 if (this->_bonds[0].type() == BD_BRA)
1773 total_qns[i] = syms[0].reverse_rule(this->_bonds[0]._impl->_qnums[loc[0]][i]);
1774 else
1775 total_qns[i] = this->_bonds[0]._impl->_qnums[loc[0]][i];
1776
1777 for (auto j = 1; j < loc.size(); j++) {
1778 if (this->_bonds[j].type() == BD_BRA)
1779 total_qns[i] = syms[i].combine_rule(
1780 total_qns[i], syms[i].reverse_rule(this->_bonds[j]._impl->_qnums[loc[j]][i]));
1781 else {
1782 total_qns[i] =
1783 syms[i].combine_rule(total_qns[i], this->_bonds[j]._impl->_qnums[loc[j]][i]);
1784 }
1785 }
1786 }
1787 }
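// Worked example for the loop above: for a single U(1)-like symmetry whose combine_rule
// is addition and whose reverse_rule is negation (an assumption about the Symmetry
// object; other symmetries supply their own rules), the block addressed by qindices
// `loc` gets
//
//   total_qns[0] = sum_{j in KET bonds} qnum_j(loc[j]) - sum_{j in BRA bonds} qnum_j(loc[j]),
//
// i.e. its total flux, which the caller can compare against the tensor's conserved flux.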
1788
1789 void _fx_locate_elem(cytnx_int64 &bidx, std::vector<cytnx_uint64> &loc_in_T,
1790 const std::vector<cytnx_uint64> &locator) const;
1791
1792 // internal function, grouping all duplicate qnums in all bonds
1793 void _fx_group_duplicates(const std::vector<cytnx_uint64> &dup_bond_idxs,
1794 const std::vector<std::vector<cytnx_uint64>> &idx_mappers);
1795
1796 void set_meta(BlockFermionicUniTensor *tmp, const bool &inner, const bool &outer) const {
1797 //[21 Aug 2024] This is a copy from BlockUniTensor; additionally, _signflip is set
1798 // outer meta
1799 if (outer) {
1800 tmp->_bonds = vec_clone(this->_bonds);
1801 tmp->_labels = this->_labels;
1802 tmp->_is_braket_form = this->_is_braket_form;
1803 tmp->_rowrank = this->_rowrank;
1804 tmp->_name = this->_name;
1805 // tmp->_signflip = vec_clone(this->_signflip);
1806 tmp->_signflip = this->_signflip;
1807 }
1808
1809 tmp->_is_diag = this->_is_diag;
1810
1811 // inner meta
1812 if (inner) {
1813 tmp->_inner_to_outer_idx = this->_inner_to_outer_idx;
1814 }
1815 }
1816
1817 BlockFermionicUniTensor *clone_meta(const bool &inner, const bool &outer) const {
1818 //[21 Aug 2024] This is a copy from BlockUniTensor;
1819 BlockFermionicUniTensor *tmp = new BlockFermionicUniTensor();
1820 this->set_meta(tmp, inner, outer);
1821 return tmp;
1822 };
1823
1824 friend class UniTensor;
1825 BlockFermionicUniTensor() {
1826 //[21 Aug 2024] This is a copy from BlockUniTensor;
1827 this->uten_type_id = UTenType.BlockFermionic;
1828 this->_is_tag = true;
1829 }
1830
1831 void Init(const std::vector<Bond> &bonds, const std::vector<std::string> &in_labels = {},
1832 const cytnx_int64 &rowrank = -1, const unsigned int &dtype = Type.Double,
1833 const int &device = Device.cpu, const bool &is_diag = false,
1834 const bool &no_alloc = false, const std::string &name = "");
1835
1836 void Init_by_Tensor(const Tensor &in_tensor, const bool &is_diag = false,
1837 const cytnx_int64 &rowrank = -1, const std::string &name = "") {
1838 cytnx_error_msg(true,
1839 "[ERROR][BlockFermionicUniTensor] cannot use Init_by_tensor() on a "
1840 "BlockFermionicUniTensor.%s",
1841 "\n");
1842 }
1843
1844 std::vector<cytnx_uint64> shape() const {
1845 //[21 Aug 2024] This is a copy from BlockUniTensor;
1846 std::vector<cytnx_uint64> out(this->_bonds.size());
1847 for (cytnx_uint64 i = 0; i < out.size(); i++) {
1848 out[i] = this->_bonds[i].dim();
1849 }
1850 return out;
1851 }
1852
1853 bool is_blockform() const {
1854 //[21 Aug 2024] This is a copy from BlockUniTensor;
1855 return true;
1856 }
1857
1858 bool is_contiguous() const {
1859 //[21 Aug 2024] This is a copy from BlockUniTensor;
1860 bool out = true;
1861 for (int i = 0; i < this->_blocks.size(); i++) {
1862 out &= this->_blocks[i].is_contiguous();
1863 }
1864 return out;
1865 };
1866
1867 cytnx_uint64 Nblocks() const {
1868 //[21 Aug 2024] This is a copy from BlockUniTensor;
1869 return this->_blocks.size();
1870 };
1871
1872 std::vector<bool> signflip() const override { return this->_signflip; };
1873 std::vector<bool> &signflip_() override { return this->_signflip; };
1874
1875 void to_(const int &device) {
1876 //[21 Aug 2024] This is a copy from BlockUniTensor;
1877 for (cytnx_uint64 i = 0; i < this->_blocks.size(); i++) {
1878 this->_blocks[i].to_(device);
1879 }
1880 };
1881
1882 boost::intrusive_ptr<UniTensor_base> to(const int &device) {
1883 //[21 Aug 2024] This is a copy from BlockUniTensor;
1884 if (this->device() == device) {
1885 return this;
1886 } else {
1887 boost::intrusive_ptr<UniTensor_base> out = this->clone();
1888 out->to_(device);
1889 return out;
1890 }
1891 };
1892
1893 boost::intrusive_ptr<UniTensor_base> clone() const {
1894 //[21 Aug 2024] This is a copy from BlockUniTensor; changed to BlockFermionicUniTensor as
1895 // output
1896 BlockFermionicUniTensor *tmp = this->clone_meta(true, true);
1897 tmp->_blocks = vec_clone(this->_blocks);
1898 boost::intrusive_ptr<UniTensor_base> out(tmp);
1899 return out;
1900 };
1901
1902 unsigned int dtype() const {
1903 //[21 Aug 2024] This is a copy from BlockUniTensor;
1904 #ifdef UNI_DEBUG
1905 cytnx_error_msg(this->_blocks.size() == 0, "[ERROR][internal] empty blocks for blockform.%s",
1906 "\n");
1907 #endif
1908 return this->_blocks.size() < 1 ? Type.Void : this->_blocks[0].dtype();
1909 };
1910 int device() const {
1911 //[21 Aug 2024] This is a copy from BlockUniTensor;
1912 #ifdef UNI_DEBUG
1913 cytnx_error_msg(this->_blocks.size() == 0, "[ERROR][internal] empty blocks for blockform.%s",
1914 "\n");
1915 #endif
1916 return this->_blocks.size() < 1 ? -404 : this->_blocks[0].device();
1917 };
1918 std::string dtype_str() const {
1919 //[21 Aug 2024] This is a copy from BlockUniTensor;
1920 #ifdef UNI_DEBUG
1921 cytnx_error_msg(this->_blocks.size() == 0, "[ERROR][internal] empty blocks for blockform.%s",
1922 "\n");
1923 #endif
1924 return this->_blocks.size() < 1 ? "Void, no valid blocks" : this->_blocks[0].dtype_str();
1925 };
1926 std::string device_str() const {
1927 //[21 Aug 2024] This is a copy from BlockUniTensor;
1928 #ifdef UNI_DEBUG
1929 cytnx_error_msg(this->_blocks.size() == 0, "[ERROR][internal] empty blocks for blockform.%s",
1930 "\n");
1931 #endif
1932 return this->_blocks.size() < 1 ? "None, no valid blocks" : this->_blocks[0].device_str();
1933 };
1934
1935 Tensor get_block(const cytnx_uint64 &idx = 0) const {
1936 //[21 Aug 2024] This is a copy from BlockUniTensor;
1937 cytnx_error_msg(idx >= this->_blocks.size(),
1938 "[ERROR][BlockFermionicUniTensor] index out of range%s", "\n");
1939 return this->_blocks[idx].clone();
1940 };
1941
1942 // this overload (for Block form) addresses the block by its qnum indices!!
1943 Tensor get_block(const std::vector<cytnx_int64> &indices, const bool &force_return) const {
1944 //[21 Aug 2024] This is a copy from BlockUniTensor;
1945 cytnx_error_msg(
1946 indices.size() != this->rank(),
1947 "[ERROR][get_block][BlockFermionicUniTensor] len(indices) must be the same as the "
1948 "Tensor rank (number of legs).%s",
1949 "\n");
1950
1951 std::vector<cytnx_uint64> inds(indices.begin(), indices.end());
1952
1953 // find whether a block with the specified indices exists!
1954 cytnx_int64 b = -1;
1955 for (cytnx_uint64 i = 0; i < this->_inner_to_outer_idx.size(); i++) {
1956 if (inds == this->_inner_to_outer_idx[i]) {
1957 b = i;
1958 break;
1959 }
1960 }
1961
1962 if (b < 0) {
1963 if (force_return) {
1964 return NullRefTensor;
1965 } else {
1966 cytnx_error_msg(true,
1967 "[ERROR][get_block][BlockFermionicUniTensor] no avaliable block exists, "
1968 "force_return=false, so "
1969 "error throws. \n If you want to return an empty block without "
1970 "error when block is "
1971 "not avaliable, set force_return=True.%s",
1972 "\n");
1973 }
1974 } else {
1975 return this->_blocks[b].clone();
1976 }
1977 }
1978
1979 const Tensor &get_block_(const cytnx_uint64 &idx = 0) const {
1980 //[21 Aug 2024] This is a copy from BlockUniTensor;
1981 cytnx_error_msg(idx >= this->_blocks.size(),
1982 "[ERROR][BlockFermionicUniTensor] index out of range%s", "\n");
1983 return this->_blocks[idx];
1984 };
1985
1986 Tensor &get_block_(const cytnx_uint64 &idx = 0) {
1987 //[21 Aug 2024] This is a copy from BlockUniTensor;
1988 cytnx_error_msg(idx >= this->_blocks.size(),
1989 "[ERROR][BlockFermionicUniTensor] index out of range%s", "\n");
1990 return this->_blocks[idx];
1991 };
1992
1993 const Tensor &get_block_(const std::vector<cytnx_int64> &indices,
1994 const bool &force_return) const {
1995 //[21 Aug 2024] This is a copy from BlockUniTensor;
1996 cytnx_error_msg(
1997 indices.size() != this->rank(),
1998 "[ERROR][get_block][BlockFermionicUniTensor] len(indices) must be the same as the "
1999 "Tensor rank (number of legs).%s",
2000 "\n");
2001
2002 std::vector<cytnx_uint64> inds(indices.begin(), indices.end());
2003
2004 // find whether a block with the specified indices exists!
2005 cytnx_int64 b = -1;
2006 for (cytnx_uint64 i = 0; i < this->_inner_to_outer_idx.size(); i++) {
2007 if (inds == this->_inner_to_outer_idx[i]) {
2008 b = i;
2009 break;
2010 }
2011 }
2012
2013 if (b < 0) {
2014 if (force_return) {
2015 return this->NullRefTensor;
2016 } else {
2017 cytnx_error_msg(true,
2018 "[ERROR][get_block][BlockFermionicUniTensor] no avaliable block exists, "
2019 "force_return=false, so "
2020 "error throws. \n If you want to return an empty block without "
2021 "error when block is "
2022 "not avaliable, set force_return=True.%s",
2023 "\n");
2024 }
2025 } else {
2026 return this->_blocks[b];
2027 }
2028 }
2029
2030 Tensor &get_block_(const std::vector<cytnx_int64> &indices, const bool &force_return) {
2031 //[21 Aug 2024] This is a copy from BlockUniTensor;
2032 cytnx_error_msg(
2033 indices.size() != this->rank(),
2034 "[ERROR][get_block][BlockFermionicUniTensor] len(indices) must be the same as the "
2035 "Tensor rank (number of legs).%s",
2036 "\n");
2037
2038 std::vector<cytnx_uint64> inds(indices.begin(), indices.end());
2039
2040 // find whether a block with the specified indices exists!
2041 cytnx_int64 b = -1;
2042 for (cytnx_uint64 i = 0; i < this->_inner_to_outer_idx.size(); i++) {
2043 if (inds == this->_inner_to_outer_idx[i]) {
2044 b = i;
2045 break;
2046 }
2047 }
2048
2049 if (b < 0) {
2050 if (force_return) {
2051 return this->NullRefTensor;
2052 } else {
2053 cytnx_error_msg(true,
2054 "[ERROR][get_block][BlockFermionicUniTensor] no avaliable block exists, "
2055 "force_return=false, so "
2056 "error throws. \n If you want to return an empty block without "
2057 "error when block is "
2058 "not avaliable, set force_return=True.%s",
2059 "\n");
2060 }
2061 } else {
2062 return this->_blocks[b];
2063 }
2064 }
2065
2066 std::vector<Tensor> get_blocks() const {
2067 //[21 Aug 2024] This is a copy from BlockUniTensor;
2068 return vec_clone(this->_blocks);
2069 }
2070 const std::vector<Tensor> &get_blocks_(const bool &) const {
2071 //[21 Aug 2024] This is a copy from BlockUniTensor;
2072 return this->_blocks;
2073 }
2074 std::vector<Tensor> &get_blocks_(const bool &) {
2075 //[21 Aug 2024] This is a copy from BlockUniTensor;
2076 return this->_blocks;
2077 }
2078
2079 bool same_data(const boost::intrusive_ptr<UniTensor_base> &rhs) const {
2080 //[21 Aug 2024] This is a copy from BlockUniTensor; changed to BlockFermionic UTenType
2081 if (rhs->uten_type() != UTenType.BlockFermionic) return false;
2082 if (rhs->get_blocks_(1).size() != this->get_blocks_(1).size()) return false;
2083
2084 for (int i = 0; i < rhs->get_blocks_(1).size(); i++)
2085 if (this->get_blocks_(1)[i].same_data(rhs->get_blocks_(1)[i]) == false) return false;
2086
2087 return true;
2088 }
2089
2090 void set_rowrank_(const cytnx_uint64 &new_rowrank) {
2091 //[21 Aug 2024] This is a copy from BlockUniTensor;
2092 cytnx_error_msg(
2093 new_rowrank > this->rank(),
2094 "[ERROR][BlockFermionicUniTensor] rowrank should be [>=0] and [<=UniTensor.rank].%s", "\n");
2095 if (this->is_diag()) {
2096 cytnx_error_msg(
2097 new_rowrank != 1,
2098 "[ERROR][BlockFermionicUniTensor] rowrank should be [==1] when is_diag =true!.%s", "\n");
2099 }
2100 this->_rowrank = new_rowrank;
2101 this->_is_braket_form = this->_update_braket();
2102 }
2103
2104 boost::intrusive_ptr<UniTensor_base> set_rowrank(const cytnx_uint64 &new_rowrank) const {
2105 //[21 Aug 2024] This is a copy from BlockUniTensor; output type changed to
2106 // BlockFermionicUniTensor
2107 BlockFermionicUniTensor *tmp = this->clone_meta(true, true);
2108 tmp->_blocks = this->_blocks;
2109 tmp->set_rowrank_(new_rowrank);
2110 boost::intrusive_ptr<UniTensor_base> out(tmp);
2111 return out;
2112 }
2113
2114 boost::intrusive_ptr<UniTensor_base> permute(const std::vector<cytnx_int64> &mapper,
2115 const cytnx_int64 &rowrank = -1);
2116 boost::intrusive_ptr<UniTensor_base> permute(const std::vector<std::string> &mapper,
2117 const cytnx_int64 &rowrank = -1);
2118
2119 void permute_(const std::vector<cytnx_int64> &mapper, const cytnx_int64 &rowrank = -1);
2120 void permute_(const std::vector<std::string> &mapper, const cytnx_int64 &rowrank = -1);
2121
2122 boost::intrusive_ptr<UniTensor_base> permute_nosignflip(const std::vector<cytnx_int64> &mapper,
2123 const cytnx_int64 &rowrank = -1);
2124 boost::intrusive_ptr<UniTensor_base> permute_nosignflip(const std::vector<std::string> &mapper,
2125 const cytnx_int64 &rowrank = -1);
2126 void permute_nosignflip_(const std::vector<cytnx_int64> &mapper,
2127 const cytnx_int64 &rowrank = -1);
2128 void permute_nosignflip_(const std::vector<std::string> &mapper,
2129 const cytnx_int64 &rowrank = -1);
2130
2131 // Helper functions; they implement the sign flips when permuting indices
2132 std::vector<bool> _swapsigns_(const std::vector<cytnx_int64> &mapper) const;
2133 std::vector<bool> _lhssigns_(const std::vector<cytnx_int64> &mapper,
2134 const cytnx_int64 contrno) const;
2135 std::vector<bool> _swapsigns_(const std::vector<cytnx_uint64> &mapper) const;
2136 std::vector<bool> _lhssigns_(const std::vector<cytnx_uint64> &mapper,
2137 const cytnx_uint64 contrno) const;
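// Background for the helpers above (the standard fermionic convention; the concrete
// bookkeeping is in the .cpp implementation, not shown here): exchanging two legs that
// carry fermion parities p_i and p_j contributes a factor
//
//   sign(i <-> j) = (-1)^(p_i * p_j)
//
// per block, so a permutation flips the sign of exactly those blocks in which an odd
// number of odd-odd leg exchanges occurs; the returned std::vector<bool> presumably
// marks those blocks.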
2138
2139 boost::intrusive_ptr<UniTensor_base> contiguous_() {
2140 //[21 Aug 2024] This is a copy from BlockUniTensor;
2141 for (unsigned int b = 0; b < this->_blocks.size(); b++) this->_blocks[b].contiguous_();
2142 return boost::intrusive_ptr<UniTensor_base>(this);
2143 }
2144
2145 boost::intrusive_ptr<UniTensor_base> contiguous();
2146
2147 void print_diagram(const bool &bond_info = false) const;
2148 void print_blocks(const bool &full_info = true) const;
2149 void print_block(const cytnx_int64 &idx, const bool &full_info = true) const;
2150
2151 boost::intrusive_ptr<UniTensor_base> contract(const boost::intrusive_ptr<UniTensor_base> &rhs,
2152 const bool &mv_elem_self = false,
2153 const bool &mv_elem_rhs = false);
2154
2155 boost::intrusive_ptr<UniTensor_base> relabel(const std::vector<std::string> &new_labels);
2156 boost::intrusive_ptr<UniTensor_base> relabels(const std::vector<std::string> &new_labels);
2157
2158 boost::intrusive_ptr<UniTensor_base> relabel(const std::vector<std::string> &old_labels,
2159 const std::vector<std::string> &new_labels);
2160 boost::intrusive_ptr<UniTensor_base> relabels(const std::vector<std::string> &old_labels,
2161 const std::vector<std::string> &new_labels);
2162
2163 boost::intrusive_ptr<UniTensor_base> relabel(const std::string &old_label,
2164 const std::string &new_label);
2165 boost::intrusive_ptr<UniTensor_base> relabel(const cytnx_int64 &inx,
2166 const std::string &new_label);
2167 std::vector<Symmetry> syms() const;
2168
2169 void reshape_(const std::vector<cytnx_int64> &new_shape, const cytnx_uint64 &rowrank = 0) {
2170 cytnx_error_msg(true, "[ERROR] cannot reshape a UniTensor with symmetry.%s", "\n");
2171 }
2172 boost::intrusive_ptr<UniTensor_base> reshape(const std::vector<cytnx_int64> &new_shape,
2173 const cytnx_uint64 &rowrank = 0) {
2174 cytnx_error_msg(true, "[ERROR] cannot reshape a UniTensor with symmetry.%s", "\n");
2175 return nullptr;
2176 }
2177
2178 boost::intrusive_ptr<UniTensor_base> astype(const unsigned int &dtype) const {
2179 //[21 Aug 2024] This is a copy from BlockUniTensor; the tensor type was adapted
2180 BlockFermionicUniTensor *tmp = this->clone_meta(true, true);
2181 tmp->_blocks.resize(this->_blocks.size());
2182 for (cytnx_int64 blk = 0; blk < this->_blocks.size(); blk++) {
2183 tmp->_blocks[blk] = this->_blocks[blk].astype(dtype);
2184 }
2185 boost::intrusive_ptr<UniTensor_base> out(tmp);
2186 return out;
2187 };
2188
2189 // this will only work on non-symm tensor (DenseUniTensor)
2190 boost::intrusive_ptr<UniTensor_base> get(const std::vector<Accessor> &accessors) {
2191 cytnx_error_msg(
2192 true,
2193 "[ERROR][BlockFermionicUniTensor][get] cannot use get on a UniTensor with "
2194 "Symmetry.\n suggestion: try get_block/get_block_/get_blocks/get_blocks_ first.%s",
2195 "\n");
2196 return nullptr;
2197 }
2198
2199 // this will only work on non-symm tensor (DenseUniTensor)
2200 void set(const std::vector<Accessor> &accessors, const Tensor &rhs) {
2201 //[21 Aug 2024] This is a copy from BlockUniTensor;
2202 cytnx_error_msg(
2203 true,
2204 "[ERROR][BlockFermionicUniTensor][set] cannot use set on a UniTensor with "
2205 "Symmetry.\n suggestion: try put_block/put_block_ first.%s",
2206 "\n");
2207 }
2208
2209 void put_block(const Tensor &in, const cytnx_uint64 &idx = 0) {
2210 //[21 Aug 2024] This is a copy from BlockUniTensor;
2211 cytnx_error_msg(in.dtype() != this->dtype(),
2212 "[ERROR][DenseUniTensor][put_block] The input tensor dtype does not match.%s",
2213 "\n");
2214 cytnx_error_msg(in.device() != this->device(),
2215 "[ERROR][DenseUniTensor][put_block] The input tensor device does not "
2216 "match.%s",
2217 "\n");
2218 // We shouldn't check contiguity here
2219 // cytnx_error_msg(!in.contiguous());
2220 cytnx_error_msg(idx >= this->_blocks.size(),
2221 "[ERROR][BlockFermionicUniTensor] index out of range%s", "\n");
2222 cytnx_error_msg(
2223 in.shape() != this->_blocks[idx].shape(),
2224 "[ERROR][BlockFermionicUniTensor] the shape of input tensor does not match the shape "
2225 "of block @ idx=%d\n",
2226 idx);
2227
2228 this->_blocks[idx] = in.clone();
2229 }
2230 void put_block_(Tensor &in, const cytnx_uint64 &idx = 0) {
2231 //[21 Aug 2024] This is a copy from BlockUniTensor;
2232 cytnx_error_msg(in.dtype() != this->dtype(),
2233 "[ERROR][DenseUniTensor][put_block] The input tensor dtype does not match.%s",
2234 "\n");
2235 cytnx_error_msg(in.device() != this->device(),
2236 "[ERROR][DenseUniTensor][put_block] The input tensor device does not "
2237 "match.%s",
2238 "\n");
2239 // We shouldn't check contiguity here
2240 // cytnx_error_msg(!in.contiguous());
2241 cytnx_error_msg(idx >= this->_blocks.size(),
2242 "[ERROR][BlockFermionicUniTensor] index out of range%s", "\n");
2243 cytnx_error_msg(
2244 in.shape() != this->_blocks[idx].shape(),
2245 "[ERROR][BlockFermionicUniTensor] the shape of input tensor does not match the shape "
2246 "of block @ idx=%d\n",
2247 idx);
2248
2249 this->_blocks[idx] = in;
2250 }
2251 void put_block(const Tensor &in, const std::vector<cytnx_int64> &indices, const bool &check) {
2252 //[21 Aug 2024] This is a copy from BlockUniTensor;
2253 cytnx_error_msg(in.dtype() != this->dtype(),
2254 "[ERROR][DenseUniTensor][put_block] The input tensor dtype does not match.%s",
2255 "\n");
2256 cytnx_error_msg(in.device() != this->device(),
2257 "[ERROR][DenseUniTensor][put_block] The input tensor device does not "
2258 "match.%s",
2259 "\n");
2260 // We shouldn't check contiguity here
2261 // cytnx_error_msg(!in.contiguous());
2262 cytnx_error_msg(
2263 indices.size() != this->rank(),
2264 "[ERROR][put_block][BlockFermionicUniTensor] len(indices) must be the same as the "
2265 "Tensor rank (number of legs).%s",
2266 "\n");
2267
2268 std::vector<cytnx_uint64> inds(indices.begin(), indices.end());
2269
2270 // find whether a block with the specified indices exists!
2271 cytnx_int64 b = -1;
2272 for (cytnx_uint64 i = 0; i < this->_inner_to_outer_idx.size(); i++) {
2273 if (inds == this->_inner_to_outer_idx[i]) {
2274 b = i;
2275 break;
2276 }
2277 }
2278
2279 if (b < 0) {
2280 if (check) {
2281 cytnx_error_msg(true,
2282 "[ERROR][put_block][BlockFermionicUniTensor] no avaliable block exists, "
2283 "check=true, so error throws. \n If you want without error when block "
2284 "is not avaliable, set check=false.%s",
2285 "\n");
2286 }
2287 } else {
2288 cytnx_error_msg(
2289 in.shape() != this->_blocks[b].shape(),
2290 "[ERROR][BlockFermionicUniTensor] the shape of input tensor does not match the shape "
2291 "of block @ idx=%d\n",
2292 b);
2293
2294 this->_blocks[b] = in.clone();
2295 }
2296 }
2297 void put_block_(Tensor &in, const std::vector<cytnx_int64> &indices, const bool &check) {
2298 //[21 Aug 2024] This is a copy from BlockUniTensor;
2299 cytnx_error_msg(in.dtype() != this->dtype(),
2300 "[ERROR][DenseUniTensor][put_block] The input tensor dtype does not match.%s",
2301 "\n");
2302 cytnx_error_msg(in.device() != this->device(),
2303 "[ERROR][DenseUniTensor][put_block] The input tensor device does not "
2304 "match.%s",
2305 "\n");
2306 // We shouldn't check contiguity here
2307 // cytnx_error_msg(!in.contiguous());
2308 cytnx_error_msg(
2309 indices.size() != this->rank(),
2310 "[ERROR][put_block][BlockFermionicUniTensor] len(indices) must be the same as the "
2311 "Tensor rank (number of legs).%s",
2312 "\n");
2313
2314 std::vector<cytnx_uint64> inds(indices.begin(), indices.end());
2315
2316 // find whether a block with the specified indices exists!
2317 cytnx_int64 b = -1;
2318 for (cytnx_uint64 i = 0; i < this->_inner_to_outer_idx.size(); i++) {
2319 if (inds == this->_inner_to_outer_idx[i]) {
2320 b = i;
2321 break;
2322 }
2323 }
2324
2325 if (b < 0) {
2326 if (check) {
2327 cytnx_error_msg(true,
2328 "[ERROR][put_block][BlockFermionicUniTensor] no avaliable block exists, "
2329 "check=true, so error throws. \n If you want without error when block "
2330 "is not avaliable, set check=false.%s",
2331 "\n");
2332 }
2333 } else {
2334 cytnx_error_msg(
2335 in.shape() != this->_blocks[b].shape(),
2336 "[ERROR][BlockFermionicUniTensor] the shape of input tensor does not match the shape "
2337 "of block @ idx=%d\n",
2338 b);
2339 this->_blocks[b] = in;
2340 }
2341 }
2342
2343 void tag() {
2344 // no-op: not used for this UniTensor type!
2345 }
2346
2347 boost::intrusive_ptr<UniTensor_base> Conj() {
2348 //[21 Aug 2024] This is a copy from BlockUniTensor;
2349 boost::intrusive_ptr<UniTensor_base> out = this->clone();
2350 out->Conj_();
2351 return out;
2352 }
2353
2354 void Conj_() {
2355 //[21 Aug 2024] This is a copy from BlockUniTensor;
2356 for (int i = 0; i < this->_blocks.size(); i++) {
2357 this->_blocks[i].Conj_();
2358 }
2359 };
2360
2361 // Transpose(_) changes the index order without sign flips and reverses the bond directions for
2362 // fermionic tensors
2363 void Transpose_();
2364 boost::intrusive_ptr<UniTensor_base> Transpose() {
2365 //[21 Aug 2024] This is a copy from BlockUniTensor;
2366 boost::intrusive_ptr<UniTensor_base> out = this->clone();
2367 out->Transpose_();
2368 return out;
2369 }
2370
2371 void normalize_();
2372 boost::intrusive_ptr<UniTensor_base> normalize() {
2373 //[21 Aug 2024] This is a copy from BlockUniTensor;
2374 boost::intrusive_ptr<UniTensor_base> out = this->clone();
2375 out->normalize_();
2376 return out;
2377 }
2378
2379 boost::intrusive_ptr<UniTensor_base> Dagger() {
2380 //[21 Aug 2024] This is a copy from BlockUniTensor;
2381 boost::intrusive_ptr<UniTensor_base> out = this->Conj();
2382 out->Transpose_();
2383 return out;
2384 }
2385 void Dagger_() {
2386 //[21 Aug 2024] This is a copy from BlockUniTensor;
2387 this->Conj_();
2388 this->Transpose_();
2389 }
2390
2391 void Trace_(const std::string &a, const std::string &b);
2392 void Trace_(const cytnx_int64 &a, const cytnx_int64 &b);
2393
2394 boost::intrusive_ptr<UniTensor_base> Trace(const std::string &a, const std::string &b) {
2395 //[21 Aug 2024] This is a copy from BlockUniTensor; the tensor type was adapted
2396 boost::intrusive_ptr<UniTensor_base> out = this->clone();
2397 out->Trace_(a, b);
2398 if (out->rank() == 0) {
2399 DenseUniTensor *tmp = new DenseUniTensor();
2400 tmp->_block = ((BlockFermionicUniTensor *)out.get())->_blocks[0];
2401 out = boost::intrusive_ptr<UniTensor_base>(tmp);
2402 }
2403 return out;
2404 }
2405 boost::intrusive_ptr<UniTensor_base> Trace(const cytnx_int64 &a, const cytnx_int64 &b) {
2406 //[21 Aug 2024] This is a copy from BlockUniTensor; the tensor type was adapted
2407 boost::intrusive_ptr<UniTensor_base> out = this->clone();
2408 out->Trace_(a, b);
2409 if (out->rank() == 0) {
2410 DenseUniTensor *tmp = new DenseUniTensor();
2411 tmp->_block = ((BlockFermionicUniTensor *)out.get())->_blocks[0];
2412 out = boost::intrusive_ptr<UniTensor_base>(tmp);
2413 }
2414 return out;
2415 }
2416
2417 Tensor Norm() const;
2418
2419 bool elem_exists(const std::vector<cytnx_uint64> &locator) const;
2420
2421 const Scalar::Sproxy at_for_sparse(const std::vector<cytnx_uint64> &locator) const;
2422 const cytnx_complex128 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
2423 const cytnx_complex128 &aux) const;
2424 const cytnx_complex64 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
2425 const cytnx_complex64 &aux) const;
2426 const cytnx_double &at_for_sparse(const std::vector<cytnx_uint64> &locator,
2427 const cytnx_double &aux) const;
2428 const cytnx_float &at_for_sparse(const std::vector<cytnx_uint64> &locator,
2429 const cytnx_float &aux) const;
2430 const cytnx_uint64 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
2431 const cytnx_uint64 &aux) const;
2432 const cytnx_int64 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
2433 const cytnx_int64 &aux) const;
2434 const cytnx_uint32 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
2435 const cytnx_uint32 &aux) const;
2436 const cytnx_int32 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
2437 const cytnx_int32 &aux) const;
2438 const cytnx_uint16 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
2439 const cytnx_uint16 &aux) const;
2440 const cytnx_int16 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
2441 const cytnx_int16 &aux) const;
2442
2443 Scalar::Sproxy at_for_sparse(const std::vector<cytnx_uint64> &locator);
2444 cytnx_complex128 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
2445 const cytnx_complex128 &aux);
2446 cytnx_complex64 &at_for_sparse(const std::vector<cytnx_uint64> &locator,
2447 const cytnx_complex64 &aux);
2448 cytnx_double &at_for_sparse(const std::vector<cytnx_uint64> &locator, const cytnx_double &aux);
2449 cytnx_float &at_for_sparse(const std::vector<cytnx_uint64> &locator, const cytnx_float &aux);
2450 cytnx_uint64 &at_for_sparse(const std::vector<cytnx_uint64> &locator, const cytnx_uint64 &aux);
2451 cytnx_int64 &at_for_sparse(const std::vector<cytnx_uint64> &locator, const cytnx_int64 &aux);
2452 cytnx_uint32 &at_for_sparse(const std::vector<cytnx_uint64> &locator, const cytnx_uint32 &aux);
2453 cytnx_int32 &at_for_sparse(const std::vector<cytnx_uint64> &locator, const cytnx_int32 &aux);
2454 cytnx_uint16 &at_for_sparse(const std::vector<cytnx_uint64> &locator, const cytnx_uint16 &aux);
2455 cytnx_int16 &at_for_sparse(const std::vector<cytnx_uint64> &locator, const cytnx_int16 &aux);
2456
2457 void _save_dispatch(std::fstream &f) const;
2458 void _load_dispatch(std::fstream &f);
2459
2460 // this will remove the [q_index]-th qnum at [bond_idx]-th Bond!
2461 void truncate_(const std::string &label, const cytnx_uint64 &q_index);
2462 void truncate_(const cytnx_int64 &bond_idx, const cytnx_uint64 &q_index);
2463
2464 void Add_(const boost::intrusive_ptr<UniTensor_base> &rhs);
2465 void Add_(const Scalar &rhs) {
2466 cytnx_error_msg(true,
2467 "[ERROR] cannot perform elementwise arithmetic '+' between Scalar and "
2468 "BlockFermionicUniTensor.\n %s "
2469 "\n",
2470 "This operation would destroy the block structure. [Suggest] Avoid or use "
2471 "get/set_block(s) to do operation on blocks.");
2472 }
2473
2474 void Mul_(const boost::intrusive_ptr<UniTensor_base> &rhs);
2475 void Mul_(const Scalar &rhs);
2476
2477 void Sub_(const boost::intrusive_ptr<UniTensor_base> &rhs);
2478 void Sub_(const Scalar &rhs) {
2479 cytnx_error_msg(true,
2480 "[ERROR] cannot perform elementwise arithmetic '-' between Scalar and "
2481 "BlockFermionicUniTensor.\n %s "
2482 "\n",
2483 "This operation would destroy the block structure. [Suggest] Avoid or use "
2484 "get/set_block(s) to do operation on blocks.");
2485 }
2486 void lSub_(const Scalar &lhs) {
2487 cytnx_error_msg(true,
2488 "[ERROR] cannot perform elementwise arithmetic '-' between Scalar and "
2489 "BlockFermionicUniTensor.\n %s "
2490 "\n",
2491 "This operation would destroy the block structure. [Suggest] Avoid or use "
2492 "get/set_block(s) to do operation on blocks.");
2493 }
2494
2495 void Div_(const boost::intrusive_ptr<UniTensor_base> &rhs);
2496 void Div_(const Scalar &rhs);
2497 void lDiv_(const Scalar &lhs) {
2498 cytnx_error_msg(true,
2499 "[ERROR] cannot perform elementwise arithmetic '/' between Scalar and "
2500 "BlockFermionicUniTensor.\n %s "
2501 "\n",
2502 "This operation would cause division by zero on non-block elements. "
2503 "[Suggest] Avoid or use get/set_block(s) to do operation on blocks.");
2504 }
2505 void from_(const boost::intrusive_ptr<UniTensor_base> &rhs, const bool &force);
2506
2507 void group_basis_();
2508
2509 void combineBond(const std::vector<std::string> &indicators, const bool &force = false);
2510 void combineBonds(const std::vector<cytnx_int64> &indicators, const bool &force = false);
2511 void combineBonds(const std::vector<cytnx_int64> &indicators, const bool &force,
2512 const bool &by_label);
2513 void combineBonds(const std::vector<std::string> &indicators, const bool &force = false);
2514
2515 const std::vector<cytnx_uint64> &get_qindices(const cytnx_uint64 &bidx) const {
2516 //[21 Aug 2024] This is a copy from BlockUniTensor;
2517 cytnx_error_msg(
2518 bidx >= this->Nblocks(),
2519 "[ERROR][BlockFermionicUniTensor] bidx out of bound! only %d blocks in current UTen.\n",
2520 this->Nblocks());
2521 return this->_inner_to_outer_idx[bidx];
2522 }
2523 std::vector<cytnx_uint64> &get_qindices(const cytnx_uint64 &bidx) {
2524 //[21 Aug 2024] This is a copy from BlockUniTensor;
2525 cytnx_error_msg(
2526 bidx >= this->Nblocks(),
2527 "[ERROR][BlockFermionicUniTensor] bidx out of bound! only %d blocks in current UTen.\n",
2528 this->Nblocks());
2529 return this->_inner_to_outer_idx[bidx];
2530 }
2531
2532 const vec2d<cytnx_uint64> &get_itoi() const {
2533 //[21 Aug 2024] This is a copy from BlockUniTensor;
2534 return this->_inner_to_outer_idx;
2535 }
2536 vec2d<cytnx_uint64> &get_itoi() {
2537 //[21 Aug 2024] This is a copy from BlockUniTensor;
2538 return this->_inner_to_outer_idx;
2539 }
2540
2541 void beauty_print_block(std::ostream &os, const cytnx_uint64 &Nin, const cytnx_uint64 &Nout,
2542 const std::vector<cytnx_uint64> &qn_indices,
2543 const std::vector<Bond> &bonds, const Tensor &block) const;
2544 };
2546
2547 //======================================================================
2548
2550 class UniTensor_options {
2551 public:
2552 bool _is_diag;
2553 int _dtype;
2554 int _device;
2555 int _rowrank;
2556
2557 UniTensor_options() {
2558 this->_is_diag = false;
2559 this->_dtype = Type.Double;
2560 this->_device = Device.cpu;
2561 this->_rowrank = -1;
2562 }
2563
2564 UniTensor_options(const UniTensor_options &rhs) {
2565 this->_is_diag = rhs._is_diag;
2566 this->_dtype = rhs._dtype;
2567 this->_device = rhs._device;
2568 this->_rowrank = rhs._rowrank;
2569 }
2570
2571 UniTensor_options &operator=(const UniTensor_options &rhs) {
2572 this->_is_diag = rhs._is_diag;
2573 this->_dtype = rhs._dtype;
2574 this->_device = rhs._device;
2575 this->_rowrank = rhs._rowrank;
2576 return *this;
2577 }
2578
2579 UniTensor_options &is_diag(const bool &in) {
2580 this->_is_diag = in;
2581 return *this;
2582 }
2583 UniTensor_options &dtype(const int &in) {
2584 this->_dtype = in;
2585 return *this;
2586 }
2587 UniTensor_options &device(const int &in) {
2588 this->_device = in;
2589 return *this;
2590 }
2591 UniTensor_options &rowrank(const int &in) {
2592 this->_rowrank = in;
2593 return *this;
2594 }
2595 };
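// Usage sketch: UniTensor_options is a small fluent builder (every setter returns
// *this), intended for the still-commented-out [developing] Init overload shown
// further below:
//
//   UniTensor_options opt;
//   opt.dtype(Type.Double).device(Device.cpu).is_diag(false).rowrank(1);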
2597
2599 class UniTensor {
2600 public:
2602 boost::intrusive_ptr<UniTensor_base> _impl;
2603 UniTensor() : _impl(new UniTensor_base()){};
2604 UniTensor(const UniTensor &rhs) { this->_impl = rhs._impl; }
2605 UniTensor &operator=(const UniTensor &rhs) {
2606 this->_impl = rhs._impl;
2607 return *this;
2608 }
2610
2612
2638 explicit UniTensor(const Tensor &in_tensor, const bool &is_diag = false,
2639 const cytnx_int64 &rowrank = -1,
2640 const std::vector<std::string> &in_labels = {}, const std::string &name = "")
2641 : _impl(new UniTensor_base()) {
2642 this->Init(in_tensor, is_diag, rowrank, in_labels, name);
2643 }
2661 void Init(const Tensor &in_tensor, const bool &is_diag = false, const cytnx_int64 &rowrank = -1,
2662 const std::vector<std::string> &in_labels = {}, const std::string &name = "") {
2663 // std::cout << "[entry!]" << std::endl;
2664 boost::intrusive_ptr<UniTensor_base> out(new DenseUniTensor());
2665 out->Init_by_Tensor(in_tensor, is_diag, rowrank, name);
2666 this->_impl = out;
2667 if (in_labels.size() != 0) this->set_labels(in_labels);
2668 }
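// Usage sketch for the constructor/Init above: wrapping an existing (non-symmetric)
// Tensor always yields a DenseUniTensor; labels and rowrank are optional. `T` below
// stands for any already-constructed rank-2 Tensor (illustrative name):
//
//   UniTensor ut(T, /*is_diag=*/false, /*rowrank=*/1, {"i", "j"}, "my_tensor");
//   // equivalent: UniTensor ut2; ut2.Init(T, false, 1, {"i", "j"}, "my_tensor");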
2670
2672
2689 UniTensor(const std::vector<Bond> &bonds, const std::vector<std::string> &in_labels = {},
2690 const cytnx_int64 &rowrank = -1, const unsigned int &dtype = Type.Double,
2691 const int &device = Device.cpu, const bool &is_diag = false,
2692 const std::string &name = "")
2693 : _impl(new UniTensor_base()) {
2694 #ifdef UNI_DEBUG
2695 cytnx_warning_msg(
2696 true,
2697 "[DEBUG] message: entry for UniTensor(const std::vector<Bond> &bonds, const "
2698 "std::vector<std::string> &in_labels={}, const cytnx_int64 &rowrank=-1, const unsigned "
2699 "int "
2700 "&dtype=Type.Double, const int &device = Device.cpu, const bool &is_diag=false)%s",
2701 "\n");
2702 #endif
2703 this->Init(bonds, in_labels, rowrank, dtype, device, is_diag, name);
2704 }
2705
2707 /* [developing]
2708 void Init(const std::vector<Bond> &bonds, const std::vector<std::string> &in_labels = {},
2709 const UniTensor_options &UToptions = UniTensor_options(), const std::string &name =
2710 ""){ this->Init(bonds,in_labels, UToptions._rowrank, UToptions._dtype , UToptions._device ,
2711 UToptions._is_diag,
2712 name);
2713 }
2714 */
2716
2740 void Init(const std::vector<Bond> &bonds, const std::vector<std::string> &in_labels = {},
2741 const cytnx_int64 &rowrank = -1, const unsigned int &dtype = Type.Double,
2742 const int &device = Device.cpu, const bool &is_diag = false,
2743 const std::string &name = "") {
2744 // checking type:
2745 bool is_sym = false;
2746 int sym_fver = -1;
2747 bool fermionic = false;
2748
2749 for (cytnx_uint64 i = 0; i < bonds.size(); i++) {
2750 // check
2751 if (bonds[i].syms().size() != 0) {
2752 is_sym = true;
2753 if (sym_fver == -1)
2754 sym_fver = bonds[i]._impl->_degs.size();
2755 else {
2756 // std::cout << sym_fver << " " <<
2757 // bonds[i]._impl->_degs.size() << std::endl;
2758 cytnx_error_msg((bool(sym_fver) ^ bool(bonds[i]._impl->_degs.size())),
2759 "[ERROR] When initializing a UniTensor with symmetries, all Bonds must "
2760 "be in the same format!%s",
2761 "\n");
2762 }
2763 if (!fermionic) {
2764 std::vector<Symmetry> symms = bonds[i].syms();
2765 for (cytnx_uint64 i = 0; i < symms.size(); i++) {
2766 if (symms[i].is_fermionic()) fermionic = true;
2767 }
2768 }
2769 } else
2770 cytnx_error_msg(
2771 is_sym, "[ERROR] cannot mix bonds with and without symmetry.%s", "\n");
2772 }
2773
2774 // dynamic dispatch:
2775 if (is_sym) {
2776 #ifdef UNI_DEBUG
2777 cytnx_warning_msg(true, "[DEBUG] message: entry dispatch: UniTensor: symmetric%s", "\n");
2778 #endif
2779 // cytnx_warning_msg(true,"[warning, still developing, some functions will display
2780 // \"[Developing]\"][SparseUniTensor]%s","\n");
2781 if (sym_fver == 0) {
2782 // boost::intrusive_ptr<UniTensor_base> out(new SparseUniTensor());
2783 // this->_impl = out;
2784 cytnx_error_msg(true,
2785 "[ERROR] internal error! [legacy Sparse entry] the Bond is symmetry but "
2786 "the version is not properly determined!%s",
2787 "\n")
2788 } else if (sym_fver == -1) {
2789 cytnx_error_msg(true,
2790 "[ERROR] internal error! the Bond is symmetry but the version is not "
2791 "properly determined!%s",
2792 "\n");
2793 } else {
2794 if (fermionic) {
2795 boost::intrusive_ptr<UniTensor_base> out(new BlockFermionicUniTensor());
2796 this->_impl = out;
2797 } else {
2798 boost::intrusive_ptr<UniTensor_base> out(new BlockUniTensor());
2799 this->_impl = out;
2800 }
2801 }
2802 } else {
2803 boost::intrusive_ptr<UniTensor_base> out(new DenseUniTensor());
2804 this->_impl = out;
2805 }
2806 this->_impl->Init(bonds, in_labels, rowrank, dtype, device, is_diag, false, name);
2807 }
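// Usage sketch for the dispatch above: the storage backend is chosen from the bonds
// alone: plain bonds give a DenseUniTensor, symmetric bonds a BlockUniTensor, and
// symmetric bonds whose Symmetry is fermionic a BlockFermionicUniTensor. Assuming `bA`
// and `bB` are Bonds that already carry the desired (possibly symmetric) quantum
// numbers (Bond construction is declared in Bond.hpp, not in this file):
//
//   UniTensor ut({bA, bB}, {"a", "b"}, /*rowrank=*/1);
//   std::cout << ut.uten_type_str() << std::endl;   // reports the chosen backend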
2808
2815 UniTensor &set_name(const std::string &in) {
2816 this->_impl->set_name(in);
2817 return *this;
2818 }
2819
2830 UniTensor &set_label(const cytnx_int64 &idx, const std::string &new_label) {
2831 this->_impl->set_label(idx, new_label);
2832 return *this;
2833 }
2834
2838 UniTensor &set_label(const cytnx_int64 &idx, const char *new_label) {
2839 this->_impl->set_label(idx, std::string(new_label));
2840 return *this;
2841 }
2842
2853 UniTensor &set_label(const std::string &old_label, const std::string &new_label) {
2854 this->_impl->set_label(old_label, new_label);
2855 return *this;
2856 }
2857
2861 UniTensor &set_label(const char *old_label, const std::string &new_label) {
2862 this->_impl->set_label(std::string(old_label), new_label);
2863 return *this;
2864 }
2865
2869 UniTensor &set_label(const std::string &old_label, const char *new_label) {
2870 this->_impl->set_label(old_label, std::string(new_label));
2871 return *this;
2872 }
2873
2877 UniTensor &set_label(const char *old_label, const char *new_label) {
2878 this->_impl->set_label(std::string(old_label), std::string(new_label));
2879 return *this;
2880 }
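// Usage sketch: set_name() and the set_label() overloads all return *this, so the
// metadata can be set in one chained statement. `ut` is assumed to have legs currently
// labelled "i" and "j" (illustrative names):
//
//   ut.set_name("A").set_label("i", "k").set_label("j", "l");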
2881
2882 /*
2883 UniTensor& change_label(const cytnx_int64 &old_label, const cytnx_int64 &new_label){
2884 this->_impl->change_label(old_label,new_label);
2885 return *this;
2886 }
2887 */
2888
2898 UniTensor &set_labels(const std::vector<std::string> &new_labels) {
2899 this->_impl->set_labels(new_labels);
2900 return *this;
2901 }
2902
2907 UniTensor &set_labels(const std::initializer_list<char *> &new_labels) {
2908 std::vector<char *> new_lbls(new_labels);
2909 std::vector<std::string> vs(new_lbls.size());
2910 transform(new_lbls.begin(), new_lbls.end(), vs.begin(),
2911 [](char *x) -> std::string { return std::string(x); });
2912
2913 this->_impl->set_labels(vs);
2914 return *this;
2915 }
2916
2923 UniTensor &set_rowrank_(const cytnx_uint64 &new_rowrank) {
2924 this->_impl->set_rowrank_(new_rowrank);
2925 return *this;
2926 }
2927
2928 UniTensor set_rowrank(const cytnx_uint64 &new_rowrank) const {
2929 UniTensor out;
2930 out._impl = this->_impl->set_rowrank(new_rowrank);
2931 return out;
2932 }
2933
2934 template <class T>
2935 T &item() {
2937 "[ERROR] cannot use item on UniTensor with Symmetry.\n suggestion: use "
2938 "get_block()/get_blocks() first.%s",
2939 "\n");
2940
2941 DenseUniTensor *tmp = static_cast<DenseUniTensor *>(this->_impl.get());
2942 return tmp->_block.item<T>();
2943 }
2944
2945 Scalar::Sproxy item() const {
2947 "[ERROR] cannot use item on UniTensor with Symmetry.\n suggestion: use "
2948 "get_block()/get_blocks() first.%s",
2949 "\n");
2950
2951 DenseUniTensor *tmp = static_cast<DenseUniTensor *>(this->_impl.get());
2952 return tmp->_block.item();
2953 }
2958 cytnx_uint64 Nblocks() const { return this->_impl->Nblocks(); }
2959
2964 cytnx_uint64 rank() const { return this->_impl->rank(); }
2965
2970 cytnx_uint64 rowrank() const { return this->_impl->rowrank(); }
2971
2977 unsigned int dtype() const { return this->_impl->dtype(); }
2978
2985 int uten_type() const { return this->_impl->uten_type(); }
2986
2992 int device() const { return this->_impl->device(); }
2993
2998 std::string name() const { return this->_impl->name(); }
2999
3005 std::string dtype_str() const { return this->_impl->dtype_str(); }
3006
3012 std::string device_str() const { return this->_impl->device_str(); }
3013
3019 std::string uten_type_str() const { return this->_impl->uten_type_str(); }
3020
3026 bool is_contiguous() const { return this->_impl->is_contiguous(); }
3027
3032 bool is_diag() const { return this->_impl->is_diag(); }
3033
3039 bool is_tag() const { return this->_impl->is_tag(); }
3040
3046 std::vector<Symmetry> syms() const { return this->_impl->syms(); }
3047
3054 const bool &is_braket_form() const { return this->_impl->is_braket_form(); }
3055
3060 const std::vector<std::string> &labels() const { return this->_impl->labels(); }
3067 cytnx_int64 get_index(std::string label) const { return this->_impl->get_index(label); }
3068
3073 const std::vector<Bond> &bonds() const { return this->_impl->bonds(); }
3074
3078 std::vector<Bond> &bonds() { return this->_impl->bonds(); }
3079
3080 const Bond &bond_(const cytnx_uint64 &idx) const { return this->_impl->bond_(idx); }
3081 Bond &bond_(const cytnx_uint64 &idx) { return this->_impl->bond_(idx); }
3082
3083 const Bond &bond_(const std::string &label) const { return this->_impl->bond_(label); }
3084 Bond &bond_(const std::string &label) { return this->_impl->bond_(label); }
3085
3086 Bond bond(const cytnx_uint64 &idx) const { return this->_impl->bond_(idx).clone(); }
3087 Bond bond(const std::string &label) const { return this->_impl->bond_(label).clone(); }
3088
3093 std::vector<cytnx_uint64> shape() const { return this->_impl->shape(); }
3094
3101 std::vector<bool> signflip() const { return this->_impl->signflip(); }
3102
3111 std::vector<bool> &signflip_() { return this->_impl->signflip_(); }
3112
3118 bool is_blockform() const { return this->_impl->is_blockform(); }
3119
3126 void to_(const int &device) { this->_impl->to_(device); }
3127
3137 UniTensor to(const int &device) const {
3138 UniTensor out;
3139 out._impl = this->_impl->to(device);
3140 return out;
3141 }
3142
3147 UniTensor clone() const {
3148 UniTensor out;
3149 out._impl = this->_impl->clone();
3150 return out;
3151 }
3152
3162 UniTensor &relabel_(const std::vector<std::string> &new_labels) {
3163 this->_impl->relabel_(new_labels);
3164 return *this;
3165 }
3172 [[deprecated(
3173 "Please use "
3174 "UniTensor &relabel_(const std::vector<std::string> &old_labels, const "
3175 "std::vector<std::string> &new_labels) "
3176 "instead.")]] UniTensor &
3177 relabels_(const std::vector<std::string> &new_labels) {
3178 this->_impl->relabels_(new_labels);
3179 return *this;
3180 }
3181
3193 UniTensor relabel(const std::vector<std::string> &new_labels) const {
3194 UniTensor out;
3195 out._impl = this->_impl->relabel(new_labels);
3196 return out;
3197 }
3203 [[deprecated(
3204 "Please use "
3205 "UniTensor relabel(const std::vector<std::string> &new_labels) const "
3206 "instead.")]] UniTensor
3207 relabels(const std::vector<std::string> &new_labels) const {
3208 UniTensor out;
3209 out._impl = this->_impl->relabels(new_labels);
3210 return out;
3211 }
3212
3217 UniTensor relabel(const std::initializer_list<char *> &new_labels) const {
3218 std::vector<char *> new_lbls(new_labels);
3219 std::vector<std::string> vs(new_lbls.size());
3220 transform(new_lbls.begin(), new_lbls.end(), vs.begin(),
3221 [](char *x) -> std::string { return std::string(x); });
3222
3223 UniTensor out;
3224 out._impl = this->_impl->relabel(vs);
3225 return out;
3226 }
3232 [[deprecated(
3233 "Please use "
3234 "UniTensor relabel(const std::initializer_list<char *> &new_labels) const "
3235 "instead.")]] UniTensor
3236 relabels(const std::initializer_list<char *> &new_labels) const {
3237 std::vector<char *> new_lbls(new_labels);
3238 std::vector<std::string> vs(new_lbls.size());
3239 transform(new_lbls.begin(), new_lbls.end(), vs.begin(),
3240 [](char *x) -> std::string { return std::string(x); });
3241
3242 UniTensor out;
3243 out._impl = this->_impl->relabels(vs);
3244 return out;
3245 }
3249 UniTensor &relabel_(const std::initializer_list<char *> &new_labels) {
3250 std::vector<char *> new_lbls(new_labels);
3251 std::vector<std::string> vs(new_lbls.size());
3252 transform(new_lbls.begin(), new_lbls.end(), vs.begin(),
3253 [](char *x) -> std::string { return std::string(x); });
3254
3255 this->_impl->relabel_(vs);
3256 return *this;
3257 }
3263 [[deprecated(
3264 "Please use "
3265 "UniTensor &relabel_(const std::initializer_list<char *> &new_labels) "
3266 "instead.")]] UniTensor &
3267 relabels_(const std::initializer_list<char *> &new_labels) {
3268 std::vector<char *> new_lbls(new_labels);
3269 std::vector<std::string> vs(new_lbls.size());
3270 transform(new_lbls.begin(), new_lbls.end(), vs.begin(),
3271 [](char *x) -> std::string { return std::string(x); });
3272
3273 this->_impl->relabels_(vs);
3274 return *this;
3275 }
3276
3284 UniTensor relabel(const std::vector<std::string> &old_labels,
3285 const std::vector<std::string> &new_labels) const {
3286 UniTensor out;
3287 out._impl = this->_impl->relabel(old_labels, new_labels);
3288 return out;
3289 }
3295 [[deprecated(
3296 "Please use "
3297 "UniTensor relabel(const std::vector<std::string> &old_labels, const "
3298 "std::vector<std::string> &new_labels) const "
3299 "instead.")]] UniTensor
3300 relabels(const std::vector<std::string> &old_labels,
3301 const std::vector<std::string> &new_labels) const {
3302 UniTensor out;
3303 out._impl = this->_impl->relabels(old_labels, new_labels);
3304 return out;
3305 }
3306
3322 UniTensor &relabel_(const std::vector<std::string> &old_labels,
3323 const std::vector<std::string> &new_labels) {
3324 this->_impl->relabel_(old_labels, new_labels);
3325 return *this;
3326 }
3332 [[deprecated(
3333 "Please use "
3334 "UniTensor &relabel_(const std::vector<std::string> &old_labels, const "
3335 "std::vector<std::string> &new_labels) "
3336 "instead.")]] UniTensor &
3337 relabels_(const std::vector<std::string> &old_labels,
3338 const std::vector<std::string> &new_labels) {
3339 this->_impl->relabels_(old_labels, new_labels);
3340 return *this;
3341 }
3342
3347 UniTensor relabel(const std::initializer_list<char *> &old_labels,
3348 const std::initializer_list<char *> &new_labels) const {
3349 std::vector<char *> new_lbls(new_labels);
3350 std::vector<std::string> vs(new_lbls.size());
3351 transform(new_lbls.begin(), new_lbls.end(), vs.begin(),
3352 [](char *x) -> std::string { return std::string(x); });
3353
3354 std::vector<char *> old_lbls(old_labels);
3355 std::vector<std::string> vs_old(old_lbls.size());
3356 transform(old_lbls.begin(), old_lbls.end(), vs_old.begin(),
3357 [](char *x) -> std::string { return std::string(x); });
3358
3359 return this->relabel(vs_old, vs);
3360 }
3361
3367 [[deprecated(
3368 "Please use "
3369 "UniTensor relabel(const std::initializer_list<char *> &old_labels, const "
3370 "std::initializer_list<char *> &new_labels) const "
3371 "instead.")]] UniTensor
3372 relabels(const std::initializer_list<char *> &old_labels,
3373 const std::initializer_list<char *> &new_labels) const {
3374 std::vector<char *> new_lbls(new_labels);
3375 std::vector<std::string> vs(new_lbls.size());
3376 transform(new_lbls.begin(), new_lbls.end(), vs.begin(),
3377 [](char *x) -> std::string { return std::string(x); });
3378
3379 std::vector<char *> old_lbls(old_labels);
3380 std::vector<std::string> vs_old(old_lbls.size());
3381 transform(old_lbls.begin(), old_lbls.end(), vs_old.begin(),
3382 [](char *x) -> std::string { return std::string(x); });
3383
3384 return this->relabels(vs_old, vs);
3385 }
3386
3391 UniTensor &relabel_(const std::initializer_list<char *> &old_labels,
3392 const std::initializer_list<char *> &new_labels) {
3393 std::vector<char *> new_lbls(new_labels);
3394 std::vector<std::string> vs(new_lbls.size());
3395 transform(new_lbls.begin(), new_lbls.end(), vs.begin(),
3396 [](char *x) -> std::string { return std::string(x); });
3397
3398 std::vector<char *> old_lbls(old_labels);
3399 std::vector<std::string> vs_old(old_lbls.size());
3400 transform(old_lbls.begin(), old_lbls.end(), vs_old.begin(),
3401 [](char *x) -> std::string { return std::string(x); });
3402
3403 this->relabel_(vs_old, vs);
3404 return *this;
3405 }
3411 [[deprecated(
3412 "Please use "
3413 "UniTensor &relabel_(const std::initializer_list<char *> &old_labels, const "
3414 "std::initializer_list<char *> &new_labels) "
3415 "instead.")]] UniTensor &
3416 relabels_(const std::initializer_list<char *> &old_labels,
3417 const std::initializer_list<char *> &new_labels) {
3418 std::vector<char *> new_lbls(new_labels);
3419 std::vector<std::string> vs(new_lbls.size());
3420 transform(new_lbls.begin(), new_lbls.end(), vs.begin(),
3421 [](char *x) -> std::string { return std::string(x); });
3422
3423 std::vector<char *> old_lbls(old_labels);
3424 std::vector<std::string> vs_old(old_lbls.size());
3425 transform(old_lbls.begin(), old_lbls.end(), vs_old.begin(),
3426 [](char *x) -> std::string { return std::string(x); });
3427
3428 this->relabels_(vs_old, vs);
3429 return *this;
3430 }
3431
3446 UniTensor relabel(const cytnx_int64 &inx, const std::string &new_label) const {
3447 UniTensor out;
3448 out._impl = this->_impl->relabel(inx, new_label);
3449 return out;
3450 }
3459 UniTensor &relabel_(const cytnx_int64 &inx, const std::string &new_label) {
3460 this->_impl->relabel_(inx, new_label);
3461 return *this;
3462 }
3463
3472 UniTensor &relabel_(const std::string &old_label, const std::string &new_label) {
3473 this->_impl->relabel_(old_label, new_label);
3474 return *this;
3475 }
3476
3491 UniTensor relabel(const std::string &old_label, const std::string &new_label) const {
3492 UniTensor out;
3493 out._impl = this->_impl->relabel(old_label, new_label);
3494 return out;
3495 }
3496
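    /* Example (editor's sketch, not part of the original header): relabeling bonds of a dense
       UniTensor. Variable names are hypothetical; relabels()/relabels_() are deprecated aliases
       of relabel()/relabel_().
       @code
       UniTensor A(cytnx::zeros({2, 3}), false, 1, {"i", "j"});
       UniTensor B = A.relabel({"i", "j"}, {"a", "b"});  // returns a relabeled UniTensor; A keeps {"i","j"}
       A.relabel_("j", "k");                             // in place: A now carries labels {"i","k"}
       @endcode */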
3503 UniTensor astype(const unsigned int &dtype) const {
3504 UniTensor out;
3505 if (this->dtype() == dtype) {
3506 out._impl = this->_impl;
3507 } else {
3508 out._impl = this->_impl->astype(dtype);
3509 }
3510 return out;
3511 }
3512
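    /* Example (editor's sketch): casting the element type. If the requested dtype equals the
       current one, the same implementation is shared; otherwise a converted copy is returned.
       @code
       UniTensor A(cytnx::ones({2, 2}));             // Type.Double by default
       UniTensor Ac = A.astype(Type.ComplexDouble);  // complex-valued copy of A
       @endcode */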
3521 UniTensor permute(const std::vector<cytnx_int64> &mapper,
3522 const cytnx_int64 &rowrank = -1) const {
3523 UniTensor out;
3524 out._impl = this->_impl->permute(mapper, rowrank);
3525 return out;
3526 }
3527
3534 UniTensor permute(const std::vector<std::string> &mapper,
3535 const cytnx_int64 &rowrank = -1) const {
3536 UniTensor out;
3537 out._impl = this->_impl->permute(mapper, rowrank);
3538 return out;
3539 }
3540
3544 UniTensor permute(const std::initializer_list<char *> &mapper,
3545 const cytnx_int64 &rowrank = -1) const {
3546 std::vector<char *> mprs = mapper;
3547 std::vector<std::string> vs(mprs.size());
3548 transform(mprs.begin(), mprs.end(), vs.begin(),
3549 [](char *x) -> std::string { return std::string(x); });
3550
3551 return this->permute(vs, rowrank);
3552 }
3553
3563 [[deprecated(
3564 "Please use "
3565 "UniTensor &permute_(const std::vector<std::string> &mapper, const cytnx_int64 &rowrank) "
3566 "instead.")]] UniTensor &
3567 permute_(const std::vector<cytnx_int64> &mapper, const cytnx_int64 &rowrank = -1) {
3568 this->_impl->permute_(mapper, rowrank);
3569 return *this;
3570 }
3571
3578 UniTensor &permute_(const std::vector<std::string> &mapper, const cytnx_int64 &rowrank = -1) {
3579 this->_impl->permute_(mapper, rowrank);
3580 return *this;
3581 }
3582
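    /* Example (editor's sketch): permuting legs either by label or by index; the rowrank can be
       reassigned at the same time.
       @code
       UniTensor T(cytnx::zeros({2, 3, 4}), false, 1, {"a", "b", "c"});
       UniTensor P = T.permute({"c", "a", "b"}, 1);  // by labels; returns a new UniTensor
       T.permute_({"c", "a", "b"}, 1);               // in-place variant
       @endcode */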
3594 UniTensor permute_nosignflip(const std::vector<cytnx_int64> &mapper,
3595 const cytnx_int64 &rowrank = -1) const {
3596 UniTensor out;
3597 out._impl = this->_impl->permute_nosignflip(mapper, rowrank);
3598 return out;
3599 }
3600
3610 UniTensor permute_nosignflip(const std::vector<std::string> &mapper,
3611 const cytnx_int64 &rowrank = -1) const {
3612 UniTensor out;
3613 out._impl = this->_impl->permute_nosignflip(mapper, rowrank);
3614 return out;
3615 }
3616
3621 UniTensor permute_nosignflip(const std::initializer_list<char *> &mapper,
3622 const cytnx_int64 &rowrank = -1) const {
3623 std::vector<char *> mprs = mapper;
3624 std::vector<std::string> vs(mprs.size());
3625 transform(mprs.begin(), mprs.end(), vs.begin(),
3626 [](char *x) -> std::string { return std::string(x); });
3627
3628 return this->permute_nosignflip(vs, rowrank);
3629 }
3630
3642 void permute_nosignflip_(const std::vector<cytnx_int64> &mapper,
3643 const cytnx_int64 &rowrank = -1) {
3644 this->_impl->permute_nosignflip_(mapper, rowrank);
3645 }
3646
3656 void permute_nosignflip_(const std::vector<std::string> &mapper,
3657 const cytnx_int64 &rowrank = -1) {
3658 this->_impl->permute_nosignflip_(mapper, rowrank);
3659 }
3660
3661 // void permute_( const std::initializer_list<char*> &mapper, const cytnx_int64 &rowrank= -1){
3662 // std::vector<char*> mprs = mapper;
3663 // std::vector<std::string> vs(mprs.size());
3664 // transform(mprs.begin(),mprs.end(),vs.begin(),[](char * x) -> std::string { return
3665 // std::string(x); });
3666
3667 // this->permute_(vs,rowrank);
3668 // }
3669
3670 // void permute_(const std::vector<cytnx_int64> &mapper, const cytnx_int64 &rowrank = -1) {
3671 // this->_impl->permute_(mapper, rowrank);
3672 // }
3673
3678 UniTensor contiguous() const {
3679 UniTensor out;
3680 out._impl = this->_impl->contiguous();
3681 return out;
3682 }
3683
3688 void contiguous_() { this->_impl = this->_impl->contiguous_(); }
3689
3694 void print_diagram(const bool &bond_info = false) const {
3695 this->_impl->print_diagram(bond_info);
3696 }
3697
3702 void print_blocks(const bool &full_info = true) const { this->_impl->print_blocks(full_info); }
3703
3709 void print_block(const cytnx_int64 &idx, const bool &full_info = true) const {
3710 this->_impl->print_block(idx, full_info);
3711 }
3712
3719 void group_basis_() { this->_impl->group_basis_(); }
3720
3721 UniTensor group_basis() const {
3722 UniTensor out = this->clone();
3723 out.group_basis_();
3724 return out;
3725 }
3726
3732 template <class T>
3733 T &at(const std::vector<cytnx_uint64> &locator) {
3734 // std::cout << "at " << this->is_blockform() << std::endl;
3735 if (this->uten_type() == UTenType.Block || this->uten_type() == UTenType.BlockFermionic) {
3736 // [NEW] this does not check whether the element exists; if it does not, an error will be thrown!
3737 T aux;
3738 return this->_impl->at_for_sparse(locator, aux);
3739
3740 } else if (this->uten_type() == UTenType.Sparse) {
3741 if (this->_impl->elem_exists(locator)) {
3742 T aux;
3743 return this->_impl->at_for_sparse(locator, aux);
3744 } else {
3745 cytnx_error_msg(true, "[ERROR][SparseUniTensor] invalid location. break qnum block.%s",
3746 "\n");
3747 }
3748 } else {
3749 return this->get_block_().at<T>(locator);
3750 }
3751 }
3752
3758 template <class T>
3759 const T &at(const std::vector<cytnx_uint64> &locator) const {
3760 // std::cout << "at " << this->is_blockform() << std::endl;
3761 if (this->uten_type() == UTenType.Block || this->uten_type() == UTenType.BlockFermionic) {
3762 // [NEW] this does not check whether the element exists; if it does not, an error will be thrown!
3763 T aux;
3764 return this->_impl->at_for_sparse(locator, aux);
3765
3766 } else if (this->uten_type() == UTenType.Sparse) {
3767 if (this->_impl->elem_exists(locator)) {
3768 T aux; // [workaround] use aux to dispatch.
3769 return this->_impl->at_for_sparse(locator, aux);
3770 } else {
3771 cytnx_error_msg(true, "[ERROR][SparseUniTensor] invalid location. break qnum block.%s",
3772 "\n");
3773 }
3774 } else {
3775 return this->get_block_().at<T>(locator);
3776 }
3777 }
3778
3779 template <class T>
3780 const T &at(const std::vector<std::string> &labels,
3781 const std::vector<cytnx_uint64> &locator) const {
3782 // given a one-to-one correspondence between labels and locator entries, return the element:
3783 cytnx_error_msg(locator.size() != labels.size(),
3784 "[ERROR][at] length of list should be the same for label and locator.%s",
3785 "\n");
3786 cytnx_error_msg(
3787 labels.size() != this->rank(),
3788 "[ERROR][at] length of lists must be the same as UniTensor.rank (# of legs)%s", "\n");
3789 std::vector<cytnx_uint64> new_locator(this->rank());
3790 cytnx_uint64 new_loc;
3791 for (int i = 0; i < labels.size(); i++) {
3792 auto res = std::find(this->_impl->_labels.begin(), this->_impl->_labels.end(), labels[i]);
3793 cytnx_error_msg(res == this->_impl->_labels.end(),
3794 "[ERROR] label:%s does not exist in current UniTensor.\n",
3795 labels[i].c_str());
3796 new_loc = std::distance(this->_impl->_labels.begin(), res);
3797 new_locator[new_loc] = locator[i];
3798 }
3798 }
3799 return this->at<T>(new_locator);
3800 }
3801 template <class T>
3802 T &at(const std::vector<std::string> &labels, const std::vector<cytnx_uint64> &locator) {
3803 // given a one-to-one correspondence between labels and locator entries, return the element:
3804 cytnx_error_msg(locator.size() != labels.size(),
3805 "[ERROR][at] length of list should be the same for label and locator.%s",
3806 "\n");
3807 cytnx_error_msg(
3808 labels.size() != this->rank(),
3809 "[ERROR][at] length of lists must be the same as UniTensor.rank (# of legs)%s", "\n");
3810 std::vector<cytnx_uint64> new_locator(this->rank());
3811 cytnx_uint64 new_loc;
3812 for (int i = 0; i < labels.size(); i++) {
3813 auto res = std::find(this->_impl->_labels.begin(), this->_impl->_labels.end(), labels[i]);
3814 cytnx_error_msg(res == this->_impl->_labels.end(),
3815 "[ERROR] label:%s does not exist in current UniTensor.\n",
3816 labels[i].c_str());
3817 new_loc = std::distance(this->_impl->_labels.begin(), res);
3818 new_locator[new_loc] = locator[i];
3819 }
3820 return this->at<T>(new_locator);
3821 }
3822
3827 const Scalar::Sproxy at(const std::vector<cytnx_uint64> &locator) const {
3828 if (this->uten_type() == UTenType.Block || this->uten_type() == UTenType.BlockFermionic) {
3829 return this->_impl->at_for_sparse(locator);
3830 } else if (this->uten_type() == UTenType.Sparse) {
3831 if (this->_impl->elem_exists(locator)) {
3832 return this->_impl->at_for_sparse(locator);
3833 } else {
3834 cytnx_error_msg(true, "[ERROR][SparseUniTensor] invalid location. break qnum block.%s",
3835 "\n");
3836 }
3837 } else {
3838 return this->get_block_().at(locator);
3839 }
3840 }
3841
3846 Scalar::Sproxy at(const std::vector<cytnx_uint64> &locator) {
3847 if (this->uten_type() == UTenType.Block || this->uten_type() == UTenType.BlockFermionic) {
3848 return this->_impl->at_for_sparse(locator);
3849 } else if (this->uten_type() == UTenType.Sparse) {
3850 if (this->_impl->elem_exists(locator)) {
3851 return this->_impl->at_for_sparse(locator);
3852 } else {
3853 cytnx_error_msg(true, "[ERROR][SparseUniTensor] invalid location. break qnum block.%s",
3854 "\n");
3855 }
3856 } else {
3857 return this->get_block_().at(locator);
3858 }
3859 }
3860
3861 Scalar::Sproxy at(const std::vector<std::string> &labels,
3862 const std::vector<cytnx_uint64> &locator) {
3863 // given a one-to-one correspondence between labels and locator entries, return the element:
3864 cytnx_error_msg(locator.size() != labels.size(),
3865 "[ERROR][at] length of list should be the same for label and locator.%s",
3866 "\n");
3867 cytnx_error_msg(
3868 labels.size() != this->rank(),
3869 "[ERROR][at] length of lists must be the same as UniTensor.rank (# of legs)%s", "\n");
3870 std::vector<cytnx_uint64> new_locator(this->rank());
3871 cytnx_uint64 new_loc;
3872 for (int i = 0; i < labels.size(); i++) {
3873 auto res = std::find(this->_impl->_labels.begin(), this->_impl->_labels.end(), labels[i]);
3874 cytnx_error_msg(res == this->_impl->_labels.end(),
3875 "[ERROR] label:%s does not exist in current UniTensor.\n",
3876 labels[i].c_str());
3877 new_loc = std::distance(this->_impl->_labels.begin(), res);
3878 new_locator[new_loc] = locator[i];
3879 }
3880 return this->at(new_locator);
3881 }
3882
3883 const Scalar::Sproxy at(const std::vector<std::string> &labels,
3884 const std::vector<cytnx_uint64> &locator) const {
3885 // given a one-to-one correspondence between labels and locator entries, return the element:
3886 cytnx_error_msg(locator.size() != labels.size(),
3887 "[ERROR][at] length of list should be the same for label and locator.%s",
3888 "\n");
3889 cytnx_error_msg(
3890 labels.size() != this->rank(),
3891 "[ERROR][at] length of lists must be the same as UniTensor.rank (# of legs)%s", "\n");
3892 std::vector<cytnx_uint64> new_locator(this->rank());
3893 cytnx_uint64 new_loc;
3894 for (int i = 0; i < labels.size(); i++) {
3895 auto res = std::find(this->_impl->_labels.begin(), this->_impl->_labels.end(), labels[i]);
3896 cytnx_error_msg(res == this->_impl->_labels.end(),
3897 "[ERROR] label:%s does not exist in current UniTensor.\n",
3898 labels[i].c_str());
3899 new_loc = std::distance(this->_impl->_labels.begin(), res);
3900 new_locator[new_loc] = locator[i];
3901 }
3902 return this->at(new_locator);
3903 }
3904
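    /* Example (editor's sketch): element access with at(). For a dense UniTensor the locator
       addresses the underlying block directly; for Block/Sparse UniTensors it must point inside
       a valid quantum-number block.
       @code
       UniTensor A(cytnx::zeros({2, 2}), false, 1, {"i", "j"});
       A.at<double>({0, 1}) = 3.14;                  // write by coordinates
       double v = A.at<double>({"j", "i"}, {1, 0});  // same element, addressed per label
       @endcode */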
3905 // return a clone of block
3911 Tensor get_block(const cytnx_uint64 &idx = 0) const { return this->_impl->get_block(idx); };
3912 //================================
3913 // return a clone of block
3922 Tensor get_block(const std::vector<cytnx_int64> &qidx, const bool &force = false) const {
3923 return this->_impl->get_block(qidx, force);
3924 }
3925
3926 Tensor get_block(const std::vector<std::string> &labels, const std::vector<cytnx_int64> &qidx,
3927 const bool &force = false) const {
3928 cytnx_error_msg(
3929 labels.size() != qidx.size(),
3930 "[ERROR][get_block] length of lists must be the same for both labels and qindices%s", "\n");
3931 cytnx_error_msg(labels.size() != this->rank(),
3932 "[ERROR][get_block] length of lists must be the same as the rank (# of legs)%s", "\n");
3933
3934 std::vector<cytnx_int64> loc_id(this->rank());
3935 std::vector<cytnx_int64> new_qidx(this->rank());
3936
3937 cytnx_uint64 new_loc;
3938 std::vector<cytnx_uint64> new_order(this->rank());
3939 for (int i = 0; i < labels.size(); i++) {
3940 auto res = std::find(this->_impl->_labels.begin(), this->_impl->_labels.end(), labels[i]);
3941 cytnx_error_msg(res == this->_impl->_labels.end(),
3942 "[ERROR][get_block] label:%s does not exist in current Tensor.\n",
3943 labels[i].c_str());
3944 new_loc = std::distance(this->_impl->_labels.begin(), res);
3945 new_qidx[new_loc] = qidx[i];
3946 new_order[i] = new_loc;
3947 }
3948 auto out = this->_impl->get_block(new_qidx, force);
3949 if (out.dtype() != Type.Void) out.permute_(new_order);
3950 return out;
3951 }
3952
3957 Tensor get_block(const std::initializer_list<cytnx_int64> &qnum,
3958 const bool &force = false) const {
3959 std::vector<cytnx_int64> tmp = qnum;
3960 return get_block(tmp, force);
3961 }
3962
3967 Tensor get_block(const std::vector<cytnx_uint64> &qnum, const bool &force = false) const {
3968 std::vector<cytnx_int64> iqnum(qnum.begin(), qnum.end());
3969 return this->_impl->get_block(iqnum, force);
3970 }
3971
3972 Tensor get_block(const std::vector<std::string> &labels, const std::vector<cytnx_uint64> &qidx,
3973 const bool &force = false) const {
3974 std::vector<cytnx_int64> iqnum(qidx.begin(), qidx.end());
3975 return this->get_block(labels, iqnum, force);
3976 }
3977
3983 const Tensor &get_block_(const cytnx_uint64 &idx = 0) const {
3984 return this->_impl->get_block_(idx);
3985 }
3986
3991 Tensor &get_block_(const cytnx_uint64 &idx = 0) { return this->_impl->get_block_(idx); }
3992
4001 Tensor &get_block_(const std::vector<cytnx_int64> &qidx, const bool &force = false) {
4002 return this->_impl->get_block_(qidx, force);
4003 }
4004
4019 // developer note: the returned Tensor is not the same object (thus Tensor instead of Tensor&),
4020 // since we permute; the two do, however, share the same data memory.
4021 Tensor get_block_(const std::vector<std::string> &labels, const std::vector<cytnx_int64> &qidx,
4022 const bool &force = false) {
4023 cytnx_error_msg(
4024 labels.size() != qidx.size(),
4025 "[ERROR][get_block] length of lists must be the same for both labels and qindices%s", "\n");
4026 cytnx_error_msg(labels.size() != this->rank(),
4027 "[ERROR][get_block] length of lists must be the same as the rank (# of legs)%s", "\n");
4028
4029 std::vector<cytnx_int64> loc_id(this->rank());
4030 std::vector<cytnx_int64> new_qidx(this->rank());
4031
4032 cytnx_uint64 new_loc;
4033 std::vector<cytnx_uint64> new_order(this->rank());
4034 for (int i = 0; i < labels.size(); i++) {
4035 auto res = std::find(this->_impl->_labels.begin(), this->_impl->_labels.end(), labels[i]);
4036 cytnx_error_msg(res == this->_impl->_labels.end(),
4037 "[ERROR][get_block] label:%s does not exist in current Tensor.\n",
4038 labels[i].c_str());
4039 new_loc = std::distance(this->_impl->_labels.begin(), res);
4040 new_qidx[new_loc] = qidx[i];
4041 new_order[i] = new_loc;
4042 }
4043 auto out = this->_impl->get_block_(new_qidx, force);
4044 if (out.dtype() != Type.Void) {
4045 out = out.permute(new_order);
4046 }
4047 return out;
4048 }
4049
4053 Tensor &get_block_(const std::initializer_list<cytnx_int64> &qidx, const bool &force = false) {
4054 std::vector<cytnx_int64> tmp = qidx;
4055 return get_block_(tmp, force);
4056 }
4057
4061 Tensor &get_block_(const std::vector<cytnx_uint64> &qidx, const bool &force = false) {
4062 std::vector<cytnx_int64> iqidx(qidx.begin(), qidx.end());
4063 return get_block_(iqidx, force);
4064 }
4065
4066 Tensor get_block_(const std::vector<std::string> &labels, const std::vector<cytnx_uint64> &qidx,
4067 const bool &force = false) {
4068 std::vector<cytnx_int64> iqidx(qidx.begin(), qidx.end());
4069 return get_block_(labels, iqidx, force);
4070 }
4071 //================================
4072
4073 // this only works for a non-symmetric tensor. returns a shared view of the block
4077 const Tensor &get_block_(const std::vector<cytnx_int64> &qidx,
4078 const bool &force = false) const {
4079 return this->_impl->get_block_(qidx, force);
4080 }
4081
4085 const Tensor &get_block_(const std::initializer_list<cytnx_int64> &qidx,
4086 const bool &force = false) const {
4087 std::vector<cytnx_int64> tmp = qidx;
4088 return this->_impl->get_block_(tmp, force);
4089 }
4090
4094 const Tensor &get_block_(const std::vector<cytnx_uint64> &qidx,
4095 const bool &force = false) const {
4096 std::vector<cytnx_int64> iqidx(qidx.begin(), qidx.end());
4097 return get_block_(iqidx, force);
4098 }
4099
4100 //================================
4109 //[dev]
4110 std::vector<Tensor> get_blocks() const { return this->_impl->get_blocks(); }
4111
4117 //[dev]
4118 const std::vector<Tensor> &get_blocks_(const bool &silent = false) const {
4119 return this->_impl->get_blocks_(silent);
4120 }
4121
4125 //[dev]
4126 std::vector<Tensor> &get_blocks_(const bool &silent = false) {
4127 return this->_impl->get_blocks_(silent);
4128 }
4129
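    /* Example (editor's sketch): get_block() returns a clone, while get_block_() returns a
       shared view, so modifying the view also modifies the UniTensor.
       @code
       UniTensor A(cytnx::ones({2, 2}));
       Tensor copy = A.get_block();     // independent copy of the (single) dense block
       Tensor &view = A.get_block_();   // shared view
       view.at<double>({0, 0}) = 0.0;   // change is reflected in A
       @endcode */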
4135 void put_block(const Tensor &in, const cytnx_uint64 &idx = 0) {
4136 this->_impl->put_block(in, idx);
4137 }
4138
4146 void put_block(const Tensor &in_tens, const std::vector<cytnx_int64> &qidx, const bool &force) {
4147 this->_impl->put_block(in_tens, qidx, force);
4148 }
4149
4154 void put_block(Tensor &in, const std::vector<std::string> &lbls,
4155 const std::vector<cytnx_int64> &qidx, const bool &force = false) {
4156 cytnx_error_msg(
4157 lbls.size() != qidx.size(),
4158 "[ERROR][put_block] length of lists must be the same for both labels and qindices%s", "\n");
4159 cytnx_error_msg(lbls.size() != this->rank(),
4160 "[ERROR][put_block] length of lists must be the same as the rank (# of legs)%s", "\n");
4161
4162 std::vector<cytnx_int64> loc_id(this->rank());
4163 std::vector<cytnx_int64> new_qidx(this->rank());
4164
4165 cytnx_uint64 new_loc;
4166 // std::vector<cytnx_uint64> new_order(this->rank());
4167 std::vector<cytnx_uint64> inv_order(this->rank());
4168 for (int i = 0; i < lbls.size(); i++) {
4169 auto res = std::find(this->_impl->_labels.begin(), this->_impl->_labels.end(), lbls[i]);
4170 cytnx_error_msg(res == this->_impl->_labels.end(),
4171 "[ERROR][put_block] label:%s does not exist in current Tensor.\n",
4172 lbls[i].c_str());
4173 new_loc = std::distance(this->_impl->_labels.begin(), res);
4174 new_qidx[new_loc] = qidx[i];
4175 // new_order[i] = new_loc;
4176 inv_order[new_loc] = i;
4177 }
4178 this->_impl->put_block(in.permute(inv_order), new_qidx, force);
4179 }
4180
4186 void put_block_(Tensor &in, const cytnx_uint64 &idx = 0) { this->_impl->put_block_(in, idx); }
4187
4194 void put_block_(Tensor &in, const std::vector<cytnx_int64> &qidx, const bool &force) {
4195 this->_impl->put_block_(in, qidx, force);
4196 }
4197
4202 void put_block_(Tensor &in, const std::vector<std::string> &lbls,
4203 const std::vector<cytnx_int64> &qidx, const bool &force = false) {
4204 cytnx_error_msg(
4205 lbls.size() != qidx.size(),
4206 "[ERROR][put_block_] length of lists must be the same for both labels and qindices%s",
4207 "\n");
4208 cytnx_error_msg(lbls.size() != this->rank(),
4209 "[ERROR][put_block_] length of lists must be the same as the rank (# of legs)%s", "\n");
4210
4211 std::vector<cytnx_int64> loc_id(this->rank());
4212 std::vector<cytnx_int64> new_qidx(this->rank());
4213
4214 cytnx_uint64 new_loc;
4215 std::vector<cytnx_uint64> new_order(this->rank());
4216 std::vector<cytnx_uint64> inv_order(this->rank());
4217 for (int i = 0; i < lbls.size(); i++) {
4218 auto res = std::find(this->_impl->_labels.begin(), this->_impl->_labels.end(), lbls[i]);
4219 cytnx_error_msg(res == this->_impl->_labels.end(),
4220 "[ERROR][put_block_] label:%s does not exist in current Tensor.\n",
4221 lbls[i].c_str());
4222 new_loc = std::distance(this->_impl->_labels.begin(), res);
4223 new_qidx[new_loc] = qidx[i];
4224 new_order[i] = new_loc;
4225 inv_order[new_loc] = i;
4226 }
4227 in.permute_(inv_order);
4228 this->_impl->put_block_(in, new_qidx, force);
4229 in.permute_(new_order);
4230 }
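    /* Example (editor's sketch): writing a block back. Following the library's underscore
       convention, put_block() stores a copy of the data, while put_block_() shares memory with
       the given Tensor.
       @code
       UniTensor A(cytnx::zeros({2, 2}));
       Tensor blk = cytnx::ones({2, 2});
       A.put_block(blk);      // A's block now holds a copy of blk
       A.put_block_(blk, 0);  // A's block now shares memory with blk
       @endcode */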
4231 UniTensor get(const std::vector<Accessor> &accessors) const {
4232 UniTensor out;
4233 out._impl = this->_impl->get(accessors);
4234 return out;
4235 }
4236 void set(const std::vector<Accessor> &accessors, const Tensor &rhs) {
4237 this->_impl->set(accessors, rhs);
4238 }
4239 void set(const std::vector<Accessor> &accessors, const UniTensor &rhs) {
4240 cytnx_error_msg(
4241 rhs.uten_type() != UTenType.Dense,
4242 "[ERROR] cannot set elements from UniTensor with symmetry. Use at() instead.%s", "\n");
4243 cytnx_error_msg(this->is_diag(), "[ERROR] cannot set UniTensor with is_diag=True.%s", "\n");
4244 cytnx_error_msg(rhs.is_diag(),
4245 "[ERROR] cannot set UniTensor. incoming UniTensor is_diag=True.%s", "\n");
4246
4247 this->_impl->set(accessors, rhs.get_block());
4248 }
4256 UniTensor reshape(const std::vector<cytnx_int64> &new_shape, const cytnx_uint64 &rowrank = 0) {
4257 UniTensor out;
4258 out._impl = this->_impl->reshape(new_shape, rowrank);
4259 return out;
4260 }
4261
4268 UniTensor &reshape_(const std::vector<cytnx_int64> &new_shape,
4269 const cytnx_uint64 &rowrank = 0) {
4270 this->_impl->reshape_(new_shape, rowrank);
4271 return *this;
4272 }
4273
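    /* Example (editor's sketch): reshaping a dense (non-symmetric) UniTensor; rowrank marks how
       many of the new legs belong to the "row" side.
       @code
       UniTensor A(cytnx::arange(12));      // one bond of dimension 12
       UniTensor B = A.reshape({3, 4}, 1);  // rank-2 result with rowrank = 1
       A.reshape_({2, 2, 3}, 1);            // in-place variant
       @endcode */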
4285 UniTensor to_dense() {
4286 UniTensor out;
4287 out._impl = this->_impl->to_dense();
4288 return out;
4289 }
4290
4295 void to_dense_() { this->_impl->to_dense_(); }
4296
4302 [[deprecated(
4303 "Please use "
4304 "combineBond(const std::vector<std::string> &indicators, const bool &force) "
4305 "instead.")]] void
4306 combineBonds(const std::vector<cytnx_int64> &indicators, const bool &force,
4307 const bool &by_label) {
4308 this->_impl->combineBonds(indicators, force, by_label);
4309 }
4310
4316 [[deprecated(
4317 "Please use "
4318 "combineBond(const std::vector<std::string> &indicators, const bool &force) "
4319 "instead.")]] void
4320 combineBonds(const std::vector<std::string> &indicators, const bool &force = false) {
4321 this->_impl->combineBonds(indicators, force);
4322 }
4323
4329 [[deprecated(
4330 "Please use "
4331 "combineBond(const std::vector<std::string> &indicators, const bool &force) "
4332 "instead.")]] void
4333 combineBonds(const std::vector<cytnx_int64> &indicators, const bool &force = false) {
4334 this->_impl->combineBonds(indicators, force);
4335 }
4336
4347 void combineBond(const std::vector<std::string> &indicators, const bool &force = false) {
4348 this->_impl->combineBond(indicators, force);
4349 }
4350
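    /* Example (editor's sketch): merging legs by label. The merged bond has the product of the
       original dimensions.
       @code
       UniTensor T(cytnx::zeros({2, 3, 4}), false, 1, {"a", "b", "c"});
       T.combineBond({"b", "c"});  // T is now rank-2; the combined bond has dimension 12
       @endcode */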
4369 UniTensor contract(const UniTensor &inR, const bool &mv_elem_self = false,
4370 const bool &mv_elem_rhs = false) const {
4371 UniTensor out;
4372 out._impl = this->_impl->contract(inR._impl, mv_elem_self, mv_elem_rhs);
4373 return out;
4374 }
4375
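    /* Example (editor's sketch): contract() sums over every bond whose label appears on both
       tensors; the remaining legs keep their labels.
       @code
       UniTensor A(cytnx::ones({2, 3}), false, 1, {"i", "k"});
       UniTensor B(cytnx::ones({3, 4}), false, 1, {"k", "j"});
       UniTensor C = A.contract(B);  // contracts over "k"; C carries labels {"i", "j"}
       @endcode */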
4377
4385 std::vector<Bond> getTotalQnums(const bool physical = false) const {
4386 return this->_impl->getTotalQnums(physical);
4387 }
4388
4392 std::vector<std::vector<cytnx_int64>> get_blocks_qnums() const {
4393 return this->_impl->get_blocks_qnums();
4394 }
4396
4401 bool same_data(const UniTensor &rhs) const {
4402 // check same type:
4403 if (this->_impl->uten_type() != rhs._impl->uten_type()) return false;
4404
4405 return this->_impl->same_data(rhs._impl);
4406 }
4407
4424 UniTensor &Add_(const UniTensor &rhs) {
4425 this->_impl->Add_(rhs._impl);
4426 return *this;
4427 }
4428
4445 UniTensor &Mul_(const UniTensor &rhs) {
4446 this->_impl->Mul_(rhs._impl);
4447 return *this;
4448 }
4449
4466 UniTensor &Sub_(const UniTensor &rhs) {
4467 this->_impl->Sub_(rhs._impl);
4468 return *this;
4469 }
4470
4487 UniTensor &Div_(const UniTensor &rhs) {
4488 this->_impl->Div_(rhs._impl);
4489 return *this;
4490 }
4491
4502 UniTensor &Add_(const Scalar &rhs) {
4503 this->_impl->Add_(rhs);
4504 return *this;
4505 }
4506
4517 UniTensor &Mul_(const Scalar &rhs) {
4518 this->_impl->Mul_(rhs);
4519 return *this;
4520 }
4521
4532 UniTensor &Sub_(const Scalar &rhs) {
4533 this->_impl->Sub_(rhs);
4534 return *this;
4535 }
4536
4547 UniTensor &Div_(const Scalar &rhs) {
4548 this->_impl->Div_(rhs);
4549 return *this;
4550 }
4551
4569
4580 UniTensor Add(const Scalar &rhs) const;
4581
4599
4610 UniTensor Mul(const Scalar &rhs) const;
4611
4629
4640 UniTensor Div(const Scalar &rhs) const;
4641
4659
4670 UniTensor Sub(const Scalar &rhs) const;
4671
4680 Tensor Norm() const { return this->_impl->Norm(); };
4681
4697 UniTensor &operator+=(const UniTensor &rhs) {
4698 this->Add_(rhs);
4699 return *this;
4700 }
4701
4717 UniTensor &operator-=(const UniTensor &rhs) {
4718 this->Sub_(rhs);
4719 return *this;
4720 }
4721
4737 UniTensor &operator/=(const UniTensor &rhs) {
4738 this->Div_(rhs);
4739 return *this;
4740 }
4741
4757 UniTensor &operator*=(const UniTensor &rhs) {
4758 this->Mul_(rhs);
4759 return *this;
4760 }
4761
4772 UniTensor &operator+=(const Scalar &rhs) {
4773 this->Add_(rhs);
4774 return *this;
4775 }
4776
4787 UniTensor &operator-=(const Scalar &rhs) {
4788 this->Sub_(rhs);
4789 return *this;
4790 }
4791
4802 UniTensor &operator/=(const Scalar &rhs) {
4803 this->Div_(rhs);
4804 return *this;
4805 }
4806
4817 UniTensor &operator*=(const Scalar &rhs) {
4818 this->Mul_(rhs);
4819 return *this;
4820 }
4821
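    /* Example (editor's sketch): element-wise arithmetic. The UniTensor overloads expect
       compatible shapes/labels, while the Scalar overloads act on every element (a plain double
       is assumed to convert to Scalar here).
       @code
       UniTensor A(cytnx::ones({2, 2}));
       UniTensor B = A.clone();
       B.Add_(A);            // in place: B now holds A + A element-wise
       A *= 2.0;             // scalar multiplication
       A.Sub_(1.0);          // subtract 1.0 from every element
       Tensor n = A.Norm();  // 2-norm over all elements
       @endcode */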
4829 UniTensor Conj() const {
4830 UniTensor out;
4831 out._impl = this->_impl->Conj();
4832 return out;
4833 }
4834
4842 UniTensor &Conj_() {
4843 this->_impl->Conj_();
4844 return *this;
4845 }
4846
4859 UniTensor Transpose() const {
4860 UniTensor out;
4861 out._impl = this->_impl->Transpose();
4862 return out;
4863 }
4864
4871 UniTensor &Transpose_() {
4872 this->_impl->Transpose_();
4873 return *this;
4874 }
4875
4882 UniTensor normalize() const {
4883 UniTensor out;
4884 out._impl = this->_impl->normalize();
4885 return out;
4886 }
4887
4894 UniTensor &normalize_() {
4895 this->_impl->normalize_();
4896 return *this;
4897 }
4898
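    /* Example (editor's sketch): Dagger() takes the conjugate transpose (Conj() followed by
       Transpose()); normalize() rescales by the 2-norm.
       @code
       UniTensor A(cytnx::ones({2, 2}, Type.ComplexDouble));
       UniTensor Ad = A.Dagger();  // conjugate transpose, new object
       A.Conj_();                  // in-place complex conjugation
       A.normalize_();             // in-place 2-norm normalization
       @endcode */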
4908 UniTensor Trace(const std::string &a, const std::string &b) const {
4909 UniTensor out;
4910 out._impl = this->_impl->Trace(a, b);
4911 return out;
4912 }
4913
4923 UniTensor Trace(const cytnx_int64 &a = 0, const cytnx_int64 &b = 1) const {
4924 UniTensor out;
4925 out._impl = this->_impl->Trace(a, b);
4926 return out;
4927 }
4928
4938 UniTensor &Trace_(const std::string &a, const std::string &b) {
4939 this->_impl->Trace_(a, b);
4940 if (this->uten_type() == UTenType.Block || this->uten_type() == UTenType.BlockFermionic) {
4941 // handle the case where no legs are left, for BlockUniTensor.
4942 if (this->rank() == 0) {
4943 DenseUniTensor *tmp = new DenseUniTensor();
4944 tmp->_block = this->get_blocks_(true)[0];
4945 this->_impl = boost::intrusive_ptr<UniTensor_base>(tmp);
4946 }
4947 }
4948 return *this;
4949 }
4950
4960 UniTensor &Trace_(const cytnx_int64 &a = 0, const cytnx_int64 &b = 1) {
4961 this->_impl->Trace_(a, b);
4962 if (this->uten_type() == UTenType.Block || this->uten_type() == UTenType.BlockFermionic) {
4963 // handle the case where no legs are left, for BlockUniTensor.
4964 if (this->rank() == 0) {
4965 DenseUniTensor *tmp = new DenseUniTensor();
4966 tmp->_block = this->get_blocks_(true)[0];
4967 this->_impl = boost::intrusive_ptr<UniTensor_base>(tmp);
4968 }
4969 }
4970 return *this;
4971 }
4972
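    /* Example (editor's sketch): partial trace over a pair of legs selected by label or by
       index; the two traced legs must have matching dimensions.
       @code
       UniTensor M(cytnx::ones({3, 3, 2}), false, 1, {"i", "j", "k"});
       UniTensor t = M.Trace("i", "j");  // traces the two dimension-3 legs; the result keeps "k"
       @endcode */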
4979 UniTensor Dagger() const {
4980 UniTensor out;
4981 out._impl = this->_impl->Dagger();
4982 return out;
4983 }
4984
4991 UniTensor &Dagger_() {
4992 this->_impl->Dagger_();
4993 return *this;
4994 }
4995
5004 UniTensor &tag() {
5005 this->_impl->tag();
5006 return *this;
5007 }
5008
5017 UniTensor Pow(const double &p) const;
5018
5027 UniTensor &Pow_(const double &p);
5028
5035 bool elem_exists(const std::vector<cytnx_uint64> &locator) const {
5036 return this->_impl->elem_exists(locator);
5037 }
5038
5044 template <class T>
5045 T get_elem(const std::vector<cytnx_uint64> &locator) const {
5046 return this->at<T>(locator);
5047 }
5048
5054 template <class T2>
5055 void set_elem(const std::vector<cytnx_uint64> &locator, const T2 &rc) {
5056 // cytnx_error_msg(true,"[ERROR] invalid type%s","\n");
5057 this->at(locator) = rc;
5058 }
5059
5066 void Save(const std::string &fname) const;
5067
5074 void Save(const char *fname) const;
5075
5084 static UniTensor Load(const std::string &fname);
5085
5094 static UniTensor Load(const char *fname);
5095
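    /* Example (editor's sketch): serialization round trip. The file name and suffix below are
       illustrative; check the library's Save/Load convention for the exact file extension.
       @code
       UniTensor A(cytnx::ones({2, 2}));
       A.Save("my_tensor");                               // write to disk
       UniTensor B = UniTensor::Load("my_tensor.cytnx");  // read it back (suffix assumed)
       @endcode */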
5104 UniTensor &truncate_(const std::string &label, const cytnx_uint64 &dim) {
5105 this->_impl->truncate_(label, dim);
5106 return *this;
5107 }
5108
5117 UniTensor &truncate_(const cytnx_int64 &bond_idx, const cytnx_uint64 &dim) {
5118 this->_impl->truncate_(bond_idx, dim);
5119 return *this;
5120 }
5121
5131 UniTensor truncate(const std::string &label, const cytnx_uint64 &dim) const {
5132 UniTensor out = this->clone();
5133 out.truncate_(label, dim);
5134 return out;
5135 }
5136
5146 UniTensor truncate(const cytnx_int64 &bond_idx, const cytnx_uint64 &dim) const {
5147 UniTensor out = this->clone();
5148 out.truncate_(bond_idx, dim);
5149 return out;
5150 }
5151
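    /* Example (editor's sketch): shrinking a bond to a smaller dimension, selected by label or
       by bond index.
       @code
       UniTensor A(cytnx::zeros({4, 6}), false, 1, {"l", "r"});
       UniTensor B = A.truncate("r", 3);  // bond "r" of B has dimension 3; A is unchanged
       A.truncate_(0, 2);                 // in place: bond 0 of A now has dimension 2
       @endcode */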
5159 const std::vector<cytnx_uint64> &get_qindices(const cytnx_uint64 &bidx) const {
5160 return this->_impl->get_qindices(bidx);
5161 }
5169 std::vector<cytnx_uint64> &get_qindices(const cytnx_uint64 &bidx) {
5170 return this->_impl->get_qindices(bidx);
5171 }
5172
5179 const vec2d<cytnx_uint64> &get_itoi() const { return this->_impl->get_itoi(); }
5180 vec2d<cytnx_uint64> &get_itoi() { return this->_impl->get_itoi(); }
5181
5183 void _Load(std::fstream &f);
5184 void _Save(std::fstream &f) const;
5186
5187 UniTensor &convert_from(const UniTensor &rhs, const bool &force = false,
5188 const cytnx_double &tol = 1e-14) {
5189 this->_impl->from_(rhs._impl, force, tol);
5190 return *this;
5191 }
5192
5193 // Generators:
5206 static UniTensor zeros(const cytnx_uint64 &Nelem,
5207 const std::vector<std::string> &in_labels = {},
5208 const unsigned int &dtype = Type.Double, const int &device = Device.cpu,
5209 const std::string &name = "") {
5210 return UniTensor(cytnx::zeros(Nelem, dtype, device), false, -1, in_labels, name);
5211 }
5212
5226 static UniTensor zeros(const std::vector<cytnx_uint64> &shape,
5227 const std::vector<std::string> &in_labels = {},
5228 const unsigned int &dtype = Type.Double, const int &device = Device.cpu,
5229 const std::string &name = "") {
5230 return UniTensor(cytnx::zeros(shape, dtype, device), false, -1, in_labels, name);
5231 }
5232
5244 static UniTensor ones(const cytnx_uint64 &Nelem, const std::vector<std::string> &in_labels = {},
5245 const unsigned int &dtype = Type.Double, const int &device = Device.cpu,
5246 const std::string &name = "") {
5247 return UniTensor(cytnx::ones(Nelem, dtype, device), false, -1, in_labels, name);
5248 }
5249
5262 static UniTensor identity(const cytnx_uint64 &dim,
5263 const std::vector<std::string> &in_labels = {},
5264 const cytnx_bool &is_diag = false,
5265 const unsigned int &dtype = Type.Double,
5266 const int &device = Device.cpu, const std::string &name = "") {
5267 if (is_diag) {
5268 return UniTensor(cytnx::ones(dim, dtype, device), is_diag, -1, in_labels, name);
5269 } else {
5271 }
5272 }
5273
5289 static UniTensor eye(const cytnx_uint64 &dim, const std::vector<std::string> &in_labels = {},
5290 const cytnx_bool &is_diag = false, const unsigned int &dtype = Type.Double,
5291 const int &device = Device.cpu, const std::string &name = "") {
5292 return identity(dim, in_labels, is_diag, dtype, device, name);
5293 }
5294
5307 static UniTensor ones(const std::vector<cytnx_uint64> &shape,
5308 const std::vector<std::string> &in_labels = {},
5309 const unsigned int &dtype = Type.Double, const int &device = Device.cpu,
5310 const std::string &name = "") {
5311 return UniTensor(cytnx::ones(shape, dtype, device), false, -1, in_labels, name);
5312 }
5313
5327 static UniTensor arange(const cytnx_int64 &Nelem,
5328 const std::vector<std::string> &in_labels = {},
5329 const std::string &name = "") {
5330 return UniTensor(cytnx::arange(Nelem), false, -1, in_labels, name);
5331 }
5332
5349 static UniTensor arange(const cytnx_double &start, const cytnx_double &end,
5350 const cytnx_double &step = 1,
5351 const std::vector<std::string> &in_labels = {},
5352 const unsigned int &dtype = Type.Double, const int &device = Device.cpu,
5353 const std::string &name = "") {
5354 return UniTensor(cytnx::arange(start, end, step, dtype, device), false, -1, in_labels, name);
5355 }
5356
5376 static UniTensor linspace(const cytnx_double &start, const cytnx_double &end,
5377 const cytnx_uint64 &Nelem, const bool &endpoint = true,
5378 const std::vector<std::string> &in_labels = {},
5379 const unsigned int &dtype = Type.Double,
5380 const int &device = Device.cpu, const std::string &name = "") {
5381 return UniTensor(cytnx::linspace(start, end, Nelem, endpoint, dtype, device), false, -1,
5382 in_labels, name);
5383 }
5384
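    /* Example (editor's sketch): the static generators wrap the corresponding cytnx Tensor
       generators and attach optional bond labels.
       @code
       UniTensor Z = UniTensor::zeros({2, 3}, {"a", "b"});
       UniTensor I = UniTensor::identity(4, {"in", "out"}, true);  // diagonal storage
       UniTensor R = UniTensor::arange(0.0, 1.0, 0.25);            // elements 0, 0.25, 0.5, 0.75
       UniTensor S = UniTensor::linspace(0.0, 1.0, 5);             // 5 evenly spaced points
       @endcode */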
5385 // Random Generators:
5404 static UniTensor normal(const cytnx_uint64 &Nelem, const double &mean, const double &std,
5405 const std::vector<std::string> &in_labels = {},
5406 const unsigned int &seed = cytnx::random::__static_random_device(),
5407 const unsigned int &dtype = Type.Double, const int &device = Device.cpu,
5408 const std::string &name = "");
5409
5428 static UniTensor normal(const std::vector<cytnx_uint64> &shape, const double &mean,
5429 const double &std, const std::vector<std::string> &in_labels = {},
5430 const unsigned int &seed = cytnx::random::__static_random_device(),
5431 const unsigned int &dtype = Type.Double, const int &device = Device.cpu,
5432 const std::string &name = "");
5433
5452 static UniTensor uniform(const cytnx_uint64 &Nelem, const double &low, const double &high,
5453 const std::vector<std::string> &in_labels = {},
5454 const unsigned int &seed = cytnx::random::__static_random_device(),
5455 const unsigned int &dtype = Type.Double,
5456 const int &device = Device.cpu, const std::string &name = "");
5457
5495 static UniTensor uniform(const std::vector<cytnx_uint64> &shape, const double &low,
5496 const double &high, const std::vector<std::string> &in_labels = {},
5497 const unsigned int &seed = cytnx::random::__static_random_device(),
5498 const unsigned int &dtype = Type.Double,
5499 const int &device = Device.cpu, const std::string &name = "");
5500
5501 // Inplace Random Generators:
5514 void normal_(const double &mean, const double &std,
5515 const unsigned int &seed = cytnx::random::__static_random_device());
5516
5529 void uniform_(const double &low = 0, const double &high = 1,
5530 const unsigned int &seed = cytnx::random::__static_random_device());
5531
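    /* Example (editor's sketch): random initialization. The static versions create a new
       UniTensor; normal_()/uniform_() refill an existing one in place.
       @code
       UniTensor G = UniTensor::normal({2, 2}, 0.0, 1.0, {"i", "j"});  // mean 0, std 1
       UniTensor U = UniTensor::uniform(10, -1.0, 1.0);                // one bond, 10 elements
       G.uniform_(0.0, 1.0);                                           // refill G in place
       @endcode */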
5532 }; // class UniTensor
5533
5535 std::ostream &operator<<(std::ostream &os, const UniTensor &in);
5537
5550 UniTensor Contract(const UniTensor &inL, const UniTensor &inR, const bool &cacheL = false,
5551 const bool &cacheR = false);
5552
5565 UniTensor Contract(const std::vector<UniTensor> &TNs, const std::string &order,
5566 const bool &optimal);
5567
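  /* Example (editor's sketch): the free Contract() contracts over bonds with matching labels,
     just like UniTensor::contract(); the std::vector overload contracts a whole list of tensors
     with a user-supplied contraction order.
     @code
     UniTensor A(cytnx::ones({2, 3}), false, 1, {"i", "k"});
     UniTensor B(cytnx::ones({3, 4}), false, 1, {"k", "j"});
     UniTensor C = Contract(A, B);  // result carries labels {"i", "j"}
     @endcode */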
5573 [[deprecated(
5574 "Please use "
5575 "UniTensor Contract(const std::vector<UniTensor> &TNs, const std::string &order, const bool "
5576 "&optimal) "
5577 "instead.")]] UniTensor
5578 Contracts(const std::vector<UniTensor> &TNs, const std::string &order, const bool &optimal);
5579
5581 void _resolve_CT(std::vector<UniTensor> &TNlist);
5582 template <class... T>
5583 void _resolve_CT(std::vector<UniTensor> &TNlist, const UniTensor &in, const T &...args) {
5584 TNlist.push_back(in);
5585 _resolve_CT(TNlist, args...);
5586 }
5588
5600 template <class... T>
5601 UniTensor Contract(const UniTensor &in, const T &...args, const std::string &order,
5602 const bool &optimal) {
5603 std::vector<UniTensor> TNlist;
5604 _resolve_CT(TNlist, in, args...);
5605 return Contract(TNlist, order, optimal);
5606 }
5607
5613 template <class... T>
5614 [[deprecated(
5615 "Please use "
5616 "UniTensor Contract(const UniTensor &in, const T &...args, const std::string &order, const "
5617 "bool &optimal) "
5618 "instead.")]] UniTensor
5619 Contracts(const UniTensor &in, const T &...args, const std::string &order,
5620 const bool &optimal) {
5621 std::vector<UniTensor> TNlist;
5622 _resolve_CT(TNlist, in, args...);
5623 return Contracts(TNlist, order, optimal);
5624 }
5625
5626} // namespace cytnx
5627
5628#endif // BACKEND_TORCH
5629
5630#endif // CYTNX_UNITENSOR_H_
constexpr Type_class Type
data type
Definition Type.hpp:426
the object contains auxiliary properties for each Tensor rank (bond)
Definition Bond.hpp:178
Bond clone() const
return a copy of the instance Bond
Definition Bond.hpp:490
a tensor (multi-dimensional array)
Definition Tensor.hpp:41
void to_(const int &device)
move the current Tensor to the device.
Definition Tensor.hpp:675
Tensor contiguous_()
Make the Tensor contiguous by coalescing the memory (storage), inplacely.
Definition Tensor.hpp:762
unsigned int dtype() const
the dtype-id of the Tensor
Definition Tensor.hpp:574
Tensor contiguous() const
Make the Tensor contiguous by coalescing the memory (storage).
Definition Tensor.hpp:742
T & at(const std::vector< cytnx_uint64 > &locator)
Get an element at specific location.
Definition Tensor.hpp:924
Tensor clone() const
return a clone of the current Tensor.
Definition Tensor.hpp:626
void set(const std::vector< cytnx::Accessor > &accessors, const Tensor &rhs)
set elements with the input Tensor using Accessor (C++ API) / slices (python API)
Definition Tensor.hpp:1059
Tensor Norm() const
the Norm member function. Same as linalg::Norm(const Tensor &Tin), where Tin is the current Tensor.
Tensor astype(const int &new_type) const
return a new Tensor that cast to different dtype.
Definition Tensor.hpp:889
const bool & is_contiguous() const
return whether the Tensor is contiguous or not.
Definition Tensor.hpp:681
Tensor & Conj_()
the Conj_ member function. Same as cytnx::linalg::Conj_(Tensor &Tin), where Tin is the current Tensor...
int device() const
the device-id of the Tensor
Definition Tensor.hpp:581
Tensor get(const std::vector< cytnx::Accessor > &accessors) const
get elements using Accessor (C++ API) / slices (python API)
Definition Tensor.hpp:1027
const std::vector< cytnx_uint64 > & shape() const
the shape of the Tensor
Definition Tensor.hpp:601
An Enhanced tensor specifically designed for physical Tensor network simulation.
Definition UniTensor.hpp:2599
void put_block(const Tensor &in_tens, const std::vector< cytnx_int64 > &qidx, const bool &force)
Put the block into the UniTensor with given quantum number.
Definition UniTensor.hpp:4146
UniTensor to(const int &device) const
move the current UniTensor to the assigned device.
Definition UniTensor.hpp:3137
UniTensor & operator*=(const UniTensor &rhs)
The multiplication assignment operator of the UniTensor.
Definition UniTensor.hpp:4757
static UniTensor eye(const cytnx_uint64 &dim, const std::vector< std::string > &in_labels={}, const cytnx_bool &is_diag=false, const unsigned int &dtype=Type.Double, const int &device=Device.cpu, const std::string &name="")
Generate a 2-bond identity UniTensor.
Definition UniTensor.hpp:5289
Tensor & get_block_(const std::initializer_list< cytnx_int64 > &qidx, const bool &force=false)
Definition UniTensor.hpp:4053
std::vector< Tensor > & get_blocks_(const bool &silent=false)
Definition UniTensor.hpp:4126
Tensor & get_block_(const std::vector< cytnx_uint64 > &qidx, const bool &force=false)
Definition UniTensor.hpp:4061
void print_block(const cytnx_int64 &idx, const bool &full_info=true) const
Given a index and print out the corresponding block of the UniTensor.
Definition UniTensor.hpp:3709
UniTensor & operator/=(const UniTensor &rhs)
The division assignment operator of the UniTensor.
Definition UniTensor.hpp:4737
T & item()
Definition UniTensor.hpp:2935
bool is_contiguous() const
To tell whether the UniTensor is contiguous.
Definition UniTensor.hpp:3026
T get_elem(const std::vector< cytnx_uint64 > &locator) const
Definition UniTensor.hpp:5045
UniTensor Div(const UniTensor &rhs) const
The division function of the UniTensor.
Tensor get_block(const std::vector< std::string > &labels, const std::vector< cytnx_int64 > &qidx, const bool &force=false) const
Definition UniTensor.hpp:3926
std::vector< cytnx_uint64 > & get_qindices(const cytnx_uint64 &bidx)
get the q-indices on each leg for the [bidx]-th block
Definition UniTensor.hpp:5169
std::vector< bool > signflip() const
Get the sign information of a fermionic UniTensor.
Definition UniTensor.hpp:3101
UniTensor & set_label(const cytnx_int64 &idx, const char *new_label)
Definition UniTensor.hpp:2838
UniTensor & operator+=(const UniTensor &rhs)
The addition assignment operator of the UniTensor.
Definition UniTensor.hpp:4697
void to_dense_()
Convert the UniTensor to non-diagonal form, inplacely.
Definition UniTensor.hpp:4295
UniTensor reshape(const std::vector< cytnx_int64 > &new_shape, const cytnx_uint64 &rowrank=0)
Reshape the UniTensor.
Definition UniTensor.hpp:4256
std::vector< Tensor > get_blocks() const
Get all the blocks of the UniTensor.
Definition UniTensor.hpp:4110
void combineBond(const std::vector< std::string > &indicators, const bool &force=false)
Combine several bonds of the UniTensor.
Definition UniTensor.hpp:4347
UniTensor permute(const std::initializer_list< char * > &mapper, const cytnx_int64 &rowrank=-1) const
Definition UniTensor.hpp:3544
bool is_tag() const
To tell whether the UniTensor is tagged. That is, all of the Bond in the UniTensor is directional (al...
Definition UniTensor.hpp:3039
std::string uten_type_str() const
Return the UniTensor type (cytnx::UTenType) of the UniTensor in 'string' form.
Definition UniTensor.hpp:3019
void permute_nosignflip_(const std::vector< cytnx_int64 > &mapper, const cytnx_int64 &rowrank=-1)
permute the legs of the UniTensor without fermionic sign flips, inplacely.
Definition UniTensor.hpp:3642
UniTensor permute_nosignflip(const std::vector< std::string > &mapper, const cytnx_int64 &rowrank=-1) const
permute the legs of the UniTensor by labels without sign flips
Definition UniTensor.hpp:3610
UniTensor(const std::vector< Bond > &bonds, const std::vector< std::string > &in_labels={}, const cytnx_int64 &rowrank=-1, const unsigned int &dtype=Type.Double, const int &device=Device.cpu, const bool &is_diag=false, const std::string &name="")
Construct a UniTensor.
Definition UniTensor.hpp:2689
Tensor get_block(const std::vector< cytnx_int64 > &qidx, const bool &force=false) const
Get the block of the UniTensor for the given quantum indices.
Definition UniTensor.hpp:3922
UniTensor & tag()
Set the UniTensor as a tagged UniTensor.
Definition UniTensor.hpp:5004
const Bond & bond_(const cytnx_uint64 &idx) const
Definition UniTensor.hpp:3080
void combineBonds(const std::vector< cytnx_int64 > &indicators, const bool &force=false)
Definition UniTensor.hpp:4333
UniTensor & relabel_(const std::string &old_label, const std::string &new_label)
relabel the legs in the UniTensor by a given label.
Definition UniTensor.hpp:3472
const T & at(const std::vector< std::string > &labels, const std::vector< cytnx_uint64 > &locator) const
Definition UniTensor.hpp:3780
UniTensor & relabels_(const std::initializer_list< char * > &new_labels)
Definition UniTensor.hpp:3267
UniTensor(const Tensor &in_tensor, const bool &is_diag=false, const cytnx_int64 &rowrank=-1, const std::vector< std::string > &in_labels={}, const std::string &name="")
Construct a UniTensor from a cytnx::Tensor.
Definition UniTensor.hpp:2638
Tensor & get_block_(const std::vector< cytnx_int64 > &qidx, const bool &force=false)
Get the shared view of block for the given quantum indices.
Definition UniTensor.hpp:4001
void Init(const Tensor &in_tensor, const bool &is_diag=false, const cytnx_int64 &rowrank=-1, const std::vector< std::string > &in_labels={}, const std::string &name="")
Initialize a UniTensor from a cytnx::Tensor.
Definition UniTensor.hpp:2661
static UniTensor zeros(const std::vector< cytnx_uint64 > &shape, const std::vector< std::string > &in_labels={}, const unsigned int &dtype=Type.Double, const int &device=Device.cpu, const std::string &name="")
Generate a UniTensor with all elements set to zero.
Definition UniTensor.hpp:5226
UniTensor get(const std::vector< Accessor > &accessors) const
Definition UniTensor.hpp:4231
T & at(const std::vector< std::string > &labels, const std::vector< cytnx_uint64 > &locator)
Definition UniTensor.hpp:3802
UniTensor Conj() const
Apply complex conjugate on each entry of the UniTensor.
Definition UniTensor.hpp:4829
UniTensor & set_label(const std::string &old_label, const std::string &new_label)
set a new label for bond to replace one of the current label.
Definition UniTensor.hpp:2853
UniTensor set_rowrank(const cytnx_uint64 &new_rowrank) const
Definition UniTensor.hpp:2928
void Save(const std::string &fname) const
save a UniTensor to file
UniTensor & operator-=(const Scalar &rhs)
The subtraction assignment operator for a given scalar.
Definition UniTensor.hpp:4787
cytnx_uint64 rowrank() const
Return the row rank of the UniTensor.
Definition UniTensor.hpp:2970
UniTensor & relabels_(const std::vector< std::string > &old_labels, const std::vector< std::string > &new_labels)
Definition UniTensor.hpp:3337
UniTensor Sub(const Scalar &rhs) const
The subtraction function for a given scalar.
Tensor Norm() const
Return the norm of the UniTensor.
Definition UniTensor.hpp:4680
UniTensor Mul(const Scalar &rhs) const
The multiplication function for a given scalar.
UniTensor & set_labels(const std::initializer_list< char * > &new_labels)
Definition UniTensor.hpp:2907
UniTensor & relabel_(const std::initializer_list< char * > &new_labels)
Definition UniTensor.hpp:3249
UniTensor & Sub_(const Scalar &rhs)
The subtraction function for a given scalar.
Definition UniTensor.hpp:4532
UniTensor & relabel_(const cytnx_int64 &inx, const std::string &new_label)
relabel the leg of the UniTensor at the given index.
Definition UniTensor.hpp:3459
UniTensor & operator/=(const Scalar &rhs)
The division assignment operator for a given scalar.
Definition UniTensor.hpp:4802
const bool & is_braket_form() const
Check whether the UniTensor is in braket form.
Definition UniTensor.hpp:3054
void set_elem(const std::vector< cytnx_uint64 > &locator, const T2 &rc)
Definition UniTensor.hpp:5055
UniTensor Add(const Scalar &rhs) const
The addition function for a given scalar.
static UniTensor linspace(const cytnx_double &start, const cytnx_double &end, const cytnx_uint64 &Nelem, const bool &endpoint=true, const std::vector< std::string > &in_labels={}, const unsigned int &dtype=Type.Double, const int &device=Device.cpu, const std::string &name="")
Generate a one-bond UniTensor whose elements are evenly spaced numbers over a specified interval.
Definition UniTensor.hpp:5376
std::vector< cytnx_uint64 > shape() const
Get the shape of the UniTensor.
Definition UniTensor.hpp:3093
UniTensor to_dense()
Convert the UniTensor to non-diagonal form.
Definition UniTensor.hpp:4285
UniTensor & permute_(const std::vector< cytnx_int64 > &mapper, const cytnx_int64 &rowrank=-1)
permute the legs of the UniTensor, inplacely.
Definition UniTensor.hpp:3567
Scalar::Sproxy at(const std::vector< std::string > &labels, const std::vector< cytnx_uint64 > &locator)
Definition UniTensor.hpp:3861
static UniTensor normal(const std::vector< cytnx_uint64 > &shape, const double &mean, const double &std, const std::vector< std::string > &in_labels={}, const unsigned int &seed=cytnx::random::__static_random_device(), const unsigned int &dtype=Type.Double, const int &device=Device.cpu, const std::string &name="")
Generate a UniTensor with all elements are random numbers sampled from a normal (Gaussian) distributi...
const Tensor & get_block_(const std::vector< cytnx_int64 > &qidx, const bool &force=false) const
Definition UniTensor.hpp:4077
UniTensor & set_labels(const std::vector< std::string > &new_labels)
Set new labels for all the bonds.
Definition UniTensor.hpp:2898
std::string name() const
Return the name of the UniTensor.
Definition UniTensor.hpp:2998
const Scalar::Sproxy at(const std::vector< std::string > &labels, const std::vector< cytnx_uint64 > &locator) const
Definition UniTensor.hpp:3883
UniTensor & Add_(const Scalar &rhs)
The addition function for a given scalar.
Definition UniTensor.hpp:4502
UniTensor & Trace_(const std::string &a, const std::string &b)
Take the partial trace of the UniTensor, inplacely.
Definition UniTensor.hpp:4938
UniTensor & truncate_(const cytnx_int64 &bond_idx, const cytnx_uint64 &dim)
truncate bond dimension of the UniTensor by the given bond index and dimension.
Definition UniTensor.hpp:5117
Scalar::Sproxy at(const std::vector< cytnx_uint64 > &locator)
Get an element at specific location.
Definition UniTensor.hpp:3846
UniTensor Trace(const std::string &a, const std::string &b) const
Take the partial trace of the UniTensor.
Definition UniTensor.hpp:4908
bool same_data(const UniTensor &rhs) const
Check whether the Blocks address are the same.
Definition UniTensor.hpp:4401
UniTensor astype(const unsigned int &dtype) const
Return a new UniTensor that cast to different data type.
Definition UniTensor.hpp:3503
static UniTensor ones(const std::vector< cytnx_uint64 > &shape, const std::vector< std::string > &in_labels={}, const unsigned int &dtype=Type.Double, const int &device=Device.cpu, const std::string &name="")
Generate a UniTensor with all elements set to one.
Definition UniTensor.hpp:5307
UniTensor truncate(const std::string &label, const cytnx_uint64 &dim) const
truncate bond dimension of the UniTensor by the given bond label and dimension.
Definition UniTensor.hpp:5131
UniTensor Transpose() const
Take the transpose of the UniTensor.
Definition UniTensor.hpp:4859
static UniTensor arange(const cytnx_double &start, const cytnx_double &end, const cytnx_double &step=1, const std::vector< std::string > &in_labels={}, const unsigned int &dtype=Type.Double, const int &device=Device.cpu, const std::string &name="")
Generate a UniTensor whose elements range from start to end (arange).
Definition UniTensor.hpp:5349
UniTensor & set_label(const char *old_label, const std::string &new_label)
Definition UniTensor.hpp:2861
static UniTensor Load(const char *fname)
load a UniTensor from file
UniTensor & Dagger_()
Take the conjugate transpose to the UniTensor, inplacely.
Definition UniTensor.hpp:4991
std::vector< Bond > & bonds()
Definition UniTensor.hpp:3078
T & at(const std::vector< cytnx_uint64 > &locator)
Get an element at specific location.
Definition UniTensor.hpp:3733
UniTensor & set_rowrank_(const cytnx_uint64 &new_rowrank)
Set the rowrank of the UniTensor.
Definition UniTensor.hpp:2923
void put_block(Tensor &in, const std::vector< std::string > &lbls, const std::vector< cytnx_int64 > &qidx, const bool &force=false)
Put the block into the UniTensor with given quantum indices, will copy the input tensor.
Definition UniTensor.hpp:4154
void permute_nosignflip_(const std::vector< std::string > &mapper, const cytnx_int64 &rowrank=-1)
permute the legs of the UniTensor without fermionic sign flips, inplacely.
Definition UniTensor.hpp:3656
UniTensor & set_label(const cytnx_int64 &idx, const std::string &new_label)
Set a new label for bond at the assigned index.
Definition UniTensor.hpp:2830
UniTensor permute_nosignflip(const std::vector< cytnx_int64 > &mapper, const cytnx_int64 &rowrank=-1) const
permute the legs of the UniTensor without sign flips
Definition UniTensor.hpp:3594
UniTensor contiguous() const
Make the UniTensor contiguous by coalescing the memory (storage).
Definition UniTensor.hpp:3678
Tensor get_block(const cytnx_uint64 &idx=0) const
Get the block of the UniTensor for a given index.
Definition UniTensor.hpp:3911
const T & at(const std::vector< cytnx_uint64 > &locator) const
Get an element at specific location.
Definition UniTensor.hpp:3759
UniTensor & operator*=(const Scalar &rhs)
The multiplication assignment operator for a given scalar.
Definition UniTensor.hpp:4817
UniTensor relabels(const std::vector< std::string > &old_labels, const std::vector< std::string > &new_labels) const
Definition UniTensor.hpp:3300
void contiguous_()
Make the UniTensor contiguous by coalescing the memory (storage), inplacely.
Definition UniTensor.hpp:3688
UniTensor normalize() const
normalize the current UniTensor instance with 2-norm.
Definition UniTensor.hpp:4882
static UniTensor ones(const cytnx_uint64 &Nelem, const std::vector< std::string > &in_labels={}, const unsigned int &dtype=Type.Double, const int &device=Device.cpu, const std::string &name="")
Generate a one-bond UniTensor with all elements set to one.
Definition UniTensor.hpp:5244
UniTensor & relabel_(const std::vector< std::string > &new_labels)
Set new labels for all the bonds.
Definition UniTensor.hpp:3162
std::vector< Symmetry > syms() const
Return the symmetry type of the UniTensor.
Definition UniTensor.hpp:3046
UniTensor relabel(const std::vector< std::string > &new_labels) const
relabel all of the labels in UniTensor.
Definition UniTensor.hpp:3193
UniTensor & Mul_(const UniTensor &rhs)
The multiplication function of the UniTensor.
Definition UniTensor.hpp:4445
static UniTensor zeros(const cytnx_uint64 &Nelem, const std::vector< std::string > &in_labels={}, const unsigned int &dtype=Type.Double, const int &device=Device.cpu, const std::string &name="")
Generate a one-bond UniTensor with all elements set to zero.
Definition UniTensor.hpp:5206
void put_block_(Tensor &in, const cytnx_uint64 &idx=0)
Put the block into the UniTensor with given index, inplacely.
Definition UniTensor.hpp:4186
void combineBonds(const std::vector< std::string > &indicators, const bool &force=false)
Definition UniTensor.hpp:4320
UniTensor relabel(const std::string &old_label, const std::string &new_label) const
relabel the legs in the UniTensor by a given label.
Definition UniTensor.hpp:3491
UniTensor & normalize_()
normalize the UniTensor, inplacely.
Definition UniTensor.hpp:4894
bool is_diag() const
To tell whether the UniTensor is in diagonal form.
Definition UniTensor.hpp:3032
int device() const
Return the device of the UniTensor.
Definition UniTensor.hpp:2992
UniTensor & Pow_(const double &p)
Power function.
const Tensor & get_block_(const std::vector< cytnx_uint64 > &qidx, const bool &force=false) const
Definition UniTensor.hpp:4094
Tensor get_block_(const std::vector< std::string > &labels, const std::vector< cytnx_int64 > &qidx, const bool &force=false)
Get the shared (data) view of block for the given quantum indices on given labels.
Definition UniTensor.hpp:4021
void put_block_(Tensor &in, const std::vector< std::string > &lbls, const std::vector< cytnx_int64 > &qidx, const bool &force=false)
Put the block into the UniTensor with given quantum indices, inplacely.
Definition UniTensor.hpp:4202
void set(const std::vector< Accessor > &accessors, const Tensor &rhs)
Definition UniTensor.hpp:4236
std::string dtype_str() const
Return the data type of the UniTensor in 'string' form.
Definition UniTensor.hpp:3005
UniTensor & operator+=(const Scalar &rhs)
The addition assignment operator for a given scalar.
Definition UniTensor.hpp:4772
UniTensor & Div_(const UniTensor &rhs)
The division function of the UniTensor.
Definition UniTensor.hpp:4487
UniTensor & Div_(const Scalar &rhs)
The division function for a given scalar.
Definition UniTensor.hpp:4547
Bond & bond_(const std::string &label)
Definition UniTensor.hpp:3084
cytnx_uint64 rank() const
Return the rank of the UniTensor.
Definition UniTensor.hpp:2964
Bond bond(const std::string &label) const
Definition UniTensor.hpp:3087
std::vector< bool > & signflip_()
Get reference to the sign information of a fermionic UniTensor.
Definition UniTensor.hpp:3111
void group_basis_()
Group the same quantum number basis together.
Definition UniTensor.hpp:3719
UniTensor permute_nosignflip(const std::initializer_list< char * > &mapper, const cytnx_int64 &rowrank=-1) const
Definition UniTensor.hpp:3621
void to_(const int &device)
move the current UniTensor to the assigned device (inplace).
Definition UniTensor.hpp:3126
UniTensor & Trace_(const cytnx_int64 &a=0, const cytnx_int64 &b=1)
Take the partial trace of the UniTensor, inplacely.
Definition UniTensor.hpp:4960
UniTensor relabel(const std::vector< std::string > &old_labels, const std::vector< std::string > &new_labels) const
replace part or all labels by given new labels for the bonds.
Definition UniTensor.hpp:3284
UniTensor & set_name(const std::string &in)
Set the name of a UniTensor.
Definition UniTensor.hpp:2815
UniTensor Trace(const cytnx_int64 &a=0, const cytnx_int64 &b=1) const
Take the partial trace of the UniTensor.
Definition UniTensor.hpp:4923
UniTensor & Add_(const UniTensor &rhs)
The addition function of the UniTensor.
Definition UniTensor.hpp:4424
void normal_(const double &mean, const double &std, const unsigned int &seed=cytnx::random::__static_random_device())
Generate a one-bond UniTensor with all elements are random numbers sampled from a normal (Gaussian) d...
UniTensor permute(const std::vector< cytnx_int64 > &mapper, const cytnx_int64 &rowrank=-1) const
permute the legs of the UniTensor
Definition UniTensor.hpp:3521
UniTensor & set_label(const std::string &old_label, const char *new_label)
Definition UniTensor.hpp:2869
UniTensor permute(const std::vector< std::string > &mapper, const cytnx_int64 &rowrank=-1) const
permute the legs of the UniTensor by labels
Definition UniTensor.hpp:3534
unsigned int dtype() const
Return the data type of the UniTensor.
Definition UniTensor.hpp:2977
UniTensor Pow(const double &p) const
Power function.
UniTensor & Mul_(const Scalar &rhs)
The multiplication function for a given scalar.
Definition UniTensor.hpp:4517
UniTensor & operator-=(const UniTensor &rhs)
The subtraction assignment operator of the UniTensor.
Definition UniTensor.hpp:4717
Tensor get_block_(const std::vector< std::string > &labels, const std::vector< cytnx_uint64 > &qidx, const bool &force=false)
Definition UniTensor.hpp:4066
void combineBonds(const std::vector< cytnx_int64 > &indicators, const bool &force, const bool &by_label)
Definition UniTensor.hpp:4306
UniTensor & set_label(const char *old_label, const char *new_label)
Definition UniTensor.hpp:2877
static UniTensor arange(const cytnx_int64 &Nelem, const std::vector< std::string > &in_labels={}, const std::string &name="")
Generate a one-bond UniTensor whose elements range from 0 to Nelem-1.
Definition UniTensor.hpp:5327
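For example, a sketch of this one-bond factory (the label "k" is arbitrary):

#include "cytnx.hpp"
using namespace cytnx;

int main() {
  // one-bond UniTensor holding the values 0, 1, ..., 9 on a single leg labeled "k"
  UniTensor U = UniTensor::arange(10, {"k"});
  U.print_diagram();
  U.print_blocks();
  return 0;
}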
bool is_blockform() const
Check whether the UniTensor is in block form.
Definition UniTensor.hpp:3118
UniTensor group_basis() const
Definition UniTensor.hpp:3721
UniTensor Dagger() const
Take the conjugate transpose of the UniTensor.
Definition UniTensor.hpp:4979
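A brief sketch of the conjugation/transposition members, assuming a complex-typed dense UniTensor created through the bonds/labels/rowrank/dtype constructor that mirrors the Init signature listed in this index:

#include "cytnx.hpp"
using namespace cytnx;

int main() {
  // rank-2 complex UniTensor with rowrank 1 (elements are zero-initialized)
  UniTensor M({Bond(2), Bond(2)}, {"i", "j"}, 1, Type.ComplexDouble);

  UniTensor Md = M.Dagger();   // conjugate transpose, returned as a new UniTensor
  M.Conj_();                   // in-place complex conjugation of every entry
  M.Transpose_();              // in-place transpose
  Md.print_diagram();
  return 0;
}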
static UniTensor uniform(const std::vector< cytnx_uint64 > &shape, const double &low, const double &high, const std::vector< std::string > &in_labels={}, const unsigned int &seed=cytnx::random::__static_random_device(), const unsigned int &dtype=Type.Double, const int &device=Device.cpu, const std::string &name="")
Generate a UniTensor whose elements are random numbers sampled from a uniform distribution.
void set(const std::vector< Accessor > &accessors, const UniTensor &rhs)
Definition UniTensor.hpp:4239
const std::vector< Tensor > & get_blocks_(const bool &silent=false) const
Get shared (data) views of all the blocks of the UniTensor.
Definition UniTensor.hpp:4118
UniTensor & relabels_(const std::initializer_list< char * > &old_labels, const std::initializer_list< char * > &new_labels)
Definition UniTensor.hpp:3416
const Tensor & get_block_(const std::initializer_list< cytnx_int64 > &qidx, const bool &force=false) const
Definition UniTensor.hpp:4085
bool elem_exists(const std::vector< cytnx_uint64 > &locator) const
Given the locator, check if the element exists.
Definition UniTensor.hpp:5035
UniTensor & Sub_(const UniTensor &rhs)
The subtraction function of the UniTensor.
Definition UniTensor.hpp:4466
UniTensor relabels(const std::initializer_list< char * > &new_labels) const
Definition UniTensor.hpp:3236
UniTensor contract(const UniTensor &inR, const bool &mv_elem_self=false, const bool &mv_elem_rhs=false) const
Contract the UniTensor with another UniTensor over their common labels.
Definition UniTensor.hpp:4369
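A minimal contraction sketch: the shared label "k" is summed over, both via the member function and via the free function Contract listed further below (names and dimensions are illustrative):

#include "cytnx.hpp"
using namespace cytnx;

int main() {
  UniTensor A({Bond(2), Bond(3)}, {"i", "k"});
  UniTensor B({Bond(3), Bond(4)}, {"k", "j"});
  A.uniform_();
  B.uniform_();

  UniTensor C = A.contract(B);     // contracts over the common label "k" -> legs "i", "j"
  UniTensor D = Contract(A, B);    // equivalent free-function form
  C.print_diagram();
  D.print_diagram();
  return 0;
}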
void put_block(const Tensor &in, const cytnx_uint64 &idx=0)
Put the block into the UniTensor at the given index.
Definition UniTensor.hpp:4135
const Tensor & get_block_(const cytnx_uint64 &idx=0) const
Get the shared view of the block for the given index.
Definition UniTensor.hpp:3983
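A small sketch distinguishing the copying and shared-view block accessors on a dense UniTensor; the UniTensor(Tensor) convenience constructor and the index-based get_block overload are assumed from the wider API rather than taken from this excerpt:

#include "cytnx.hpp"
using namespace cytnx;

int main() {
  // dense rank-1 UniTensor wrapping a Tensor with six elements
  UniTensor A = UniTensor(arange(6));

  Tensor copy = A.get_block();     // independent copy of block 0 (assumed overload)
  Tensor &view = A.get_block_();   // shared view of block 0: no data is copied

  A.put_block(ones(6), 0);         // copy an external Tensor into block 0
  A.print_blocks();
  return 0;
}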
UniTensor & relabel_(const std::vector< std::string > &old_labels, const std::vector< std::string > &new_labels)
relabel part or all of the labels in the UniTensor with the given new labels
Definition UniTensor.hpp:3322
static UniTensor Load(const std::string &fname)
load a UniTensor from file
UniTensor & permute_(const std::vector< std::string > &mapper, const cytnx_int64 &rowrank=-1)
permute the legs of the UniTensor, in place.
Definition UniTensor.hpp:3578
const Bond & bond_(const std::string &label) const
Definition UniTensor.hpp:3083
static UniTensor identity(const cytnx_uint64 &dim, const std::vector< std::string > &in_labels={}, const cytnx_bool &is_diag=false, const unsigned int &dtype=Type.Double, const int &device=Device.cpu, const std::string &name="")
Generate an identity UniTensor.
Definition UniTensor.hpp:5262
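For instance (a sketch; the labels are arbitrary, and is_diag=true stores only the diagonal):

#include "cytnx.hpp"
using namespace cytnx;

int main() {
  // 4x4 identity UniTensor, stored diagonally
  UniTensor Id = UniTensor::identity(4, {"bra", "ket"}, true);
  Id.print_diagram();
  return 0;
}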
Bond & bond_(const cytnx_uint64 &idx)
Definition UniTensor.hpp:3081
static UniTensor normal(const cytnx_uint64 &Nelem, const double &mean, const double &std, const std::vector< std::string > &in_labels={}, const unsigned int &seed=cytnx::random::__static_random_device(), const unsigned int &dtype=Type.Double, const int &device=Device.cpu, const std::string &name="")
Generate a one-bond UniTensor whose elements are random numbers sampled from a normal (Gaussian) distribution.
UniTensor relabel(const cytnx_int64 &inx, const std::string &new_label) const
relabel a leg of the UniTensor by the given index.
Definition UniTensor.hpp:3446
UniTensor Mul(const UniTensor &rhs) const
The multiplication function of the UniTensor.
void uniform_(const double &low=0, const double &high=1, const unsigned int &seed=cytnx::random::__static_random_device())
Generate a UniTensor whose elements are random numbers sampled from a uniform distribution,...
UniTensor & reshape_(const std::vector< cytnx_int64 > &new_shape, const cytnx_uint64 &rowrank=0)
Reshape the UniTensor, in place.
Definition UniTensor.hpp:4268
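A sketch of in-place reshaping on a dense UniTensor built from a plain Tensor (the UniTensor(Tensor) constructor is assumed from the wider API):

#include "cytnx.hpp"
#include <vector>
using namespace cytnx;

int main() {
  UniTensor T = UniTensor(arange(12));       // rank-1 dense UniTensor with 12 elements

  std::vector<cytnx_int64> new_shape = {3, 4};
  T.reshape_(new_shape, 1);                  // now a 3x4 UniTensor with rowrank 1
  T.print_diagram();
  return 0;
}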
void print_diagram(const bool &bond_info=false) const
Plot the diagram of the UniTensor.
Definition UniTensor.hpp:3694
UniTensor relabels(const std::initializer_list< char * > &old_labels, const std::initializer_list< char * > &new_labels) const
Definition UniTensor.hpp:3372
const std::vector< cytnx_uint64 > & get_qindices(const cytnx_uint64 &bidx) const
get the q-indices on each leg for the [bidx]-th block
Definition UniTensor.hpp:5159
UniTensor Div(const Scalar &rhs) const
The division function for a given scalar.
const std::vector< std::string > & labels() const
Return the labels of the UniTensor.
Definition UniTensor.hpp:3060
UniTensor & relabel_(const std::initializer_list< char * > &old_labels, const std::initializer_list< char * > &new_labels)
Definition UniTensor.hpp:3391
cytnx_int64 get_index(std::string label) const
Get the index of a desired label string.
Definition UniTensor.hpp:3067
const std::vector< Bond > & bonds() const
Get the bonds of the UniTensor.
Definition UniTensor.hpp:3073
Tensor get_block(const std::vector< std::string > &labels, const std::vector< cytnx_uint64 > &qidx, const bool &force=false) const
Definition UniTensor.hpp:3972
UniTensor truncate(const cytnx_int64 &bond_idx, const cytnx_uint64 &dim) const
truncate the bond dimension of the UniTensor at the given bond index to the given dimension.
Definition UniTensor.hpp:5146
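A short sketch of bond truncation, addressed either by bond index or by label (dimensions are illustrative):

#include "cytnx.hpp"
using namespace cytnx;

int main() {
  UniTensor T({Bond(5), Bond(8)}, {"l", "r"});
  T.normal_(0., 1.);

  UniTensor S = T.truncate(1, 4);   // truncate leg 1 down to dimension 4, returned as a new UniTensor
  T.truncate_("r", 4);              // in-place variant, addressed by label
  S.print_diagram();
  return 0;
}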
UniTensor & Transpose_()
Take the transpose of the UniTensor, in place.
Definition UniTensor.hpp:4871
static UniTensor uniform(const cytnx_uint64 &Nelem, const double &low, const double &high, const std::vector< std::string > &in_labels={}, const unsigned int &seed=cytnx::random::__static_random_device(), const unsigned int &dtype=Type.Double, const int &device=Device.cpu, const std::string &name="")
Generate a one-bond UniTensor whose elements are random numbers sampled from a uniform distribution.
UniTensor clone() const
Clone (deep copy) the UniTensor.
Definition UniTensor.hpp:3147
vec2d< cytnx_uint64 > & get_itoi()
Definition UniTensor.hpp:5180
void print_blocks(const bool &full_info=true) const
Print all of the blocks in the UniTensor.
Definition UniTensor.hpp:3702
Scalar::Sproxy item() const
Definition UniTensor.hpp:2945
UniTensor & convert_from(const UniTensor &rhs, const bool &force=false, const cytnx_double &tol=1e-14)
Definition UniTensor.hpp:5187
const vec2d< cytnx_uint64 > & get_itoi() const
get the q-indices on each leg for all the blocks
Definition UniTensor.hpp:5179
std::string device_str() const
Return the device of the UniTensor in 'string' form.
Definition UniTensor.hpp:3012
UniTensor relabels(const std::vector< std::string > &new_labels) const
Definition UniTensor.hpp:3207
void put_block_(Tensor &in, const std::vector< cytnx_int64 > &qidx, const bool &force)
Put the block into the UniTensor at the given quantum indices, in place.
Definition UniTensor.hpp:4194
UniTensor relabel(const std::initializer_list< char * > &old_labels, const std::initializer_list< char * > &new_labels) const
Definition UniTensor.hpp:3347
UniTensor Sub(const UniTensor &rhs) const
The subtraction function of the UniTensor.
void Init(const std::vector< Bond > &bonds, const std::vector< std::string > &in_labels={}, const cytnx_int64 &rowrank=-1, const unsigned int &dtype=Type.Double, const int &device=Device.cpu, const bool &is_diag=false, const std::string &name="")
Initialize the UniTensor with the given arguments.
Definition UniTensor.hpp:2740
UniTensor & Conj_()
Apply the complex conjugate to each entry of the UniTensor.
Definition UniTensor.hpp:4842
UniTensor relabel(const std::initializer_list< char * > &new_labels) const
relabel all of the labels in the UniTensor.
Definition UniTensor.hpp:3217
cytnx_uint64 Nblocks() const
Return the number of blocks in the UniTensor.
Definition UniTensor.hpp:2958
Tensor get_block(const std::vector< cytnx_uint64 > &qnum, const bool &force=false) const
Definition UniTensor.hpp:3967
void Save(const char *fname) const
save a UniTensor to file
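A minimal save/load round-trip sketch; how the on-disk extension is handled is an assumption here (the ".cytnx" suffix is commonly appended by Save), so treat the file names as illustrative:

#include "cytnx.hpp"
using namespace cytnx;

int main() {
  UniTensor T({Bond(2), Bond(2)}, {"i", "j"});
  T.set_name("demo");
  T.Save("demo_ut");                               // assumed to write demo_ut.cytnx

  UniTensor T2 = UniTensor::Load("demo_ut.cytnx"); // read it back from disk
  T2.print_diagram();
  return 0;
}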
Bond bond(const cytnx_uint64 &idx) const
Definition UniTensor.hpp:3086
UniTensor Add(const UniTensor &rhs) const
The addition function of the UniTensor.
UniTensor & truncate_(const std::string &label, const cytnx_uint64 &dim)
truncate the bond dimension of the UniTensor at the given bond label to the given dimension.
Definition UniTensor.hpp:5104
Tensor get_block(const std::initializer_list< cytnx_int64 > &qnum, const bool &force=false) const
Definition UniTensor.hpp:3957
UniTensor & relabels_(const std::vector< std::string > &new_labels)
Definition UniTensor.hpp:3177
const Scalar::Sproxy at(const std::vector< cytnx_uint64 > &locator) const
Get an element at the specified location.
Definition UniTensor.hpp:3827
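A sketch of element access through the proxy returned by at(); the writable (non-const) at overload and proxy assignment from a double are assumed alongside the const overload listed here:

#include "cytnx.hpp"
#include <vector>
using namespace cytnx;

int main() {
  UniTensor T({Bond(2), Bond(3)}, {"i", "j"});

  std::vector<cytnx_uint64> loc = {0, 1};
  T.at(loc) = 3.14;        // write element (0,1) through the returned proxy
  T.print_blocks();        // inspect the stored values
  return 0;
}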
int uten_type() const
Return the UniTensor type (cytnx::UTenType) of the UniTensor.
Definition UniTensor.hpp:2985
Tensor & get_block_(const cytnx_uint64 &idx=0)
Definition UniTensor.hpp:3991
#define cytnx_warning_msg(is_true, format,...)
Definition cytnx_error.hpp:52
#define cytnx_error_msg(is_true, format,...)
Definition cytnx_error.hpp:18
cytnx::UniTensor Conj(const cytnx::UniTensor &UT)
Elementwise conjugate of the UniTensor.
cytnx::UniTensor Trace(const cytnx::UniTensor &Tin, const cytnx_int64 &a=0, const cytnx_int64 &b=1)
void Conj_(cytnx::UniTensor &UT)
Inplace elementwise conjugate of the UniTensor.
Tensor Norm(const Tensor &Tl)
Calculate the norm of a tensor.
std::random_device __static_random_device
Definition UniTensor.hpp:29
Definition Accessor.hpp:12
Device_class Device
the devices on which data can be stored.
Tensor linspace(const cytnx_double &start, const cytnx_double &end, const cytnx_uint64 &Nelem, const bool &endpoint=true, const unsigned int &dtype=Type.Double, const int &device=Device.cpu)
UniTensorType_class UTenType
UniTensor type.
@ Void
Definition Symmetry.hpp:32
Tensor arange(const cytnx_int64 &Nelem)
create a rank-1 Tensor with incremental unsigned integer elements in [0, Nelem)
UniTensor Contract(const UniTensor &inL, const UniTensor &inR, const bool &cacheL=false, const bool &cacheR=false)
Contract two UniTensors by tracing over the legs with common labels.
Tensor ones(const cytnx_uint64 &Nelem, const unsigned int &dtype=Type.Double, const int &device=Device.cpu)
create a rank-1 Tensor with all elements initialized to one.
UniTensor Contracts(const std::vector< UniTensor > &TNs, const std::string &order, const bool &optimal)
Tensor zeros(const cytnx_uint64 &Nelem, const unsigned int &dtype=Type.Double, const int &device=Device.cpu)
create a rank-1 Tensor with all elements initialized to zero.
Tensor identity(const cytnx_uint64 &Dim, const unsigned int &dtype=Type.Double, const int &device=Device.cpu)
create a square rank-2 Tensor with ones on the diagonal.
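Finally, a small sketch of the plain Tensor factory functions listed above (the explicit dtype/device arguments are optional and shown only for illustration):

#include "cytnx.hpp"
#include <iostream>
using namespace cytnx;

int main() {
  Tensor a = arange(5);                          // rank-1: 0, 1, 2, 3, 4
  Tensor z = zeros(4);                           // rank-1, four zeros
  Tensor o = ones(3, Type.Double, Device.cpu);   // explicit dtype and device
  Tensor l = linspace(0., 1., 11);               // 11 points from 0 to 1, endpoint included
  Tensor id = identity(4);                       // 4x4 identity matrix
  std::cout << l << std::endl;
  return 0;
}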