From 9744aa6dba026ae0a257b9f2fec108685ac0e09e Mon Sep 17 00:00:00 2001 From: Manuel Schneider Date: Fri, 5 Dec 2025 16:29:39 +0800 Subject: [PATCH 01/10] replaced .relabels(_) by .relabel(_) --- Readme.md | 4 +- benchmarks/linalg/Arnoldi_bm.cpp | 8 +- developer_tools/dev_test.py | 2 +- ...xGuide_PythonCodeExamples_2023_06_30.ipynb | 80 +++++++++---------- .../guide_contraction_contract_Contract.py | 4 +- .../guide_contraction_contract_Contracts.py | 6 +- .../guide_contraction_contract_relabels.py | 4 +- .../guide_contraction_network_label_ord-1.py | 4 +- .../doc_codes/guide_uniten_labels_relabel_.py | 2 +- docs/source/example/DMRG.rst | 36 ++++----- docs/source/example/iDMRG.rst | 4 +- docs/source/example/iTEBD.rst | 48 +++++------ docs/source/guide/contraction/contract.rst | 4 +- docs/source/guide/uniten/labels.rst | 4 +- dox.md | 4 +- example/TDVP/tdvp1_dense.py | 32 ++++---- pybind/unitensor_py.cpp | 8 +- src/RegularGncon.cpp | 2 +- src/RegularNetwork.cpp | 4 +- src/linalg/Lanczos_Exp.cpp | 36 ++++----- src/tn_algo/DMRG.cpp | 8 +- src/tn_algo/RegularMPS.cpp | 6 +- tests/BlockUniTensor_test.cpp | 28 +++---- tests/DenseUniTensor_test.cpp | 32 ++++---- tests/OLDtest.cpp | 4 +- tests/gpu/BlockUniTensor_test.cpp | 56 ++++++------- tests/gpu/DenseUniTensor_test.cpp | 28 +++---- tests/gpu/OLDtest.cpp | 4 +- tests/gpu/linalg_test/Arnoldi_Ut_test.cpp | 10 +-- tests/linalg_test/Arnoldi_Ut_test.cpp | 8 +- tests/linalg_test/Lanczos_Exp_test.cpp | 8 +- 31 files changed, 244 insertions(+), 244 deletions(-) diff --git a/Readme.md b/Readme.md index 07aeee09c..e478682d4 100644 --- a/Readme.md +++ b/Readme.md @@ -153,8 +153,8 @@ Tensor A({3,4,5},Type.Double); UniTensor tA = UniTensor(A); // convert directly. UniTensor tB = UniTensor({Bond(3),Bond(4),Bond(5)},{}); // init from scratch. // Relabel the tensor and then contract. 
-tA.relabels_({"common_1", "common_2", "out_a"}); -tB.relabels_({"common_1", "common_2", "out_b"}); +tA.relabel_({"common_1", "common_2", "out_a"}); +tB.relabel_({"common_1", "common_2", "out_b"}); UniTensor out = cytnx::Contract(tA,tB); tA.print_diagram(); tB.print_diagram(); diff --git a/benchmarks/linalg/Arnoldi_bm.cpp b/benchmarks/linalg/Arnoldi_bm.cpp index 90df0e05a..9359ef564 100644 --- a/benchmarks/linalg/Arnoldi_bm.cpp +++ b/benchmarks/linalg/Arnoldi_bm.cpp @@ -11,7 +11,7 @@ namespace BMTest_Arnoldi { const unsigned int& dtype = Type.Double, const int& device = Device.cpu); UniTensor matvec(const UniTensor& l) override { auto tmp = Contracts({A, l, B}, "", true); - tmp.relabels_(l.labels()).set_rowrank(l.rowrank()); + tmp.relabel_(l.labels()).set_rowrank(l.rowrank()); return tmp; } }; @@ -22,15 +22,15 @@ namespace BMTest_Arnoldi { std::vector bonds = {Bond(D), Bond(d), Bond(D)}; A = UniTensor(bonds, {}, -1, in_dtype, in_device) .set_name("A") - .relabels_({"al", "phys", "ar"}) + .relabel_({"al", "phys", "ar"}) .set_rowrank(2); B = UniTensor(bonds, {}, -1, in_dtype, in_device) .set_name("B") - .relabels_({"bl", "phys", "br"}) + .relabel_({"bl", "phys", "br"}) .set_rowrank(2); T_init = UniTensor({Bond(D), Bond(D)}, {}, -1, in_dtype, in_device) .set_name("l") - .relabels_({"al", "bl"}) + .relabel_({"al", "bl"}) .set_rowrank(1); if (Type.is_float(this->dtype())) { double low = -1.0, high = 1.0; diff --git a/developer_tools/dev_test.py b/developer_tools/dev_test.py index b1c32201a..c89438c44 100644 --- a/developer_tools/dev_test.py +++ b/developer_tools/dev_test.py @@ -7,7 +7,7 @@ # uT = cytnx.UniTensor.arange(2*3*4, name="tensor uT") # uT.reshape_(2,3,4); -# uT.relabels_(["a","b","c"]) +# uT.relabel_(["a","b","c"]) # T = cytnx.random.uniform([4,4], low=-1., high=1.) 
# print(T) diff --git a/docs/code/CytnxGuide_PythonCodeExamples_2023_06_30.ipynb b/docs/code/CytnxGuide_PythonCodeExamples_2023_06_30.ipynb index 92976fc2b..9c8137c95 100644 --- a/docs/code/CytnxGuide_PythonCodeExamples_2023_06_30.ipynb +++ b/docs/code/CytnxGuide_PythonCodeExamples_2023_06_30.ipynb @@ -1071,7 +1071,7 @@ "metadata": {}, "outputs": [], "source": [ - "uT=cytnx.UniTensor(cytnx.ones([2,3,4]), name=\"untagged tensor\").relabels_([\"a\",\"b\",\"c\"])\n", + "uT=cytnx.UniTensor(cytnx.ones([2,3,4]), name=\"untagged tensor\").relabel_([\"a\",\"b\",\"c\"])\n", "\n", "bond_d = cytnx.Bond(cytnx.BD_IN, [cytnx.Qs(1)>>1, cytnx.Qs(-1)>>1],[cytnx.Symmetry.U1()])\n", "\n", @@ -1085,9 +1085,9 @@ "\n", "bond_h = cytnx.Bond(2,cytnx.BD_IN)\n", "\n", - "Tsymm = cytnx.UniTensor([bond_d, bond_e, bond_f], name=\"symm. tensor\").relabels_([\"d\",\"e\",\"f\"])\n", + "Tsymm = cytnx.UniTensor([bond_d, bond_e, bond_f], name=\"symm. tensor\").relabel_([\"d\",\"e\",\"f\"])\n", "\n", - "Tdiag= cytnx.UniTensor([bond_g, bond_h], is_diag=True, name=\"diag tensor\").relabels_([\"g\",\"h\"])\n" + "Tdiag= cytnx.UniTensor([bond_g, bond_h], is_diag=True, name=\"diag tensor\").relabel_([\"g\",\"h\"])\n" ] }, { @@ -1238,7 +1238,7 @@ "uT.print_diagram()\n", "\n", "\n", - "uT.relabels_([\"a\",\"b\",\"c\"])\n", + "uT.relabel_([\"a\",\"b\",\"c\"])\n", "\n", "uT.print_diagram()" ] @@ -1443,7 +1443,7 @@ "\n", "bond_f = cytnx.Bond(cytnx.BD_OUT, [cytnx.Qs(2)>>1, cytnx.Qs(0)>>2, cytnx.Qs(-2)>>1],[cytnx.Symmetry.U1()])\n", "\n", - "Tsymm = cytnx.UniTensor([bond_d, bond_e, bond_f], name=\"symm. tensor\").relabels_([\"d\",\"e\",\"f\"])\n", + "Tsymm = cytnx.UniTensor([bond_d, bond_e, bond_f], name=\"symm. tensor\").relabel_([\"d\",\"e\",\"f\"])\n", "\n", "Tsymm.print_diagram()" ] @@ -1495,7 +1495,7 @@ "\n", " [cytnx.Qs(2)>>1, cytnx.Qs(0)>>2, cytnx.Qs(-2)>>1],[cytnx.Symmetry.U1()])\n", "\n", - "Tsymm = cytnx.UniTensor([bond_d, bond_e, bond_f], name=\"symm. 
tensor\").relabels_([\"d\",\"e\",\"f\"])" + "Tsymm = cytnx.UniTensor([bond_d, bond_e, bond_f], name=\"symm. tensor\").relabel_([\"d\",\"e\",\"f\"])" ] }, { @@ -1623,7 +1623,7 @@ "\n", " [cytnx.Qs(2)>>1, cytnx.Qs(0)>>2, cytnx.Qs(-2)>>1],[cytnx.Symmetry.U1()])\n", "\n", - "Tsymm = cytnx.UniTensor([bond_d, bond_e, bond_f], name=\"symm. tensor\").relabels_([\"d\",\"e\",\"f\"])" + "Tsymm = cytnx.UniTensor([bond_d, bond_e, bond_f], name=\"symm. tensor\").relabel_([\"d\",\"e\",\"f\"])" ] }, { @@ -1685,7 +1685,7 @@ "\n", " [cytnx.Qs(2)>>1, cytnx.Qs(0)>>2, cytnx.Qs(-2)>>1],[cytnx.Symmetry.U1()])\n", "\n", - "Tsymm = cytnx.UniTensor([bond_d, bond_e, bond_f], name=\"symm. tensor\").relabels_([\"d\",\"e\",\"f\"])\n", + "Tsymm = cytnx.UniTensor([bond_d, bond_e, bond_f], name=\"symm. tensor\").relabel_([\"d\",\"e\",\"f\"])\n", "\n", "Tsymm.print_diagram()\n", "\n", @@ -1892,11 +1892,11 @@ "source": [ "A = cytnx.UniTensor(cytnx.ones([2,8,8]));\n", "\n", - "A.relabels_([\"phy\", \"left\", \"right\"])\n", + "A.relabel_([\"phy\", \"left\", \"right\"])\n", "\n", "B = cytnx.UniTensor(cytnx.ones([2,8,8]));\n", "\n", - "B.relabels_([\"phy\", \"left\", \"right\"])\n", + "B.relabel_([\"phy\", \"left\", \"right\"])\n", "\n" ] }, @@ -1951,12 +1951,12 @@ "\n", "A = cytnx.UniTensor(cytnx.ones([2,3,4]), rowrank = 1)\n", "\n", - "A.relabels_([\"i\",\"j\",\"l\"])\n", + "A.relabel_([\"i\",\"j\",\"l\"])\n", "\n", "\n", "B = cytnx.UniTensor(cytnx.ones([3,2,4,5]), rowrank = 2)\n", "\n", - "B.relabels_([\"j\",\"k\",\"l\",\"m\"])\n", + "B.relabel_([\"j\",\"k\",\"l\",\"m\"])\n", "\n", "\n", "C = cytnx.Contract(A, B)\n", @@ -1978,16 +1978,16 @@ "source": [ "A = cytnx.UniTensor(cytnx.ones([2,3,4]), rowrank = 1)\n", "\n", - "A.relabels_([\"i\",\"j\",\"l\"])\n", + "A.relabel_([\"i\",\"j\",\"l\"])\n", "\n", - "Are = A.relabels([\"i\",\"j\",\"lA\"])\n", + "Are = A.relabel([\"i\",\"j\",\"lA\"])\n", "\n", "\n", "B = cytnx.UniTensor(cytnx.ones([3,2,4,5]), rowrank = 2)\n", "\n", - 
"B.relabels_([\"j\",\"k\",\"l\",\"m\"])\n", + "B.relabel_([\"j\",\"k\",\"l\",\"m\"])\n", "\n", - "Bre = B.relabels([\"j\",\"k\",\"lB\",\"m\"])\n", + "Bre = B.relabel([\"j\",\"k\",\"lB\",\"m\"])\n", "\n", "\n", "C = cytnx.Contract(Are, Bre)\n", @@ -2018,11 +2018,11 @@ "\n", "# Assign labels\n", "\n", - "A1.relabels_([\"phy1\",\"v1\",\"v2\"])\n", + "A1.relabel_([\"phy1\",\"v1\",\"v2\"])\n", "\n", - "M.relabels_([\"phy1\",\"phy2\",\"v3\",\"v4\"])\n", + "M.relabel_([\"phy1\",\"phy2\",\"v3\",\"v4\"])\n", "\n", - "A2.relabels_([\"phy2\",\"v5\",\"v6\"])\n", + "A2.relabel_([\"phy2\",\"v5\",\"v6\"])\n", "\n", "\n", "# Use Contracts\n", @@ -2587,13 +2587,13 @@ "metadata": {}, "outputs": [], "source": [ - "A.relabels_([\"a\",\"0\",\"b\"])\n", + "A.relabel_([\"a\",\"0\",\"b\"])\n", "\n", - "B.relabels_([\"c\",\"1\",\"d\"])\n", + "B.relabel_([\"c\",\"1\",\"d\"])\n", "\n", - "la.relabels_([\"b\",\"c\"])\n", + "la.relabel_([\"b\",\"c\"])\n", "\n", - "lb.relabels_([\"d\",\"e\"])\n", + "lb.relabel_([\"d\",\"e\"])\n", "\n", "\n", "## contract all\n", @@ -2616,7 +2616,7 @@ "\n", "XH = cytnx.Contract(X,H)\n", "\n", - "XH.relabels_([\"d\",\"e\",\"0\",\"1\"])\n", + "XH.relabel_([\"d\",\"e\",\"0\",\"1\"])\n", "\n", "XHX = cytnx.Contract(Xt,XH).item() ## rank-0\n", "\n", @@ -2665,7 +2665,7 @@ "lb_inv = 1./lb\n", "\n", "\n", - "lb_inv.relabels_([\"e\",\"d\"])\n", + "lb_inv.relabel_([\"e\",\"d\"])\n", "\n", "A = cytnx.Contract(lb_inv,A)\n", "\n", @@ -2689,13 +2689,13 @@ "for i in range(10000):\n", "\n", "\n", - " A.relabels_(['a','0','b'])\n", + " A.relabel_(['a','0','b'])\n", "\n", - " B.relabels_(['c','1','d'])\n", + " B.relabel_(['c','1','d'])\n", "\n", - " la.relabels_(['b','c'])\n", + " la.relabel_(['b','c'])\n", "\n", - " lb.relabels_(['d','e'])\n", + " lb.relabel_(['d','e'])\n", "\n", "\n", " ## contract all\n", @@ -2732,7 +2732,7 @@ "\n", " XH = cytnx.Contract(X,H)\n", "\n", - " XH.relabels_(['d','e','0','1'])\n", + " XH.relabel_(['d','e','0','1'])\n", "\n", "\n", " XHX = 
cytnx.Contract(Xt,XH).item() ## rank-0\n", @@ -2799,7 +2799,7 @@ "\n", " # lb_inv.print_diagram();\n", "\n", - " lb_inv.relabels_(['e','d'])\n", + " lb_inv.relabel_(['e','d'])\n", "\n", "\n", " A = cytnx.Contract(lb_inv,A)\n", @@ -2903,7 +2903,7 @@ "\n", " A[k] = cytnx.UniTensor(cytnx.random.normal([dim1, dim2, dim3],0.,1.),2)\n", "\n", - " A[k].relabels_([2*k,2*k+1,2*k+2])\n", + " A[k].relabel_([2*k,2*k+1,2*k+2])\n", "\n" ] }, @@ -3011,7 +3011,7 @@ "\n", " psi = cytnx.UniTensor(psi_T,2);\n", "\n", - " psi.relabels_(lbl);\n", + " psi.relabel_(lbl);\n", "\n", " Ekeep.append(Entemp);\n", "\n", @@ -3029,7 +3029,7 @@ "\n", " s = s/s.get_block_().Norm().item()\n", "\n", - " s.relabels_(slabel)\n", + " s.relabel_(slabel)\n", "\n", "\n", " A[p] = cytnx.Contract(A[p],s) ## absorb s into next neighbor\n", @@ -3162,7 +3162,7 @@ "\n", "s = s/s.get_block_().Norm().item()\n", "\n", - "s.relabels_(slabel)\n", + "s.relabel_(slabel)\n", "\n", "\n", "A[p] = cytnx.Contract(A[p],s) ## absorb s into next neighbor" @@ -3247,7 +3247,7 @@ "\n", " psi = cytnx.UniTensor(psi_T,2);\n", "\n", - " psi.relabels_(lbl);\n", + " psi.relabel_(lbl);\n", "\n", " Ekeep.append(Entemp);\n", "\n", @@ -3266,7 +3266,7 @@ "\n", " s = s/s.get_block_().Norm().item()\n", "\n", - " s.relabels_(slabel)\n", + " s.relabel_(slabel)\n", "\n", "\n", "\n", @@ -3313,7 +3313,7 @@ "\n", " psi_T.reshape_(dim_l,d,d,dim_r)## convert psi back to 4-leg form\n", "\n", - " psi = cytnx.UniTensor(psi_T,2); psi.relabels_(lbl);\n", + " psi = cytnx.UniTensor(psi_T,2); psi.relabel_(lbl);\n", "\n", " Ekeep.append(Entemp);\n", "\n", @@ -3330,7 +3330,7 @@ "\n", " s = s/s.get_block_().Norm().item()\n", "\n", - " s.relabels_(slabel)\n", + " s.relabel_(slabel)\n", "\n", "\n", " A[p+1] = cytnx.Contract(s,A[p+1]) ## absorb s into next neighbor.\n", @@ -3812,7 +3812,7 @@ "\n", " [cytnx.Qs(2)>>1, cytnx.Qs(0)>>2, cytnx.Qs(-2)>>1],[cytnx.Symmetry.U1()])\n", "\n", - "Tsymm = cytnx.UniTensor([bond_d, bond_e, bond_f], name=\"symm. 
tensor\").relabels_([\"d\",\"e\",\"f\"])\n", + "Tsymm = cytnx.UniTensor([bond_d, bond_e, bond_f], name=\"symm. tensor\").relabel_([\"d\",\"e\",\"f\"])\n", "\n", "\n", "for block in Tsymm.get_blocks_():\n", diff --git a/docs/code/python/doc_codes/guide_contraction_contract_Contract.py b/docs/code/python/doc_codes/guide_contraction_contract_Contract.py index 4056ab462..f7c398550 100644 --- a/docs/code/python/doc_codes/guide_contraction_contract_Contract.py +++ b/docs/code/python/doc_codes/guide_contraction_contract_Contract.py @@ -1,12 +1,12 @@ A = cytnx.UniTensor(cytnx.ones([2,3,4]), rowrank=1, labels=["i","j","l"]) -Are = A.relabels(["i","j","lA"]) +Are = A.relabel(["i","j","lA"]) B = cytnx.UniTensor(cytnx.ones([3,2,4,5]), rowrank=2, labels=["j","k","l","m"]) -Bre = B.relabels(["j","k","lB","m"]) +Bre = B.relabel(["j","k","lB","m"]) C = cytnx.Contract(Are, Bre) diff --git a/docs/code/python/doc_codes/guide_contraction_contract_Contracts.py b/docs/code/python/doc_codes/guide_contraction_contract_Contracts.py index e7f7fc7a3..5cd6a37cf 100644 --- a/docs/code/python/doc_codes/guide_contraction_contract_Contracts.py +++ b/docs/code/python/doc_codes/guide_contraction_contract_Contracts.py @@ -11,9 +11,9 @@ name = "M") # Assign labels -A1.relabels_(["phy1","v1","v2"]) -M.relabels_(["phy1","phy2","v3","v4"]) -A2.relabels_(["phy2","v5","v6"]) +A1.relabel_(["phy1","v1","v2"]) +M.relabel_(["phy1","phy2","v3","v4"]) +A2.relabel_(["phy2","v5","v6"]) # Use Contracts Res = cytnx.Contracts(TNs = [A1,M,A2], diff --git a/docs/code/python/doc_codes/guide_contraction_contract_relabels.py b/docs/code/python/doc_codes/guide_contraction_contract_relabels.py index 4056ab462..f7c398550 100644 --- a/docs/code/python/doc_codes/guide_contraction_contract_relabels.py +++ b/docs/code/python/doc_codes/guide_contraction_contract_relabels.py @@ -1,12 +1,12 @@ A = cytnx.UniTensor(cytnx.ones([2,3,4]), rowrank=1, labels=["i","j","l"]) -Are = A.relabels(["i","j","lA"]) +Are = 
A.relabel(["i","j","lA"]) B = cytnx.UniTensor(cytnx.ones([3,2,4,5]), rowrank=2, labels=["j","k","l","m"]) -Bre = B.relabels(["j","k","lB","m"]) +Bre = B.relabel(["j","k","lB","m"]) C = cytnx.Contract(Are, Bre) diff --git a/docs/code/python/doc_codes/guide_contraction_network_label_ord-1.py b/docs/code/python/doc_codes/guide_contraction_network_label_ord-1.py index 7c5e43845..94f689ca5 100644 --- a/docs/code/python/doc_codes/guide_contraction_network_label_ord-1.py +++ b/docs/code/python/doc_codes/guide_contraction_network_label_ord-1.py @@ -3,6 +3,6 @@ [2,8,8], mean=0., std=1., dtype=cytnx.Type.ComplexDouble)); -A1.relabels_(["phy","v1","v2"]); +A1.relabel_(["phy","v1","v2"]); A2 = A1.Conj(); -A2.relabels_(["phy*","v1*","v2*"]); +A2.relabel_(["phy*","v1*","v2*"]); diff --git a/docs/code/python/doc_codes/guide_uniten_labels_relabel_.py b/docs/code/python/doc_codes/guide_uniten_labels_relabel_.py index a222a2de4..0eae14a92 100644 --- a/docs/code/python/doc_codes/guide_uniten_labels_relabel_.py +++ b/docs/code/python/doc_codes/guide_uniten_labels_relabel_.py @@ -4,5 +4,5 @@ uT.relabel_(1,"xx") uT.print_diagram() -uT.relabels_(["a","b","c"]) +uT.relabel_(["a","b","c"]) uT.print_diagram() diff --git a/docs/source/example/DMRG.rst b/docs/source/example/DMRG.rst index 9b48c6a37..5386c8339 100644 --- a/docs/source/example/DMRG.rst +++ b/docs/source/example/DMRG.rst @@ -195,7 +195,7 @@ Next, we are going to prepare our variational ansatz (MPS). Here, **chi** is the lbls = [] # List for storing the MPS labels A = [None for i in range(Nsites)] A[0] = cytnx.UniTensor(cytnx.random.normal([1, d, min(chi, d)], 0., 1.), rowrank = 2) - A[0].relabels_(["0","1","2"]) + A[0].relabel_(["0","1","2"]) lbls.append(["0","1","2"]) # store the labels for later convinience. for k in range(1,Nsites): @@ -204,7 +204,7 @@ Next, we are going to prepare our variational ansatz (MPS). 
Here, **chi** is the A[k] = cytnx.UniTensor(cytnx.random.normal([dim1, dim2, dim3],0.,1.), rowrank = 2) lbl = [str(2*k),str(2*k+1),str(2*k+2)] - A[k].relabels_(lbl) + A[k].relabel_(lbl) lbls.append(lbl) # store the labels for later convinience. @@ -281,11 +281,11 @@ The full implementation looks like: LR[p+1] = anet.Launch() # Recover the original MPS labels - A[p].relabels_(lbls[p]) - A[p+1].relabels_(lbls[p+1]) + A[p].relabel_(lbls[p]) + A[p+1].relabel_(lbls[p+1]) _,A[-1] = cytnx.linalg.Gesvd(A[-1],is_U=True,is_vT=False) ## last one. - A[-1].relabels_(lbls[-1]) # Recover the original MPS labels + A[-1].relabel_(lbls[-1]) # Recover the original MPS labels @@ -319,12 +319,12 @@ Now we are ready for describing the main DMRG algorithm that optimize our MPS, t psi.set_rowrank_(2) # maintain rowrank to perform the svd s,A[p],A[p+1] = cytnx.linalg.Svd_truncate(psi,new_dim) - A[p+1].relabels_(lbls[p+1]); # set the label back to be consistent + A[p+1].relabel_(lbls[p+1]); # set the label back to be consistent s = s/s.Norm().item() # normalize s A[p] = cytnx.Contract(A[p],s) # absorb s into next neighbor - A[p].relabels_(lbls[p]); # set the label back to be consistent + A[p].relabel_(lbls[p]); # set the label back to be consistent # update LR from right to left: anet = cytnx.Network("R_AMAH.net") @@ -335,7 +335,7 @@ Now we are ready for describing the main DMRG algorithm that optimize our MPS, t A[0].set_rowrank_(1) # maintain rowrank to perform the svd _,A[0] = cytnx.linalg.Gesvd(A[0],is_U=False, is_vT=True) - A[0].relabels_(lbls[0]); # set the label back to be consistent + A[0].relabel_(lbls[0]); # set the label back to be consistent There are lots of things happening here, let's break it up a bit, from right to left, the first thing we do is to contract two tensors A[p] and A[p+1]: @@ -389,7 +389,7 @@ To ultilize the Lanczos function, the opertion of acting Hamitonian (which invol lbl = v.labels() self.anet.PutUniTensor("psi",v) out = self.anet.Launch() - 
out.relabels_(lbl) + out.relabel_(lbl) return out .. Hint:: @@ -429,12 +429,12 @@ we have to make our psi into the canonical form, for which we do the SVD for the psi.set_rowrank_(2) # maintain rowrank to perform the svd s,A[p],A[p+1] = cytnx.linalg.Svd_truncate(psi,new_dim) - A[p+1].relabels_(lbls[p+1]); # set the label back to be consistent + A[p+1].relabel_(lbls[p+1]); # set the label back to be consistent s = s/s.Norm().item() # normalize s A[p] = cytnx.Contract(A[p],s) # absorb s into next neighbor - A[p].relabels_(lbls[p]); # set the label back to be consistent + A[p].relabel_(lbls[p]); # set the label back to be consistent @@ -482,7 +482,7 @@ The for loop is finished, now we arrived at the left end of the system, with the A[0].set_rowrank_(1) _,A[0] = cytnx.linalg.Gesvd(A[0],is_U=False, is_vT=True) - A[0].relabels_(lbls[0]); #set the label back to be consistent + A[0].relabel_(lbls[0]); #set the label back to be consistent looks like the same as we did for the right-end site in the beginning, this time we saves the vT, the purpose of the set_rowrank_(1) is only for the convenience of calling Svd/Svd_truncate in the next sweeping procedure from left to right. @@ -514,12 +514,12 @@ So we are done! With the other loop to control the number of times we sweep, we psi.set_rowrank_(2) # maintain rowrank to perform the svd s,A[p],A[p+1] = cytnx.linalg.Svd_truncate(psi,new_dim) - A[p+1].relabels_(lbls[p+1]); # set the label back to be consistent + A[p+1].relabel_(lbls[p+1]); # set the label back to be consistent s = s/s.Norm().item() # normalize s A[p] = cytnx.Contract(A[p],s) # absorb s into next neighbor - A[p].relabels_(lbls[p]); # set the label back to be consistent + A[p].relabel_(lbls[p]); # set the label back to be consistent # update LR from right to left: anet = cytnx.Network("R_AMAH.net") @@ -530,7 +530,7 @@ So we are done! 
With the other loop to control the number of times we sweep, we A[0].set_rowrank_(1) _,A[0] = cytnx.linalg.Gesvd(A[0],is_U=False, is_vT=True) - A[0].relabels_(lbls[0]); #set the label back to be consistent + A[0].relabel_(lbls[0]); #set the label back to be consistent for p in range(Nsites-1): dim_l = A[p].shape()[0] @@ -543,12 +543,12 @@ So we are done! With the other loop to control the number of times we sweep, we psi.set_rowrank_(2) # maintain rowrank to perform the svd s,A[p],A[p+1] = cytnx.linalg.Svd_truncate(psi,new_dim) - A[p].relabels_(lbls[p]); #set the label back to be consistent + A[p].relabel_(lbls[p]); #set the label back to be consistent s = s/s.Norm().item() # normalize s A[p+1] = cytnx.Contract(s,A[p+1]) ## absorb s into next neighbor. - A[p+1].relabels_(lbls[p+1]); #set the label back to be consistent + A[p+1].relabel_(lbls[p+1]); #set the label back to be consistent # update LR from left to right: anet = cytnx.Network("L_AMAH.net") @@ -559,7 +559,7 @@ So we are done! With the other loop to control the number of times we sweep, we A[-1].set_rowrank_(2) _,A[-1] = cytnx.linalg.Gesvd(A[-1],is_U=True,is_vT=False) ## last one. 
- A[-1].relabels_(lbls[-1]); #set the label back to be consistent + A[-1].relabel_(lbls[-1]); #set the label back to be consistent Compare DMRG Results ************************************ diff --git a/docs/source/example/iDMRG.rst b/docs/source/example/iDMRG.rst index 6b696ae92..b2562c912 100644 --- a/docs/source/example/iDMRG.rst +++ b/docs/source/example/iDMRG.rst @@ -252,10 +252,10 @@ The construction of trial state and optimization is done as follows: sR.relabel_(0,"1") sL.relabel_(1,"0") s0 = 1./s0 - s0.relabels_(["0","1"]) + s0.relabel_(["0","1"]) s2 = cytnx.Contract(cytnx.Contract(sL,s0),sR) - s2.relabels_(["-10","-11"]) + s2.relabel_(["-10","-11"]) A.relabel_(2,"-10") B.relabel_(0,"-11") psi = cytnx.Contract(cytnx.Contract(A,s2),B) diff --git a/docs/source/example/iTEBD.rst b/docs/source/example/iTEBD.rst index d4a987d7b..3c34d1a8f 100644 --- a/docs/source/example/iTEBD.rst +++ b/docs/source/example/iTEBD.rst @@ -304,10 +304,10 @@ At the beginning of each iteration, we evaluate the energy expectation value :ma .. code-block:: python :linenos: - A.relabels_(["a","0","b"]) - B.relabels_(["c","1","d"]) - la.relabels_(["b","c"]) - lb.relabels_(["d","e"]) + A.relabel_(["a","0","b"]) + B.relabel_(["c","1","d"]) + la.relabel_(["b","c"]) + lb.relabel_(["d","e"]) ## contract all @@ -329,7 +329,7 @@ At the beginning of each iteration, we evaluate the energy expectation value :ma # Note that X,Xt contract will result a rank-0 tensor, which can use item() toget element XNorm = cytnx.Contract(X,Xt).item() XH = cytnx.Contract(X,H) - XH.relabels_(["d","e","0","1"]) + XH.relabel_(["d","e","0","1"]) XHX = cytnx.Contract(Xt,XH).item() ## rank-0 @@ -348,10 +348,10 @@ At the beginning of each iteration, we evaluate the energy expectation value :ma .. .. code-block:: c++ .. :linenos: -.. A.relabels_({"a","0","b"}); -.. B.relabels_({"c","1","d"}); -.. la.relabels_({"b","c"}); -.. lb.relabels_({"d","e"}); +.. A.relabel_({"a","0","b"}); +.. B.relabel_({"c","1","d"}); +.. 
la.relabel_({"b","c"}); +.. lb.relabel_({"d","e"}); .. // contract all @@ -366,7 +366,7 @@ At the beginning of each iteration, we evaluate the energy expectation value :ma .. Scalar XNorm = cyx::Contract(X,Xt).item(); .. UniTensor XH = cyx::Contract(X,H); -.. XH.relabels_({"d","e","0","1"}); +.. XH.relabel_({"d","e","0","1"}); .. Scalar XHX = cyx::Contract(Xt,XH).item(); .. double E = double(XHX/XNorm); @@ -439,7 +439,7 @@ Now we have the envolved :math:`\Gamma_A`, :math:`\Gamma_B` and :math:`\lambda_A :linenos: lb_inv = 1./lb - lb_inv.relabels_(["e","d"]) + lb_inv.relabel_(["e","d"]) A = cytnx.Contract(lb_inv,A) B = cytnx.Contract(B,lb_inv) # translation symmetry, exchange A and B site @@ -453,7 +453,7 @@ Now we have the envolved :math:`\Gamma_A`, :math:`\Gamma_B` and :math:`\lambda_A .. UniTensor lb_inv = 1./lb; -.. lb_inv.relabels_({"e","d"}); +.. lb_inv.relabel_({"e","d"}); .. A = cyx.Contract(lb_inv,A); .. B = cyx.Contract(B,lb_inv); @@ -473,10 +473,10 @@ Let's put everything together in a loop for iteration: for i in range(10000): - A.relabels_(["a","0","b"]) - B.relabels_(["c","1","d"]) - la.relabels_(["b","c"]) - lb.relabels_(["d","e"]) + A.relabel_(["a","0","b"]) + B.relabel_(["c","1","d"]) + la.relabel_(["b","c"]) + lb.relabel_(["d","e"]) ## contract all X = cytnx.Contract(cytnx.Contract(A,la),cytnx.Contract(B,lb)) @@ -488,7 +488,7 @@ Let's put everything together in a loop for iteration: # Note that X,Xt contract will result a rank-0 tensor, which can use item() toget element XNorm = cytnx.Contract(X,Xt).item() XH = cytnx.Contract(X,H) - XH.relabels_(["d","e","0","1"]) + XH.relabel_(["d","e","0","1"]) XHX = cytnx.Contract(Xt,XH).item() ## rank-0 E = XHX/XNorm @@ -512,7 +512,7 @@ Let's put everything together in a loop for iteration: la.normalize_() lb_inv = 1./lb - lb_inv.relabels_(["e","d"]) + lb_inv.relabel_(["e","d"]) A = cytnx.Contract(lb_inv,A) B = cytnx.Contract(B,lb_inv) @@ -532,10 +532,10 @@ Let's put everything together in a loop for iteration: 
.. for(unsigned int i=0;i<10000;i++){ -.. A.relabels_({"a","0","b"}); -.. B.relabels_({"c","1","d"}); -.. la.relabels_({"b","c"}); -.. lb.relabels_({"d","e"}); +.. A.relabel_({"a","0","b"}); +.. B.relabel_({"c","1","d"}); +.. la.relabel_({"b","c"}); +.. lb.relabel_({"d","e"}); .. // contract all @@ -550,7 +550,7 @@ Let's put everything together in a loop for iteration: .. Scalar XNorm = cyx::Contract(X,Xt).item(); .. UniTensor XH = cyx::Contract(X,H); -.. XH.relabels_({"d","e","0","1"}); +.. XH.relabel_({"d","e","0","1"}); .. Scalar XHX = cyx::Contract(Xt,XH).item(); .. double E = double(XHX/XNorm); @@ -582,7 +582,7 @@ Let's put everything together in a loop for iteration: .. // again, but A' and B' are updated .. UniTensor lb_inv = 1./lb; -.. lb_inv.relabels_({"e","d"}); +.. lb_inv.relabel_({"e","d"}); .. A = cyx::Contract(lb_inv,A); .. B = cyx::Contract(B,lb_inv); diff --git a/docs/source/guide/contraction/contract.rst b/docs/source/guide/contraction/contract.rst index 7d153b28f..19c29b037 100644 --- a/docs/source/guide/contraction/contract.rst +++ b/docs/source/guide/contraction/contract.rst @@ -21,7 +21,7 @@ Output >> Here we see that the labels **j** and **l** appear on both input tensors. Thus, they are contracted. Note that the bond dimensions of the contracted tensors must agree on both tensors. -In order to define which indices shall be contracted without changing the labels on the initial tensors, Cyntx provides the method **.relabels()**. It allows to set common labels on the indices to be contracted and distinct labels on the others. Also, the labels on the resulting tensor can be defined this way. See :ref:`Changing labels` for further details. Suppose that we only want to contract the index *j* in the previous example, but not sum over *l*. We can use **.relabels()** for this task: +In order to define which indices shall be contracted without changing the labels on the initial tensors, Cyntx provides the method **.relabel()**. 
It allows to set common labels on the indices to be contracted and distinct labels on the others. Also, the labels on the resulting tensor can be defined this way. See :ref:`Changing labels` for further details. Suppose that we only want to contract the index *j* in the previous example, but not sum over *l*. We can use **.relabel()** for this task: * In Python: @@ -36,7 +36,7 @@ Output >> :language: text -The function **.relabels()** creates a copy of the initial UniTensor and changes the labels, while keeping the labels on the initial tensor unchanged. The actual data is shared between the old and new tensor, only the meta is independent. +The function **.relabel()** creates a copy of the initial UniTensor and changes the labels, while keeping the labels on the initial tensor unchanged. The actual data is shared between the old and new tensor, only the meta is independent. Contracts ------------------ diff --git a/docs/source/guide/uniten/labels.rst b/docs/source/guide/uniten/labels.rst index f5d1b6160..542a8dfcc 100644 --- a/docs/source/guide/uniten/labels.rst +++ b/docs/source/guide/uniten/labels.rst @@ -20,13 +20,13 @@ Alternatively, if we don't know the index of the target bond in the current orde If we wish to change the labels of all legs, we can use: -.. py:function:: UniTensor.relabels_( new_labels) +.. py:function:: UniTensor.relabel_( new_labels) :param List[string] new_labels: a list of new labels or -.. py:function:: UniTensor.relabels_(old_labels, new_labels) +.. py:function:: UniTensor.relabel_(old_labels, new_labels) :param List[string] old_labels: a list of current labels :param List[string] new_labels: a list of the corresponding new labels diff --git a/dox.md b/dox.md index ccfb7b156..65842497c 100644 --- a/dox.md +++ b/dox.md @@ -232,8 +232,8 @@ Tensor A({3,4,5},Type.Double); UniTensor tA = UniTensor(A); // convert directly. UniTensor tB = UniTensor({Bond(3),Bond(4),Bond(5)},{}); // init from scratch. // Relabel the tensor and then contract. 
-tA.relabels_({"common_1", "common_2", "out_a"}); -tB.relabels_({"common_1", "common_2", "out_b"}); +tA.relabel_({"common_1", "common_2", "out_a"}); +tB.relabel_({"common_1", "common_2", "out_b"}); UniTensor out = cytnx::Contract(tA,tB); tA.print_diagram(); tB.print_diagram(); diff --git a/example/TDVP/tdvp1_dense.py b/example/TDVP/tdvp1_dense.py index 7a55e3968..480f5535c 100644 --- a/example/TDVP/tdvp1_dense.py +++ b/example/TDVP/tdvp1_dense.py @@ -25,7 +25,7 @@ def __init__(self, L, M, R): def matvec(self, v): self.anet.PutUniTensor("psi",v) out = self.anet.Launch() - out.relabels_(v.labels()) + out.relabel_(v.labels()) return out class ZeroSiteOp(cytnx.LinOp): @@ -44,7 +44,7 @@ def __init__(self, L, R): def matvec(self, v): self.anet.PutUniTensor("C",v) out = self.anet.Launch() - out.relabels_(v.labels()) + out.relabel_(v.labels()) return out def time_evolve_Lan_f(psi, functArgs, delta): @@ -54,7 +54,7 @@ def time_evolve_Lan_f(psi, functArgs, delta): R = R.astype(cytnx.Type.ComplexDouble) op = OneSiteOp(L,M,R) exp_iH_v = cytnx.linalg.Lanczos_Exp(op, psi, -1.0j*delta*0.5, 1.0e-8) - exp_iH_v.relabels_(psi.labels()) + exp_iH_v.relabel_(psi.labels()) return exp_iH_v def time_evolve_Lan_b(psi, functArgs, delta): @@ -63,7 +63,7 @@ def time_evolve_Lan_b(psi, functArgs, delta): R = R.astype(cytnx.Type.ComplexDouble) op = ZeroSiteOp(L,R) exp_iH_v = cytnx.linalg.Lanczos_Exp(op, psi, 1.0j*delta*0.5, 1.0e-8) - exp_iH_v.relabels_(psi.labels()) + exp_iH_v.relabel_(psi.labels()) return exp_iH_v def get_energy(A, M): @@ -116,7 +116,7 @@ def get_energy(A, M): for k in range(1,Nsites): lbl = [str(2*k),str(2*k+1),str(2*k+2)] - A[k].relabels_(lbl) + A[k].relabel_(lbl) lbls.append(lbl) # store the labels for later convinience. 
LR = [None for i in range(Nsites+1)] @@ -142,8 +142,8 @@ def get_energy(A, M): LR[p+1] = anet.Launch() # Recover the original MPS labels - A[p].relabels_(lbls[p]) - A[p+1].relabels_(lbls[p+1]) + A[p].relabel_(lbls[p]) + A[p+1].relabel_(lbls[p+1]) As = [] As.append(A.copy()) @@ -166,7 +166,7 @@ def get_energy(A, M): psi.set_rowrank_(1) # maintain rowrank to perform the svd s,_,A[p] = cytnx.linalg.Svd_truncate(psi,new_dim) - A[p].relabels_(lbls[p]); # set the label back to be consistent + A[p].relabel_(lbls[p]); # set the label back to be consistent # update LR from right to left: anet = cytnx.Network() anet.FromString(["R: -2,-1,-3",\ @@ -184,7 +184,7 @@ def get_energy(A, M): #C = time_evolve_b(C, (old_LR, LR[p]), dt) C = time_evolve_Lan_b(C, (old_LR, LR[p]), dt) - A[p-1] = cytnx.Contract(A[p-1], C).relabels_(A[p-1].labels()) + A[p-1] = cytnx.Contract(A[p-1], C).relabel_(A[p-1].labels()) @@ -193,7 +193,7 @@ def get_energy(A, M): A[0].set_rowrank_(1) _,A[0] = cytnx.linalg.Gesvd(A[0],is_U=False, is_vT=True) - A[0].relabels_(lbls[0]); #set the label back to be consistent + A[0].relabel_(lbls[0]); #set the label back to be consistent for p in range(Nsites): @@ -206,7 +206,7 @@ def get_energy(A, M): psi.set_rowrank_(2) # maintain rowrank to perform the svd s,A[p],_ = cytnx.linalg.Svd_truncate(psi,new_dim) - A[p].relabels_(lbls[p]); #set the label back to be consistent + A[p].relabel_(lbls[p]); #set the label back to be consistent # update LR from left to right: anet = cytnx.Network() anet.FromString(["L: -2,-1,-3",\ @@ -226,13 +226,13 @@ def get_energy(A, M): C = time_evolve_Lan_b(C, (LR[p+1],old_LR), dt) A[p+1] = cytnx.Contract(A[p+1], C) A[p+1].permute_(['_aux_L', lbls[p+1][1], lbls[p+1][2]]) - A[p+1].relabels_(lbls[p+1]) + A[p+1].relabel_(lbls[p+1]) print('Sweep[l->r]: %d/%d, Loc: %d' % (k, time_step, p)) A[-1].set_rowrank_(2) _,A[-1] = cytnx.linalg.Gesvd(A[-1],is_U=True,is_vT=False) ## last one. 
- A[-1].relabels_(lbls[-1]); #set the label back to be consistent + A[-1].relabel_(lbls[-1]); #set the label back to be consistent As.append(A.copy()) return As, Es # all time step states @@ -250,7 +250,7 @@ def Local_meas(A, B, Op, site): l = anet.Launch() else: tmp = A[i].relabel(1, "_aux_up") - Op = Op.relabels(["_aux_up", "_aux_low"]) + Op = Op.relabel(["_aux_up", "_aux_low"]) tmp = cytnx.Contract(tmp, Op) tmp.relabel_("_aux_low", A[i].labels()[1]) tmp.permute_(A[i].labels()) @@ -264,7 +264,7 @@ def prepare_rand_init_MPS(Nsites, chi, d): lbls = [] A = [None for i in range(Nsites)] A[0] = cytnx.UniTensor(cytnx.random.normal([1, d, min(chi, d)], 0., 1., seed=0), rowrank = 2) - A[0].relabels_(["0","1","2"]) + A[0].relabel_(["0","1","2"]) lbls.append(["0","1","2"]) # store the labels for later convinience. for k in range(1,Nsites): @@ -273,7 +273,7 @@ def prepare_rand_init_MPS(Nsites, chi, d): A[k] = cytnx.UniTensor(cytnx.random.normal([dim1, dim2, dim3],0.,1., seed=0), rowrank = 2) lbl = [str(2*k),str(2*k+1),str(2*k+2)] - A[k].relabels_(lbl) + A[k].relabel_(lbl) lbls.append(lbl) # store the labels for later convinience. 
return A diff --git a/pybind/unitensor_py.cpp b/pybind/unitensor_py.cpp index eb08890b0..fd24d6a00 100644 --- a/pybind/unitensor_py.cpp +++ b/pybind/unitensor_py.cpp @@ -175,14 +175,14 @@ void unitensor_binding(py::module &m) { return self.relabel(new_labels); }, py::arg("new_labels")) .def("relabels",[](UniTensor &self, const std::vector &new_labels){ - return self.relabels(new_labels); + return self.relabel(new_labels); }, py::arg("new_labels")) .def("c_relabel_",[](UniTensor &self, const std::vector &new_labels){ self.relabel_(new_labels); }, py::arg("new_labels")) .def("c_relabels_",[](UniTensor &self, const std::vector &new_labels){ - self.relabels_(new_labels); + self.relabel_(new_labels); }, py::arg("new_labels")) @@ -211,11 +211,11 @@ void unitensor_binding(py::module &m) { } ,py::arg("old_labels"), py::arg("new_labels")) .def("relabels",[](UniTensor &self, const std::vector &old_labels, const std::vector &new_labels){ - return self.relabels(old_labels,new_labels); + return self.relabel(old_labels,new_labels); } ,py::arg("old_labels"), py::arg("new_labels")) .def("c_relabels_",[](UniTensor &self, const std::vector &old_labels, const std::vector &new_labels){ - self.relabels_(old_labels,new_labels); + self.relabel_(old_labels,new_labels); } ,py::arg("old_labels"), py::arg("new_labels")) diff --git a/src/RegularGncon.cpp b/src/RegularGncon.cpp index 8060a95e1..5697bdab8 100644 --- a/src/RegularGncon.cpp +++ b/src/RegularGncon.cpp @@ -649,7 +649,7 @@ namespace cytnx { // modify the label of unitensor (shared): // this->tensors[idx].set_labels(this->label_arr[idx]);//this conflict this->CtTree.base_nodes[idx]->utensor = - this->tensors[idx].relabels(this->label_arr[idx]); // this conflict + this->tensors[idx].relabel(this->label_arr[idx]); // this conflict // this->CtTree.base_nodes[idx].name = this->tensors[idx].name(); this->CtTree.base_nodes[idx]->is_assigned = true; diff --git a/src/RegularNetwork.cpp b/src/RegularNetwork.cpp index 743d57211..069f8024c 
100644 --- a/src/RegularNetwork.cpp +++ b/src/RegularNetwork.cpp @@ -964,7 +964,7 @@ namespace cytnx { for (cytnx_uint64 idx = 0; idx < this->tensors.size(); idx++) { this->CtTree.base_nodes[idx]->utensor = - this->tensors[idx].relabels(this->label_arr[idx]); // this conflict + this->tensors[idx].relabel(this->label_arr[idx]); // this conflict this->CtTree.base_nodes[idx]->is_assigned = true; } // 1.5 contraction order: @@ -1083,7 +1083,7 @@ namespace cytnx { #else for (cytnx_uint64 idx = 0; idx < this->tensors.size(); idx++) { this->CtTree.base_nodes[idx]->utensor = - this->tensors[idx].relabels(this->label_arr[idx]); // this conflict + this->tensors[idx].relabel(this->label_arr[idx]); // this conflict this->CtTree.base_nodes[idx]->is_assigned = true; } // 1.5 contraction order: diff --git a/src/linalg/Lanczos_Exp.cpp b/src/linalg/Lanczos_Exp.cpp index 31c0f1d17..d07508128 100644 --- a/src/linalg/Lanczos_Exp.cpp +++ b/src/linalg/Lanczos_Exp.cpp @@ -67,10 +67,10 @@ namespace cytnx { const unsigned int &Maxiter = 10000) { // the operation (I + Hop/k) on A auto I_plus_A_Op = [&](UniTensor A) { - return ((Hop->matvec(A)) / k + A).relabels_(b.labels()); + return ((Hop->matvec(A)) / k + A).relabel_(b.labels()); }; // the residuals of (b - (I + Hop/k)x) - auto r = (b - I_plus_A_Op(Tin)).relabels_(b.labels()); + auto r = (b - I_plus_A_Op(Tin)).relabel_(b.labels()); // choose r0_hat = r auto r0 = r; auto x = Tin; @@ -90,22 +90,22 @@ namespace cytnx { for (int i = 1; i < Maxiter; ++i) { auto v = I_plus_A_Op(pv_old); auto a = p_old / Dot_internal(r0, v); - auto h = (x_old + a * pv_old).relabels_(b.labels()); - auto s = (r_old - a * v).relabels_(b.labels()); + auto h = (x_old + a * pv_old).relabel_(b.labels()); + auto s = (r_old - a * v).relabel_(b.labels()); if (abs(Dot_internal(s, s)) < CvgCrit) { x = h; break; } auto t = I_plus_A_Op(s); auto w = Dot_internal(t, s) / Dot_internal(t, t); - x = (h + w * s).relabels_(b.labels()); - r = (s - w * t).relabels_(b.labels()); + x = 
(h + w * s).relabel_(b.labels()); + r = (s - w * t).relabel_(b.labels()); if (abs(Dot_internal(r, r)) < CvgCrit) { break; } auto p = Dot_internal(r0, r); auto beta = (p / p_old) * (a / w); - pv = (r + beta * (pv_old - w * v)).relabels_(b.labels()); + pv = (r + beta * (pv_old - w * v)).relabel_(b.labels()); // update pv_old = pv; @@ -145,7 +145,7 @@ namespace cytnx { // that, // |(I + A / k )^(−1) v[i] − w[i]| ≤ eps1 |v[i]| . auto w = invert_biCGSTAB_internal(Hop, v, v, k, eps1); - // auto resi = ((Hop->matvec(w))/k + w).relabels_(v.labels()) - v; + // auto resi = ((Hop->matvec(w))/k + w).relabel_(v.labels()) - v; // For j = 0 to i for (int j = 0; j <= i; ++j) { @@ -212,10 +212,10 @@ namespace cytnx { auto label_kr = B.labels()[1]; auto Vk_labels = v0.labels(); Vk_labels.insert(Vk_labels.begin(), label_kl); - Vk_ut.relabels_(Vk_labels); + Vk_ut.relabel_(Vk_labels); auto VkDag_labels = v0.labels(); VkDag_labels.push_back(label_kr); - VkDag_ut.relabels_(VkDag_labels); + VkDag_ut.relabel_(VkDag_labels); // Vk_ut.print_diagram(); // VkDag_ut.print_diagram(); @@ -243,10 +243,10 @@ namespace cytnx { v = v / v_nrm; // first iteration - auto wp = (Hop->matvec(v)).relabels_(v.labels()); + auto wp = (Hop->matvec(v)).relabel_(v.labels()); auto alpha = Dot_internal(wp, v); Hp.at({0, 0}) = alpha; - auto w = (wp - alpha * v).relabels_(v.labels()); + auto w = (wp - alpha * v).relabel_(v.labels()); // prepare U auto Vk_shape = v.shape(); @@ -264,14 +264,14 @@ namespace cytnx { auto beta = std::sqrt(double(Dot_internal(w, w).real())); v_old = v.clone(); if (beta > beta_tol) { - v = (w / beta).relabels_(v.labels()); + v = (w / beta).relabel_(v.labels()); } else { // beta too small -> the norm of new vector too small. This vector cannot span // the new dimension if (verbose) { std::cout << "beta too small, pick another vector." 
<< i << std::endl; } // pick a new vector perpendicular to all vector in Vs - v = Gram_Schmidt_internal(Vs).relabels_(v.labels()); + v = Gram_Schmidt_internal(Vs).relabel_(v.labels()); auto v_norm = Dot_internal(v, v); // if the picked vector also too small, break and construct expH if (abs(v_norm) <= beta_tol) { @@ -286,10 +286,10 @@ namespace cytnx { Vs.push_back(v); Hp.at({(cytnx_uint64)i, (cytnx_uint64)i - 1}) = Hp.at({(cytnx_uint64)i - 1, (cytnx_uint64)i}) = beta; - wp = (Hop->matvec(v)).relabels_(v.labels()); + wp = (Hop->matvec(v)).relabel_(v.labels()); alpha = Dot_internal(wp, v); Hp.at({(cytnx_uint64)i, (cytnx_uint64)i}) = alpha; - w = (wp - alpha * v - beta * v_old).relabels_(v.labels()); + w = (wp - alpha * v - beta * v_old).relabel_(v.labels()); // Converge check Hp_sub = resize_mat_internal(Hp, i + 1, i + 1); @@ -347,10 +347,10 @@ namespace cytnx { auto label_kr = B.labels()[1]; auto Vk_labels = v.labels(); Vk_labels.insert(Vk_labels.begin(), label_kl); - Vk_ut.relabels_(Vk_labels); + Vk_ut.relabel_(Vk_labels); auto VkDag_labels = v.labels(); VkDag_labels.push_back(label_kr); - VkDag_ut.relabels_(VkDag_labels); + VkDag_ut.relabel_(VkDag_labels); out = Contracts({T, VkDag_ut, B}, "", true); out = Contract(out, Vk_ut); diff --git a/src/tn_algo/DMRG.cpp b/src/tn_algo/DMRG.cpp index f89241c47..130c505a1 100644 --- a/src/tn_algo/DMRG.cpp +++ b/src/tn_algo/DMRG.cpp @@ -53,7 +53,7 @@ namespace cytnx { // shifted ortho state: for (cytnx_int64 ir = 0; ir < this->ortho_mps.size(); ir++) { - auto r = this->ortho_mps[ir].relabels(v.labels()); + auto r = this->ortho_mps[ir].relabel(v.labels()); Scalar c = Contract(r.Dagger(), v).item(); out += this->weight * c * r; } @@ -179,11 +179,11 @@ namespace cytnx { // anet.PutUniTensors(["L","A","A_Conj","M"],[self.LR[p],self.mps.A[p],self.mps.A[p].Conj(),self.mpo.get_op(p)],is_clone=False); // hard coded the network: - auto Lenv = this->LR[p].relabels({"-2", "-1", "-3"}); - auto tA = this->mps.data()[p].relabels({"-1", 
"-4", "1"}); + auto Lenv = this->LR[p].relabel({"-2", "-1", "-3"}); + auto tA = this->mps.data()[p].relabel({"-1", "-4", "1"}); auto tAc = this->mps.data()[p].Conj(); tAc.set_labels({"-3", "-5", "2"}); - auto M = this->mpo.get_op(p).relabels({"-2", "0", "-4", "-5"}); + auto M = this->mpo.get_op(p).relabel({"-2", "0", "-4", "-5"}); this->LR[p + 1] = Network::Contract({Lenv, tA, tAc, M}, ";0,1,2").Launch(true); } // this->mps.S_mvright(); diff --git a/src/tn_algo/RegularMPS.cpp b/src/tn_algo/RegularMPS.cpp index 65fbb8335..643e18270 100644 --- a/src/tn_algo/RegularMPS.cpp +++ b/src/tn_algo/RegularMPS.cpp @@ -48,13 +48,13 @@ namespace cytnx { UniTensor L; for (auto Ai : this->_TNs) { if (L.uten_type() == UTenType.Void) { - auto tA = Ai.relabels({"0", "1", "2"}); + auto tA = Ai.relabel({"0", "1", "2"}); L = Contract(tA, tA.Dagger().relabel("0", "-2")); } else { L.set_labels({"2", "-2"}); - auto tA = Ai.relabels({"2", "3", "4"}); + auto tA = Ai.relabel({"2", "3", "4"}); L = Contract(tA, L); - L = Contract(L, tA.Dagger().relabels({"-4", "-2", "3"})); + L = Contract(L, tA.Dagger().relabel({"-4", "-2", "3"})); } } return L.Trace().item(); diff --git a/tests/BlockUniTensor_test.cpp b/tests/BlockUniTensor_test.cpp index 4e406f92d..e9b44e182 100644 --- a/tests/BlockUniTensor_test.cpp +++ b/tests/BlockUniTensor_test.cpp @@ -113,39 +113,39 @@ TEST_F(BlockUniTensorTest, clone) { } TEST_F(BlockUniTensorTest, relabels) { - BUT1 = BUT1.relabels({"a", "b", "cd", "d"}); + BUT1 = BUT1.relabel({"a", "b", "cd", "d"}); EXPECT_EQ(BUT1.labels()[0], "a"); EXPECT_EQ(BUT1.labels()[1], "b"); EXPECT_EQ(BUT1.labels()[2], "cd"); EXPECT_EQ(BUT1.labels()[3], "d"); - BUT1 = BUT1.relabels({"1", "-1", "2", "1000"}); + BUT1 = BUT1.relabel({"1", "-1", "2", "1000"}); EXPECT_EQ(BUT1.labels()[0], "1"); EXPECT_EQ(BUT1.labels()[1], "-1"); EXPECT_EQ(BUT1.labels()[2], "2"); EXPECT_EQ(BUT1.labels()[3], "1000"); - EXPECT_THROW(BUT1.relabels({"a", "a", "b", "c"}), std::logic_error); - 
EXPECT_THROW(BUT1.relabels({"1", "1", "0", "-1"}), std::logic_error); - EXPECT_THROW(BUT1.relabels({"a"}), std::logic_error); - EXPECT_THROW(BUT1.relabels({"1", "2"}), std::logic_error); - EXPECT_THROW(BUT1.relabels({"a", "b", "c", "d", "e"}), std::logic_error); + EXPECT_THROW(BUT1.relabel({"a", "a", "b", "c"}), std::logic_error); + EXPECT_THROW(BUT1.relabel({"1", "1", "0", "-1"}), std::logic_error); + EXPECT_THROW(BUT1.relabel({"a"}), std::logic_error); + EXPECT_THROW(BUT1.relabel({"1", "2"}), std::logic_error); + EXPECT_THROW(BUT1.relabel({"a", "b", "c", "d", "e"}), std::logic_error); } TEST_F(BlockUniTensorTest, relabels_) { - BUT1.relabels_({"a", "b", "cd", "d"}); + BUT1.relabel_({"a", "b", "cd", "d"}); EXPECT_EQ(BUT1.labels()[0], "a"); EXPECT_EQ(BUT1.labels()[1], "b"); EXPECT_EQ(BUT1.labels()[2], "cd"); EXPECT_EQ(BUT1.labels()[3], "d"); - BUT1.relabels_({"1", "-1", "2", "1000"}); + BUT1.relabel_({"1", "-1", "2", "1000"}); EXPECT_EQ(BUT1.labels()[0], "1"); EXPECT_EQ(BUT1.labels()[1], "-1"); EXPECT_EQ(BUT1.labels()[2], "2"); EXPECT_EQ(BUT1.labels()[3], "1000"); - EXPECT_THROW(BUT1.relabels_({"a", "a", "b", "c"}), std::logic_error); - EXPECT_THROW(BUT1.relabels_({"1", "1", "0", "-1"}), std::logic_error); - EXPECT_THROW(BUT1.relabels_({"a"}), std::logic_error); - EXPECT_THROW(BUT1.relabels_({"1", "2"}), std::logic_error); - EXPECT_THROW(BUT1.relabels_({"a", "b", "c", "d", "e"}), std::logic_error); + EXPECT_THROW(BUT1.relabel_({"a", "a", "b", "c"}), std::logic_error); + EXPECT_THROW(BUT1.relabel_({"1", "1", "0", "-1"}), std::logic_error); + EXPECT_THROW(BUT1.relabel_({"a"}), std::logic_error); + EXPECT_THROW(BUT1.relabel_({"1", "2"}), std::logic_error); + EXPECT_THROW(BUT1.relabel_({"a", "b", "c", "d", "e"}), std::logic_error); } TEST_F(BlockUniTensorTest, relabel) { diff --git a/tests/DenseUniTensor_test.cpp b/tests/DenseUniTensor_test.cpp index 230ffc8e5..41fa9cd24 100644 --- a/tests/DenseUniTensor_test.cpp +++ b/tests/DenseUniTensor_test.cpp @@ -539,7 +539,7 @@ 
TEST_F(DenseUniTensorTest, to_) { } TEST_F(DenseUniTensorTest, relabels) { - auto ut = utzero3456.relabels({"a", "b", "cd", "d"}); + auto ut = utzero3456.relabel({"a", "b", "cd", "d"}); EXPECT_EQ(utzero3456.labels()[0], "0"); EXPECT_EQ(utzero3456.labels()[1], "1"); EXPECT_EQ(utzero3456.labels()[2], "2"); @@ -548,17 +548,17 @@ TEST_F(DenseUniTensorTest, relabels) { EXPECT_EQ(ut.labels()[1], "b"); EXPECT_EQ(ut.labels()[2], "cd"); EXPECT_EQ(ut.labels()[3], "d"); - ut = utzero3456.relabels({"1", "-1", "2", "1000"}); - EXPECT_THROW(ut.relabels({"a", "a", "b", "c"}), std::logic_error); - EXPECT_THROW(ut.relabels({"1", "1", "0", "-1"}), std::logic_error); - EXPECT_THROW(ut.relabels({"a"}), std::logic_error); - EXPECT_THROW(ut.relabels({"1", "2"}), std::logic_error); - EXPECT_THROW(ut.relabels({"a", "b", "c", "d", "e"}), std::logic_error); - EXPECT_THROW(ut_uninit.relabels({"a", "b", "c", "d", "e"}), std::logic_error); + ut = utzero3456.relabel({"1", "-1", "2", "1000"}); + EXPECT_THROW(ut.relabel({"a", "a", "b", "c"}), std::logic_error); + EXPECT_THROW(ut.relabel({"1", "1", "0", "-1"}), std::logic_error); + EXPECT_THROW(ut.relabel({"a"}), std::logic_error); + EXPECT_THROW(ut.relabel({"1", "2"}), std::logic_error); + EXPECT_THROW(ut.relabel({"a", "b", "c", "d", "e"}), std::logic_error); + EXPECT_THROW(ut_uninit.relabel({"a", "b", "c", "d", "e"}), std::logic_error); } TEST_F(DenseUniTensorTest, relabels_) { - auto ut = utzero3456.relabels_({"a", "b", "cd", "d"}); + auto ut = utzero3456.relabel_({"a", "b", "cd", "d"}); EXPECT_EQ(utzero3456.labels()[0], "a"); EXPECT_EQ(utzero3456.labels()[1], "b"); EXPECT_EQ(utzero3456.labels()[2], "cd"); @@ -567,13 +567,13 @@ TEST_F(DenseUniTensorTest, relabels_) { EXPECT_EQ(ut.labels()[1], "b"); EXPECT_EQ(ut.labels()[2], "cd"); EXPECT_EQ(ut.labels()[3], "d"); - ut = utzero3456.relabels_({"1", "-1", "2", "1000"}); - EXPECT_THROW(ut.relabels_({"a", "a", "b", "c"}), std::logic_error); - EXPECT_THROW(ut.relabels_({"1", "1", "0", "-1"}), 
std::logic_error); - EXPECT_THROW(ut.relabels_({"a"}), std::logic_error); - EXPECT_THROW(ut.relabels_({"1", "2"}), std::logic_error); - EXPECT_THROW(ut.relabels_({"a", "b", "c", "d", "e"}), std::logic_error); - EXPECT_THROW(ut_uninit.relabels_({"a", "b", "c", "d", "e"}), std::logic_error); + ut = utzero3456.relabel_({"1", "-1", "2", "1000"}); + EXPECT_THROW(ut.relabel_({"a", "a", "b", "c"}), std::logic_error); + EXPECT_THROW(ut.relabel_({"1", "1", "0", "-1"}), std::logic_error); + EXPECT_THROW(ut.relabel_({"a"}), std::logic_error); + EXPECT_THROW(ut.relabel_({"1", "2"}), std::logic_error); + EXPECT_THROW(ut.relabel_({"a", "b", "c", "d", "e"}), std::logic_error); + EXPECT_THROW(ut_uninit.relabel_({"a", "b", "c", "d", "e"}), std::logic_error); } TEST_F(DenseUniTensorTest, relabel) { diff --git a/tests/OLDtest.cpp b/tests/OLDtest.cpp index 63e5d8667..b1452299a 100644 --- a/tests/OLDtest.cpp +++ b/tests/OLDtest.cpp @@ -91,8 +91,8 @@ int main(int argc, char *argv[]) { return 0; auto T1 = UniTensor(arange(30).reshape(2, 5, 3), 1); - auto T2 = T1.clone().relabels({0, 3, 4}); - auto T3 = T1.clone().relabels({5, 3, 7}); + auto T2 = T1.clone().relabel({0, 3, 4}); + auto T3 = T1.clone().relabel({5, 3, 7}); T1.print_diagram(); T2.print_diagram(); diff --git a/tests/gpu/BlockUniTensor_test.cpp b/tests/gpu/BlockUniTensor_test.cpp index 2d42a592a..45e89c3bb 100644 --- a/tests/gpu/BlockUniTensor_test.cpp +++ b/tests/gpu/BlockUniTensor_test.cpp @@ -39,59 +39,59 @@ TEST_F(BlockUniTensorTest, gpu_Trace) { } TEST_F(BlockUniTensorTest, gpu_relabels) { - BUT1 = BUT1.relabels({"a", "b", "cd", "d"}); + BUT1 = BUT1.relabel({"a", "b", "cd", "d"}); EXPECT_EQ(BUT1.labels()[0], "a"); EXPECT_EQ(BUT1.labels()[1], "b"); EXPECT_EQ(BUT1.labels()[2], "cd"); EXPECT_EQ(BUT1.labels()[3], "d"); - BUT1 = BUT1.relabels({"1", "-1", "2", "1000"}); + BUT1 = BUT1.relabel({"1", "-1", "2", "1000"}); EXPECT_EQ(BUT1.labels()[0], "1"); EXPECT_EQ(BUT1.labels()[1], "-1"); EXPECT_EQ(BUT1.labels()[2], "2"); 
EXPECT_EQ(BUT1.labels()[3], "1000"); - EXPECT_THROW(BUT1.relabels({"a", "a", "b", "c"}), std::logic_error); - EXPECT_THROW(BUT1.relabels({"1", "1", "0", "-1"}), std::logic_error); - EXPECT_THROW(BUT1.relabels({"a"}), std::logic_error); - EXPECT_THROW(BUT1.relabels({"1", "2"}), std::logic_error); - EXPECT_THROW(BUT1.relabels({"a", "b", "c", "d", "e"}), std::logic_error); + EXPECT_THROW(BUT1.relabel({"a", "a", "b", "c"}), std::logic_error); + EXPECT_THROW(BUT1.relabel({"1", "1", "0", "-1"}), std::logic_error); + EXPECT_THROW(BUT1.relabel({"a"}), std::logic_error); + EXPECT_THROW(BUT1.relabel({"1", "2"}), std::logic_error); + EXPECT_THROW(BUT1.relabel({"a", "b", "c", "d", "e"}), std::logic_error); } TEST_F(BlockUniTensorTest, gpu_relabels_) { - BUT1.relabels_({"a", "b", "cd", "d"}); + BUT1.relabel_({"a", "b", "cd", "d"}); EXPECT_EQ(BUT1.labels()[0], "a"); EXPECT_EQ(BUT1.labels()[1], "b"); EXPECT_EQ(BUT1.labels()[2], "cd"); EXPECT_EQ(BUT1.labels()[3], "d"); - BUT1.relabels_({"1", "-1", "2", "1000"}); + BUT1.relabel_({"1", "-1", "2", "1000"}); EXPECT_EQ(BUT1.labels()[0], "1"); EXPECT_EQ(BUT1.labels()[1], "-1"); EXPECT_EQ(BUT1.labels()[2], "2"); EXPECT_EQ(BUT1.labels()[3], "1000"); - EXPECT_THROW(BUT1.relabels_({"a", "a", "b", "c"}), std::logic_error); - EXPECT_THROW(BUT1.relabels_({"1", "1", "0", "-1"}), std::logic_error); - EXPECT_THROW(BUT1.relabels_({"a"}), std::logic_error); - EXPECT_THROW(BUT1.relabels_({"1", "2"}), std::logic_error); - EXPECT_THROW(BUT1.relabels_({"a", "b", "c", "d", "e"}), std::logic_error); + EXPECT_THROW(BUT1.relabel_({"a", "a", "b", "c"}), std::logic_error); + EXPECT_THROW(BUT1.relabel_({"1", "1", "0", "-1"}), std::logic_error); + EXPECT_THROW(BUT1.relabel_({"a"}), std::logic_error); + EXPECT_THROW(BUT1.relabel_({"1", "2"}), std::logic_error); + EXPECT_THROW(BUT1.relabel_({"a", "b", "c", "d", "e"}), std::logic_error); } TEST_F(BlockUniTensorTest, gpu_relabel) { auto tmp = BUT1.clone(); - BUT1 = BUT1.relabels({"a", "b", "cd", "d"}); + BUT1 = 
BUT1.relabel({"a", "b", "cd", "d"}); EXPECT_EQ(BUT1.labels()[0], "a"); EXPECT_EQ(BUT1.labels()[1], "b"); EXPECT_EQ(BUT1.labels()[2], "cd"); EXPECT_EQ(BUT1.labels()[3], "d"); - BUT1 = BUT1.relabels({"1", "-1", "2", "1000"}); + BUT1 = BUT1.relabel({"1", "-1", "2", "1000"}); EXPECT_EQ(BUT1.labels()[0], "1"); EXPECT_EQ(BUT1.labels()[1], "-1"); EXPECT_EQ(BUT1.labels()[2], "2"); EXPECT_EQ(BUT1.labels()[3], "1000"); - EXPECT_THROW(BUT1.relabels({"a", "a", "b", "c"}), std::logic_error); - EXPECT_THROW(BUT1.relabels({"1", "1", "0", "-1"}), std::logic_error); - EXPECT_THROW(BUT1.relabels({"a"}), std::logic_error); - EXPECT_THROW(BUT1.relabels({"1", "2"}), std::logic_error); - EXPECT_THROW(BUT1.relabels({"a", "b", "c", "d", "e"}), std::logic_error); + EXPECT_THROW(BUT1.relabel({"a", "a", "b", "c"}), std::logic_error); + EXPECT_THROW(BUT1.relabel({"1", "1", "0", "-1"}), std::logic_error); + EXPECT_THROW(BUT1.relabel({"a"}), std::logic_error); + EXPECT_THROW(BUT1.relabel({"1", "2"}), std::logic_error); + EXPECT_THROW(BUT1.relabel({"a", "b", "c", "d", "e"}), std::logic_error); BUT1 = tmp; BUT1 = BUT1.relabel("0", "a"); @@ -129,21 +129,21 @@ TEST_F(BlockUniTensorTest, gpu_relabel) { } TEST_F(BlockUniTensorTest, gpu_relabel_) { auto tmp = BUT1.clone(); - BUT1.relabels_({"a", "b", "cd", "d"}); + BUT1.relabel_({"a", "b", "cd", "d"}); EXPECT_EQ(BUT1.labels()[0], "a"); EXPECT_EQ(BUT1.labels()[1], "b"); EXPECT_EQ(BUT1.labels()[2], "cd"); EXPECT_EQ(BUT1.labels()[3], "d"); - BUT1.relabels_({"1", "-1", "2", "1000"}); + BUT1.relabel_({"1", "-1", "2", "1000"}); EXPECT_EQ(BUT1.labels()[0], "1"); EXPECT_EQ(BUT1.labels()[1], "-1"); EXPECT_EQ(BUT1.labels()[2], "2"); EXPECT_EQ(BUT1.labels()[3], "1000"); - EXPECT_THROW(BUT1.relabels_({"a", "a", "b", "c"}), std::logic_error); - EXPECT_THROW(BUT1.relabels_({"1", "1", "0", "-1"}), std::logic_error); - EXPECT_THROW(BUT1.relabels_({"a"}), std::logic_error); - EXPECT_THROW(BUT1.relabels_({"1", "2"}), std::logic_error); - 
EXPECT_THROW(BUT1.relabels_({"a", "b", "c", "d", "e"}), std::logic_error); + EXPECT_THROW(BUT1.relabel_({"a", "a", "b", "c"}), std::logic_error); + EXPECT_THROW(BUT1.relabel_({"1", "1", "0", "-1"}), std::logic_error); + EXPECT_THROW(BUT1.relabel_({"a"}), std::logic_error); + EXPECT_THROW(BUT1.relabel_({"1", "2"}), std::logic_error); + EXPECT_THROW(BUT1.relabel_({"a", "b", "c", "d", "e"}), std::logic_error); BUT1 = tmp; BUT1.relabel_("0", "a"); diff --git a/tests/gpu/DenseUniTensor_test.cpp b/tests/gpu/DenseUniTensor_test.cpp index 890acb36c..2e6864651 100644 --- a/tests/gpu/DenseUniTensor_test.cpp +++ b/tests/gpu/DenseUniTensor_test.cpp @@ -21,30 +21,30 @@ TEST_F(DenseUniTensorTest, gpu_Trace) { } TEST_F(DenseUniTensorTest, gpu_relabels) { - utzero3456 = utzero3456.relabels({"a", "b", "cd", "d"}); + utzero3456 = utzero3456.relabel({"a", "b", "cd", "d"}); EXPECT_EQ(utzero3456.labels()[0], "a"); EXPECT_EQ(utzero3456.labels()[1], "b"); EXPECT_EQ(utzero3456.labels()[2], "cd"); EXPECT_EQ(utzero3456.labels()[3], "d"); - utzero3456 = utzero3456.relabels({"1", "-1", "2", "1000"}); - EXPECT_THROW(utzero3456.relabels({"a", "a", "b", "c"}), std::logic_error); - EXPECT_THROW(utzero3456.relabels({"1", "1", "0", "-1"}), std::logic_error); - EXPECT_THROW(utzero3456.relabels({"a"}), std::logic_error); - EXPECT_THROW(utzero3456.relabels({"1", "2"}), std::logic_error); - EXPECT_THROW(utzero3456.relabels({"a", "b", "c", "d", "e"}), std::logic_error); + utzero3456 = utzero3456.relabel({"1", "-1", "2", "1000"}); + EXPECT_THROW(utzero3456.relabel({"a", "a", "b", "c"}), std::logic_error); + EXPECT_THROW(utzero3456.relabel({"1", "1", "0", "-1"}), std::logic_error); + EXPECT_THROW(utzero3456.relabel({"a"}), std::logic_error); + EXPECT_THROW(utzero3456.relabel({"1", "2"}), std::logic_error); + EXPECT_THROW(utzero3456.relabel({"a", "b", "c", "d", "e"}), std::logic_error); } TEST_F(DenseUniTensorTest, gpu_relabels_) { - utzero3456.relabels_({"a", "b", "cd", "d"}); + utzero3456.relabel_({"a", 
"b", "cd", "d"}); EXPECT_EQ(utzero3456.labels()[0], "a"); EXPECT_EQ(utzero3456.labels()[1], "b"); EXPECT_EQ(utzero3456.labels()[2], "cd"); EXPECT_EQ(utzero3456.labels()[3], "d"); - utzero3456.relabels_({"1", "-1", "2", "1000"}); - EXPECT_THROW(utzero3456.relabels_({"a", "a", "b", "c"}), std::logic_error); - EXPECT_THROW(utzero3456.relabels_({"1", "1", "0", "-1"}), std::logic_error); - EXPECT_THROW(utzero3456.relabels_({"a"}), std::logic_error); - EXPECT_THROW(utzero3456.relabels_({"1", "2"}), std::logic_error); - EXPECT_THROW(utzero3456.relabels_({"a", "b", "c", "d", "e"}), std::logic_error); + utzero3456.relabel_({"1", "-1", "2", "1000"}); + EXPECT_THROW(utzero3456.relabel_({"a", "a", "b", "c"}), std::logic_error); + EXPECT_THROW(utzero3456.relabel_({"1", "1", "0", "-1"}), std::logic_error); + EXPECT_THROW(utzero3456.relabel_({"a"}), std::logic_error); + EXPECT_THROW(utzero3456.relabel_({"1", "2"}), std::logic_error); + EXPECT_THROW(utzero3456.relabel_({"a", "b", "c", "d", "e"}), std::logic_error); } TEST_F(DenseUniTensorTest, gpu_relabel) { diff --git a/tests/gpu/OLDtest.cpp b/tests/gpu/OLDtest.cpp index 63e5d8667..b1452299a 100644 --- a/tests/gpu/OLDtest.cpp +++ b/tests/gpu/OLDtest.cpp @@ -91,8 +91,8 @@ int main(int argc, char *argv[]) { return 0; auto T1 = UniTensor(arange(30).reshape(2, 5, 3), 1); - auto T2 = T1.clone().relabels({0, 3, 4}); - auto T3 = T1.clone().relabels({5, 3, 7}); + auto T2 = T1.clone().relabel({0, 3, 4}); + auto T3 = T1.clone().relabel({5, 3, 7}); T1.print_diagram(); T2.print_diagram(); diff --git a/tests/gpu/linalg_test/Arnoldi_Ut_test.cpp b/tests/gpu/linalg_test/Arnoldi_Ut_test.cpp index e4857dc51..cdc54a25b 100644 --- a/tests/gpu/linalg_test/Arnoldi_Ut_test.cpp +++ b/tests/gpu/linalg_test/Arnoldi_Ut_test.cpp @@ -24,7 +24,7 @@ namespace { const int& device); UniTensor matvec(const UniTensor& l) override { auto tmp = Contracts({A, l, B}, "", true); - tmp.relabels_(l.labels()).set_rowrank(l.rowrank()); + 
tmp.relabel_(l.labels()).set_rowrank(l.rowrank()); return tmp; } }; @@ -34,15 +34,15 @@ namespace { std::vector bonds = {Bond(D), Bond(d), Bond(D)}; A = UniTensor(bonds, {}, -1, in_dtype, in_device) .set_name("A") - .relabels_({"al", "phys", "ar"}) + .relabel_({"al", "phys", "ar"}) .set_rowrank(2); B = UniTensor(bonds, {}, -1, in_dtype, in_device) .set_name("B") - .relabels_({"bl", "phys", "br"}) + .relabel_({"bl", "phys", "br"}) .set_rowrank(2); T_init = UniTensor({Bond(D), Bond(D)}, {}, -1, in_dtype, in_device) .set_name("l") - .relabels_({"al", "bl"}) + .relabel_({"al", "bl"}) .set_rowrank(1); if (Type.is_float(this->dtype())) { double low = -1.0, high = 1.0; @@ -64,7 +64,7 @@ namespace { } UniTensor matvec(const UniTensor& l) override { auto tmp = Contracts({A, l, B}, "", true); - tmp.relabels_(l.labels()).set_rowrank(l.rowrank()); + tmp.relabel_(l.labels()).set_rowrank(l.rowrank()); return tmp; } }; diff --git a/tests/linalg_test/Arnoldi_Ut_test.cpp b/tests/linalg_test/Arnoldi_Ut_test.cpp index 3116ae2d3..9f8fe1196 100644 --- a/tests/linalg_test/Arnoldi_Ut_test.cpp +++ b/tests/linalg_test/Arnoldi_Ut_test.cpp @@ -23,7 +23,7 @@ namespace { const unsigned int& dtype = Type.Double, const int& device = Device.cpu); UniTensor matvec(const UniTensor& l) override { auto tmp = Contracts({A, l, B}, "", true); - tmp.relabels_(l.labels()).set_rowrank(l.rowrank()); + tmp.relabel_(l.labels()).set_rowrank(l.rowrank()); return tmp; } @@ -48,15 +48,15 @@ namespace { std::vector bonds = {Bond(D), Bond(d), Bond(D)}; A = UniTensor(bonds, {}, -1, in_dtype, in_device) .set_name("A") - .relabels_({"al", "phys", "ar"}) + .relabel_({"al", "phys", "ar"}) .set_rowrank(2); B = UniTensor(bonds, {}, -1, in_dtype, in_device) .set_name("B") - .relabels_({"bl", "phys", "br"}) + .relabel_({"bl", "phys", "br"}) .set_rowrank(2); T_init = UniTensor({Bond(D), Bond(D)}, {}, -1, in_dtype, in_device) .set_name("l") - .relabels_({"al", "bl"}) + .relabel_({"al", "bl"}) .set_rowrank(1); if 
(Type.is_float(this->dtype())) { double low = -1.0, high = 1.0; diff --git a/tests/linalg_test/Lanczos_Exp_test.cpp b/tests/linalg_test/Lanczos_Exp_test.cpp index 55d186ee2..c26272f6c 100644 --- a/tests/linalg_test/Lanczos_Exp_test.cpp +++ b/tests/linalg_test/Lanczos_Exp_test.cpp @@ -39,7 +39,7 @@ namespace Lanczos_Exp_Ut_Test { UniTensor matvec(const UniTensor& A) override { auto tmp = Contract(EffH, A); tmp.permute_({"vil", "pi", "vir"}, 1); - tmp.relabels_(A.labels()); + tmp.relabel_(A.labels()); return tmp; } }; @@ -116,7 +116,7 @@ namespace Lanczos_Exp_Ut_Test { double low = -1.0, high = 1.0; UniTensor A = UniTensor({Bond(D), Bond(d), Bond(D)}, {}, -1, dtype, device) .set_name("A") - .relabels_({"vol", "po", "vor"}) + .relabel_({"vol", "po", "vor"}) .set_rowrank_(1); if (Type.is_float(A.dtype())) { random::uniform_(A, low, high, 0); @@ -139,7 +139,7 @@ namespace Lanczos_Exp_Ut_Test { std::vector heff_labels = {"vil", "pi", "vir", "vol", "po", "vor"}; UniTensor HEff = UniTensor(bonds, {}, -1, dtype, device) .set_name("HEff") - .relabels_(heff_labels) + .relabel_(heff_labels) .set_rowrank(bonds.size() / 2); auto HEff_shape = HEff.shape(); auto in_dim = 1; @@ -183,7 +183,7 @@ namespace Lanczos_Exp_Ut_Test { linalg::ExpM((tau * expH.get_block()).reshape(in_dim, out_dim)).reshape(HEff_shape)); auto ans = Contract(expH, Tin); ans.permute_({"vil", "pi", "vir"}, 1); - ans.relabels_(Tin.labels()); + ans.relabel_(Tin.labels()); ans = Contract(expH, Tin); return ans; } From cf3c4e8cf17cf8589ff3929815dff4ff7e3bf440 Mon Sep 17 00:00:00 2001 From: Manuel Schneider Date: Fri, 5 Dec 2025 16:31:43 +0800 Subject: [PATCH 02/10] replaced .set_label( by .relabel( --- benchmarks/linalg/Lanczos_bm.cpp | 2 +- ...xGuide_PythonCodeExamples_2023_06_30.ipynb | 4 +- docs/code/iTEBD/iTEBD.py | 14 +- example/DMRG/dmrg_two_sites_U1.py | 16 +- example/DMRG/dmrg_two_sites_dense.py | 24 +-- example/Network/Construct.cpp | 8 +- example/iTEBD/iTEBD.py | 12 +- example/iTEBD/iTEBD_U1.py | 12 +- 
example/iTEBD/iTEBD_tag.py | 12 +- pybind/unitensor_py.cpp | 2 +- src/RegularGncon.cpp | 4 +- src/RegularNetwork.cpp | 2 +- .../linalg_internal_cpu/Trace_internal.cpp | 2 +- .../linalg_internal_gpu/cuTrace_internal.cu | 4 +- src/linalg/Add.cpp | 2 +- src/linalg/Div.cpp | 4 +- src/linalg/Gesvd.cpp | 4 +- src/linalg/Gesvd_truncate.cpp | 4 +- src/linalg/Mul.cpp | 2 +- src/linalg/Qdr.cpp | 8 +- src/linalg/Qr.cpp | 4 +- src/linalg/Rsvd.cpp | 4 +- src/linalg/Rsvd_truncate.cpp | 4 +- src/linalg/Sub.cpp | 4 +- src/linalg/Svd.cpp | 4 +- src/linalg/Svd_truncate.cpp | 4 +- src/ncon.cpp | 2 +- src/tn_algo/DMRG.cpp | 20 +-- src/tn_algo/RegularMPS.cpp | 6 +- src/tn_algo/iMPS.cpp | 2 +- tests/BlockUniTensor_test.cpp | 48 +----- tests/Contract_test.h | 6 +- tests/DenseUniTensor_test.cpp | 142 +++++++----------- tests/OLDtest.cpp | 2 +- tests/UniTensor_base_test.cpp | 2 +- tests/gpu/BlockUniTensor_test.cpp | 48 +----- tests/gpu/Contract_test.h | 6 +- tests/gpu/DenseUniTensor_test.cpp | 39 +---- tests/gpu/OLDtest.cpp | 2 +- tests/gpu/UniTensor_base_test.cpp | 2 +- tests/gpu/linalg_test/ExpH_test.cpp | 2 +- tests/gpu/linalg_test/GeSvd_test.cpp | 2 +- tests/gpu/linalg_test/Lanczos_Gnd_test.cpp | 6 +- tests/gpu/linalg_test/Svd_test.cpp | 2 +- tests/gpu/linalg_test/linalg_test.h | 2 +- tests/linalg_test/Arnoldi_Ut_test.cpp | 4 +- tests/linalg_test/ExpH_test.cpp | 2 +- tests/linalg_test/GeSvd_test.cpp | 2 +- tests/linalg_test/Gesvd_truncate_test.cpp | 2 +- tests/linalg_test/Lanczos_Exp_test.cpp | 8 +- tests/linalg_test/Lanczos_Gnd_test.cpp | 10 +- tests/linalg_test/Rsvd_test.cpp | 2 +- tests/linalg_test/Rsvd_truncate_test.cpp | 2 +- tests/linalg_test/Svd_test.cpp | 2 +- tests/linalg_test/Svd_truncate_test.cpp | 2 +- tests/linalg_test/linalg_test.cpp | 28 ++-- tests/linalg_test/linalg_test.h | 2 +- 57 files changed, 219 insertions(+), 354 deletions(-) diff --git a/benchmarks/linalg/Lanczos_bm.cpp b/benchmarks/linalg/Lanczos_bm.cpp index 5db141921..44b019b50 100644 --- 
a/benchmarks/linalg/Lanczos_bm.cpp +++ b/benchmarks/linalg/Lanczos_bm.cpp @@ -27,7 +27,7 @@ namespace BMTest_Lanczos { * | | + | | "pi" * |_|--"vol" "po" "vor"--|_| * - * Then relabels ["vil", "pi", "vir"] -> ["vol", "po", "vor"] + * Then relabel ["vil", "pi", "vir"] -> ["vol", "po", "vor"] * * "vil":virtual in bond left * "po":physical out bond diff --git a/docs/code/CytnxGuide_PythonCodeExamples_2023_06_30.ipynb b/docs/code/CytnxGuide_PythonCodeExamples_2023_06_30.ipynb index 9c8137c95..f5c31008d 100644 --- a/docs/code/CytnxGuide_PythonCodeExamples_2023_06_30.ipynb +++ b/docs/code/CytnxGuide_PythonCodeExamples_2023_06_30.ipynb @@ -3664,12 +3664,12 @@ "\n", "s0 = 1./s0\n", "\n", - "s0.set_labels(['0','1'])\n", + "s0.relabel_(['0','1'])\n", "\n", "s2 = cytnx.Contract(cytnx.Contract(sL,s0),sR)\n", "\n", "\n", - "s2.set_labels(['-10','-11'])\n", + "s2.relabel_(['-10','-11'])\n", "\n", "A.set_label(2,'-10')\n", "\n", diff --git a/docs/code/iTEBD/iTEBD.py b/docs/code/iTEBD/iTEBD.py index 60c838d07..6027e9cde 100644 --- a/docs/code/iTEBD/iTEBD.py +++ b/docs/code/iTEBD/iTEBD.py @@ -79,10 +79,10 @@ Elast = 0 for i in range(10000): - A.set_labels([-1,0,-2]) - B.set_labels([-3,1,-4]) - la.set_labels([-2,-3]) - lb.set_labels([-4,-5]) + A.relabel_([-1,0,-2]) + B.relabel_([-3,1,-4]) + la.relabel_([-2,-3]) + lb.relabel_([-4,-5]) ## contract all X = cyx.Contract(cyx.Contract(A,la),cyx.Contract(B,lb)) @@ -103,7 +103,7 @@ # Note that X,Xt contract will result a rank-0 tensor, which can use item() toget element XNorm = cyx.Contract(X,Xt).item() XH = cyx.Contract(X,H) - XH.set_labels([-4,-5,0,1]) + XH.relabel_([-4,-5,0,1]) XHX = cyx.Contract(Xt,XH).item() ## rank-0 E = XHX/XNorm @@ -141,8 +141,8 @@ # --lb-A'-la-B'-lb-- # # again, but A' and B' are updated - A.set_labels([-1,0,-2]); A.set_rowrank(1); - B.set_labels([-3,1,-4]); B.set_rowrank(1); + A.relabel_([-1,0,-2]); A.set_rowrank(1); + B.relabel_([-3,1,-4]); B.set_rowrank(1); #A.print_diagram() #B.print_diagram() diff --git 
a/example/DMRG/dmrg_two_sites_U1.py b/example/DMRG/dmrg_two_sites_U1.py index 2a4e5fbc1..e99747bc3 100644 --- a/example/DMRG/dmrg_two_sites_U1.py +++ b/example/DMRG/dmrg_two_sites_U1.py @@ -19,7 +19,7 @@ def matvec(self, v): lbl = v.labels() self.anet.PutUniTensor("psi",v) out = self.anet.Launch() - out.relabels_(lbl) + out.relabel_(lbl) return out def optimize_psi(psi, functArgs, maxit=2, krydim=4): @@ -98,7 +98,7 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): A[k] = cytnx.UniTensor([B1,B2,B3],rowrank=2) lbl = [str(2*k),str(2*k+1),str(2*k+2)] - A[k].set_labels(lbl) + A[k].relabel_(lbl) A[k].get_block_()[0] = 1 lbls.append(lbl) # store the labels for later convinience. @@ -131,12 +131,12 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): psi.set_rowrank_(2) # maintain rowrank to perform the svd s,A[p],A[p+1] = cytnx.linalg.Svd_truncate(psi,new_dim) - A[p+1].relabels_(lbls[p+1]); # set the label back to be consistent + A[p+1].relabel_(lbls[p+1]); # set the label back to be consistent s = s/s.Norm().item() # normalize s A[p] = cytnx.Contract(A[p],s) # absorb s into next neighbor - A[p].relabels_(lbls[p]); # set the label back to be consistent + A[p].relabel_(lbls[p]); # set the label back to be consistent # update LR from right to left: anet = cytnx.Network() @@ -152,7 +152,7 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): A[0].set_rowrank_(1) _,A[0] = cytnx.linalg.Gesvd(A[0],is_U=False, is_vT=True) - A[0].relabels_(lbls[0]); #set the label back to be consistent + A[0].relabel_(lbls[0]); #set the label back to be consistent for p in range(Nsites-1): dim_l = A[p].shape()[0] @@ -165,12 +165,12 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): psi.set_rowrank_(2) # maintain rowrank to perform the svd s,A[p],A[p+1] = cytnx.linalg.Svd_truncate(psi,new_dim) - A[p].relabels_(lbls[p]); #set the label back to be consistent + A[p].relabel_(lbls[p]); #set the label back to be consistent s = s/s.Norm().item() # normalize s A[p+1] = 
cytnx.Contract(s,A[p+1]) ## absorb s into next neighbor. - A[p+1].relabels_(lbls[p+1]); #set the label back to be consistent + A[p+1].relabel_(lbls[p+1]); #set the label back to be consistent # update LR from left to right: anet = cytnx.Network() @@ -186,7 +186,7 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): A[-1].set_rowrank_(2) _,A[-1] = cytnx.linalg.Gesvd(A[-1],is_U=True,is_vT=False) ## last one. - A[-1].relabels_(lbls[-1]); #set the label back to be consistent + A[-1].relabel_(lbls[-1]); #set the label back to be consistent return Ekeep diff --git a/example/DMRG/dmrg_two_sites_dense.py b/example/DMRG/dmrg_two_sites_dense.py index 72a6775e9..4bbe2109f 100644 --- a/example/DMRG/dmrg_two_sites_dense.py +++ b/example/DMRG/dmrg_two_sites_dense.py @@ -18,7 +18,7 @@ def matvec(self, v): lbl = v.labels() self.anet.PutUniTensor("psi",v) out = self.anet.Launch() - out.relabels_(lbl) + out.relabel_(lbl) return out def optimize_psi(psi, functArgs, maxit=2, krydim=4): @@ -61,7 +61,7 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): lbls = [] # List for storing the MPS labels A = [None for i in range(Nsites)] A[0] = cytnx.UniTensor(cytnx.random.normal([1, d, min(chi, d)], 0., 1.), rowrank = 2) - A[0].relabels_(["0","1","2"]) + A[0].relabel_(["0","1","2"]) lbls.append(["0","1","2"]) # store the labels for later convinience. for k in range(1,Nsites): @@ -70,7 +70,7 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): A[k] = cytnx.UniTensor(cytnx.random.normal([dim1, dim2, dim3],0.,1.), rowrank = 2) lbl = [str(2*k),str(2*k+1),str(2*k+2)] - A[k].relabels_(lbl) + A[k].relabel_(lbl) lbls.append(lbl) # store the labels for later convinience. 
LR = [None for i in range(Nsites+1)] @@ -96,11 +96,11 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): LR[p+1] = anet.Launch() # Recover the original MPS labels - A[p].relabels_(lbls[p]) - A[p+1].relabels_(lbls[p+1]) + A[p].relabel_(lbls[p]) + A[p+1].relabel_(lbls[p+1]) _,A[-1] = cytnx.linalg.Gesvd(A[-1],is_U=True,is_vT=False) ## last one. - A[-1].relabels_(lbls[-1]) # Recover the original MPS labels + A[-1].relabel_(lbls[-1]) # Recover the original MPS labels Ekeep = [] for k in range(1, numsweeps+1): @@ -116,12 +116,12 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): psi.set_rowrank_(2) # maintain rowrank to perform the svd s,A[p],A[p+1] = cytnx.linalg.Svd_truncate(psi,new_dim) - A[p+1].relabels_(lbls[p+1]); # set the label back to be consistent + A[p+1].relabel_(lbls[p+1]); # set the label back to be consistent s = s/s.Norm().item() # normalize s A[p] = cytnx.Contract(A[p],s) # absorb s into next neighbor - A[p].relabels_(lbls[p]); # set the label back to be consistent + A[p].relabel_(lbls[p]); # set the label back to be consistent # update LR from right to left: anet = cytnx.Network() @@ -138,7 +138,7 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): A[0].set_rowrank_(1) _,A[0] = cytnx.linalg.Gesvd(A[0],is_U=False, is_vT=True) - A[0].relabels_(lbls[0]); #set the label back to be consistent + A[0].relabel_(lbls[0]); #set the label back to be consistent for p in range(Nsites-1): dim_l = A[p].shape()[0] @@ -151,12 +151,12 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): psi.set_rowrank_(2) # maintain rowrank to perform the svd s,A[p],A[p+1] = cytnx.linalg.Svd_truncate(psi,new_dim) - A[p].relabels_(lbls[p]); #set the label back to be consistent + A[p].relabel_(lbls[p]); #set the label back to be consistent s = s/s.Norm().item() # normalize s A[p+1] = cytnx.Contract(s,A[p+1]) ## absorb s into next neighbor. 
- A[p+1].relabels_(lbls[p+1]); #set the label back to be consistent + A[p+1].relabel_(lbls[p+1]); #set the label back to be consistent # update LR from left to right: anet = cytnx.Network() @@ -174,7 +174,7 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): A[-1].set_rowrank_(2) _,A[-1] = cytnx.linalg.Gesvd(A[-1],is_U=True,is_vT=False) ## last one. - A[-1].relabels_(lbls[-1]); #set the label back to be consistent + A[-1].relabel_(lbls[-1]); #set the label back to be consistent return Ekeep if __name__ == '__main__': diff --git a/example/Network/Construct.cpp b/example/Network/Construct.cpp index 38b92a475..27a935b1a 100644 --- a/example/Network/Construct.cpp +++ b/example/Network/Construct.cpp @@ -9,10 +9,10 @@ int main(int argc, char* argv[]) { auto tc = UniTensor(arange(24).reshape(2, 3, 4), 1); auto td = UniTensor(arange(24).reshape(2, 3, 4), 1); - ta.set_labels({0, 1, 2}); - tb.set_labels({0, 3, 4}); - tc.set_labels({5, 1, 6}); - td.set_labels({5, 7, 8}); + ta.relabel_({0, 1, 2}); + tb.relabel_({0, 3, 4}); + tc.relabel_({5, 1, 6}); + td.relabel_({5, 7, 8}); UniTensor oot = Network::Contract( diff --git a/example/iTEBD/iTEBD.py b/example/iTEBD/iTEBD.py index 73a7e4226..a392a9a76 100644 --- a/example/iTEBD/iTEBD.py +++ b/example/iTEBD/iTEBD.py @@ -64,10 +64,10 @@ def itebd_tfim(chi = 20, J = 1.0, Hx = 1.0, dt = 0.1, CvgCrit = 1.0e-10): Elast = 0 for i in range(10000): - A.set_labels(['a','0','b']) - B.set_labels(['c','1','d']) - la.set_labels(['b','c']) - lb.set_labels(['d','e']) + A.relabel_(['a','0','b']) + B.relabel_(['c','1','d']) + la.relabel_(['b','c']) + lb.relabel_(['d','e']) ## contract all @@ -89,7 +89,7 @@ def itebd_tfim(chi = 20, J = 1.0, Hx = 1.0, dt = 0.1, CvgCrit = 1.0e-10): # Note that X,Xt contract will result a rank-0 tensor, which can use item() toget element XNorm = cytnx.Contract(X,Xt).item() XH = cytnx.Contract(X,H) - XH.set_labels(['d','e','0','1']) + XH.relabel_(['d','e','0','1']) XHX = cytnx.Contract(Xt,XH).item() ## rank-0 @@ -133,7 
+133,7 @@ def itebd_tfim(chi = 20, J = 1.0, Hx = 1.0, dt = 0.1, CvgCrit = 1.0e-10): # again, but A' and B' are updated lb_inv = 1./lb # lb_inv.print_diagram(); - lb_inv.set_labels(['e','d']) + lb_inv.relabel_(['e','d']) A = cytnx.Contract(lb_inv,A) B = cytnx.Contract(B,lb_inv) diff --git a/example/iTEBD/iTEBD_U1.py b/example/iTEBD/iTEBD_U1.py index e5de81834..5dcabc309 100644 --- a/example/iTEBD/iTEBD_U1.py +++ b/example/iTEBD/iTEBD_U1.py @@ -73,10 +73,10 @@ def itebd_heisenberg(chi = 32, J = 1.0, dt = 0.1, CvgCrit = 1.0e-12): Elast = 0 for i in range(10000): - A.set_labels(["a","0","b"]) - B.set_labels(["c","1","d"]) - la.set_labels(["b","c"]) - lb.set_labels(["d","e"]) + A.relabel_(["a","0","b"]) + B.relabel_(["c","1","d"]) + la.relabel_(["b","c"]) + lb.relabel_(["d","e"]) ## contract all tmpA = cytnx.Contract(A,la) @@ -101,7 +101,7 @@ def itebd_heisenberg(chi = 32, J = 1.0, dt = 0.1, CvgCrit = 1.0e-12): ## XH = cytnx.Contract(X,H) #XH.print_diagram() - XH.set_labels(['d','e','0','1']) + XH.relabel_(['d','e','0','1']) XHX = cytnx.Contract(Xt,XH).item() E = XHX/XNorm @@ -141,7 +141,7 @@ def itebd_heisenberg(chi = 32, J = 1.0, dt = 0.1, CvgCrit = 1.0e-12): T = lb_inv.get_block_(b); lb_inv.put_block_(1./T,b); - lb_inv.set_labels(['e','d']) + lb_inv.relabel_(['e','d']) A = cytnx.Contract(lb_inv,A) B = cytnx.Contract(B,lb_inv) diff --git a/example/iTEBD/iTEBD_tag.py b/example/iTEBD/iTEBD_tag.py index eaaad42e9..94609b254 100644 --- a/example/iTEBD/iTEBD_tag.py +++ b/example/iTEBD/iTEBD_tag.py @@ -66,10 +66,10 @@ def itebd_tfim_tag(chi = 20, J = 1.0, Hx = 1.0, dt = 0.1, CvgCrit = 1.0e-10): Elast = 0 for i in range(10000): - A.set_labels(['a','0','b']) - B.set_labels(['c','1','d']) - la.set_labels(['b','c']) - lb.set_labels(['d','e']) + A.relabel_(['a','0','b']) + B.relabel_(['c','1','d']) + la.relabel_(['b','c']) + lb.relabel_(['d','e']) @@ -93,7 +93,7 @@ def itebd_tfim_tag(chi = 20, J = 1.0, Hx = 1.0, dt = 0.1, CvgCrit = 1.0e-10): # Note that X,Xt contract will result a 
rank-0 tensor, which can use item() toget element XNorm = cytnx.Contract(X,Xt).item() XH = cytnx.Contract(X,H) - XH.set_labels(['d','e','0','1']) + XH.relabel_(['d','e','0','1']) XHX = cytnx.Contract(Xt,XH).item() ## rank-0 E = XHX/XNorm @@ -132,7 +132,7 @@ def itebd_tfim_tag(chi = 20, J = 1.0, Hx = 1.0, dt = 0.1, CvgCrit = 1.0e-10): # # again, but A' and B' are updated lb_inv = 1./lb - lb_inv.set_labels(['e','d']) + lb_inv.relabel_(['e','d']) A = cytnx.Contract(lb_inv,A) B = cytnx.Contract(B,lb_inv) diff --git a/pybind/unitensor_py.cpp b/pybind/unitensor_py.cpp index fd24d6a00..8a60e9373 100644 --- a/pybind/unitensor_py.cpp +++ b/pybind/unitensor_py.cpp @@ -164,7 +164,7 @@ void unitensor_binding(py::module &m) { .def("c_set_labels",[](UniTensor &self, const std::vector &new_labels){ - return self.set_labels(new_labels); + return self.relabel_(new_labels); },py::arg("new_labels")) diff --git a/src/RegularGncon.cpp b/src/RegularGncon.cpp index 5697bdab8..8e353e029 100644 --- a/src/RegularGncon.cpp +++ b/src/RegularGncon.cpp @@ -647,7 +647,7 @@ namespace cytnx { // std::cout<tensors[idx].set_labels(this->label_arr[idx]);//this conflict + // this->tensors[idx].relabel_(this->label_arr[idx]);//this conflict this->CtTree.base_nodes[idx]->utensor = this->tensors[idx].relabel(this->label_arr[idx]); // this conflict // this->CtTree.base_nodes[idx].name = this->tensors[idx].name(); @@ -741,7 +741,7 @@ namespace cytnx { // //5. reset back the original labels: // for(cytnx_uint64 i=0;itensors.size();i++){ - // this->tensors[i].set_labels(old_labels[i]); + // this->tensors[i].relabel_(old_labels[i]); // } // 6. permute accroding to pre-set labels: diff --git a/src/RegularNetwork.cpp b/src/RegularNetwork.cpp index 069f8024c..3aeebdad7 100644 --- a/src/RegularNetwork.cpp +++ b/src/RegularNetwork.cpp @@ -1025,7 +1025,7 @@ namespace cytnx { // //5. 
reset back the original labels: // for(cytnx_uint64 i=0;itensors.size();i++){ - // this->tensors[i].set_labels(old_labels[i]); + // this->tensors[i].relabel_(old_labels[i]); // } // 6. permute accroding to pre-set labels: diff --git a/src/backend/linalg_internal_cpu/Trace_internal.cpp b/src/backend/linalg_internal_cpu/Trace_internal.cpp index 9a047f492..e5e0d770d 100644 --- a/src/backend/linalg_internal_cpu/Trace_internal.cpp +++ b/src/backend/linalg_internal_cpu/Trace_internal.cpp @@ -29,7 +29,7 @@ namespace cytnx { cytnx::UniTensor I_UT = cytnx::UniTensor(eye(Ndiag, Tn.dtype()), false, -1); UniTensor UTn = UniTensor(Tn, false, 2); - I_UT.set_labels({UTn._impl->_labels[ax1], UTn._impl->_labels[ax2]}); + I_UT.relabel_({UTn._impl->_labels[ax1], UTn._impl->_labels[ax2]}); out = Contract(I_UT, UTn).get_block_(); diff --git a/src/backend/linalg_internal_gpu/cuTrace_internal.cu b/src/backend/linalg_internal_gpu/cuTrace_internal.cu index 878d840f4..f7ae723e7 100644 --- a/src/backend/linalg_internal_gpu/cuTrace_internal.cu +++ b/src/backend/linalg_internal_gpu/cuTrace_internal.cu @@ -30,9 +30,9 @@ namespace cytnx { const cytnx_uint64 &ax2) { cytnx::UniTensor I_UT = cytnx::UniTensor(zeros(Ndiag, Tn.dtype(), Tn.device()), true, -1); - I_UT.set_labels({"0", "1"}); + I_UT.relabel_({"0", "1"}); UniTensor UTn = UniTensor(Tn, false, 2); - UTn.set_labels( + UTn.relabel_( vec_cast(vec_range(100, 100 + UTn.labels().size()))); UTn._impl->_labels[ax1] = "0"; UTn._impl->_labels[ax2] = "1"; diff --git a/src/linalg/Add.cpp b/src/linalg/Add.cpp index e0193e76f..26f3c5502 100644 --- a/src/linalg/Add.cpp +++ b/src/linalg/Add.cpp @@ -612,7 +612,7 @@ namespace cytnx { out = Lt.clone(); out.Add_(Rt); } - out.set_labels(vec_range(Lt.rank())); + out.relabel_(vec_range(Lt.rank())); out.set_name(""); return out; diff --git a/src/linalg/Div.cpp b/src/linalg/Div.cpp index 2dad1e956..2b9b9f1e2 100644 --- a/src/linalg/Div.cpp +++ b/src/linalg/Div.cpp @@ -893,7 +893,7 @@ namespace cytnx { if 
(Lt.dtype() > Rt.dtype()) { out = out.astype(Rt.dtype()); } - out.set_labels(vec_range(Lt.rank())); + out.relabel_(vec_range(Lt.rank())); out.set_name(""); out.Div_(Rt); @@ -947,7 +947,7 @@ namespace cytnx { out = Lt.clone(); out.Div_(rc); } - // out.set_labels(vec_range(Lt.rank())); + // out.relabel_(vec_range(Lt.rank())); out.set_name(""); return out; diff --git a/src/linalg/Gesvd.cpp b/src/linalg/Gesvd.cpp index d6d1bb86d..a48157ccb 100644 --- a/src/linalg/Gesvd.cpp +++ b/src/linalg/Gesvd.cpp @@ -139,7 +139,7 @@ namespace cytnx { Cy_U.Init(outT[t], false, Tin.rowrank()); vector labelU(oldlabel.begin(), oldlabel.begin() + Tin.rowrank()); labelU.push_back(Cy_S.labels()[0]); - Cy_U.set_labels(labelU); + Cy_U.relabel_(labelU); t++; // U } if (is_vT) { @@ -156,7 +156,7 @@ namespace cytnx { // memcpy(&labelvT[1], &oldlabel[Tin.rowrank()], sizeof(cytnx_int64) * (labelvT.size() - // 1)); std::copy(oldlabel.begin() + Tin.rowrank(), oldlabel.end(), labelvT.begin() + 1); - Cy_vT.set_labels(labelvT); + Cy_vT.relabel_(labelvT); t++; // vT } // if tag, then update the tagging informations diff --git a/src/linalg/Gesvd_truncate.cpp b/src/linalg/Gesvd_truncate.cpp index 4b0c29995..da3611600 100644 --- a/src/linalg/Gesvd_truncate.cpp +++ b/src/linalg/Gesvd_truncate.cpp @@ -155,7 +155,7 @@ namespace cytnx { Cy_U.Init(outT[t], false, Tin.rowrank()); std::vector labelU(oldlabel.begin(), oldlabel.begin() + Tin.rowrank()); labelU.push_back(Cy_S.labels()[0]); - Cy_U.set_labels(labelU); + Cy_U.relabel_(labelU); t++; // U } @@ -173,7 +173,7 @@ namespace cytnx { std::vector labelvT(shapevT.size()); labelvT[0] = Cy_S.labels()[1]; std::copy(oldlabel.begin() + Tin.rowrank(), oldlabel.end(), labelvT.begin() + 1); - Cy_vT.set_labels(labelvT); + Cy_vT.relabel_(labelvT); t++; // vT } diff --git a/src/linalg/Mul.cpp b/src/linalg/Mul.cpp index 7469a3747..8367a6832 100644 --- a/src/linalg/Mul.cpp +++ b/src/linalg/Mul.cpp @@ -665,7 +665,7 @@ namespace cytnx { out = Lt.clone(); out.Mul_(Rt); } - 
out.set_labels(vec_range(Lt.rank())); + out.relabel_(vec_range(Lt.rank())); out.set_name(""); return out; diff --git a/src/linalg/Qdr.cpp b/src/linalg/Qdr.cpp index a252d4ce6..aff2d23bd 100644 --- a/src/linalg/Qdr.cpp +++ b/src/linalg/Qdr.cpp @@ -130,13 +130,13 @@ namespace cytnx { Qlbl.push_back(newlbl); outT[0].reshape_(Qshape); outCyT[0] = UniTensor(outT[0], false, Qshape.size() - 1); - outCyT[0].set_labels(Qlbl); + outCyT[0].relabel_(Qlbl); // D outCyT[1] = UniTensor(outT[1], true, 1); - // outCyT[1].set_labels({newlbl, newlbl - 1}); + // outCyT[1].relabel_({newlbl, newlbl - 1}); // newlbl -= 1; - outCyT[1].set_labels({newlbl, string("_aux_R")}); + outCyT[1].relabel_({newlbl, string("_aux_R")}); newlbl = outCyT[1].labels().back(); // R @@ -150,7 +150,7 @@ namespace cytnx { } outT[2].reshape_(Qshape); outCyT[2] = UniTensor(outT[2], false, 1); - outCyT[2].set_labels(Qlbl); + outCyT[2].relabel_(Qlbl); // tau if (is_tau) { diff --git a/src/linalg/Qr.cpp b/src/linalg/Qr.cpp index 01bd9bd8a..10b0c00a6 100644 --- a/src/linalg/Qr.cpp +++ b/src/linalg/Qr.cpp @@ -132,7 +132,7 @@ namespace cytnx { Qlbl.push_back(newlbl); outT[0].reshape_(Qshape); outCyT[0] = UniTensor(outT[0], false, Qshape.size() - 1); - outCyT[0].set_labels(Qlbl); + outCyT[0].relabel_(Qlbl); // R Qshape.clear(); @@ -145,7 +145,7 @@ namespace cytnx { } outT[1].reshape_(Qshape); outCyT[1] = UniTensor(outT[1], false, 1); - outCyT[1].set_labels(Qlbl); + outCyT[1].relabel_(Qlbl); // tau if (is_tau) { diff --git a/src/linalg/Rsvd.cpp b/src/linalg/Rsvd.cpp index 0699a9ef0..98a4f0d67 100644 --- a/src/linalg/Rsvd.cpp +++ b/src/linalg/Rsvd.cpp @@ -152,7 +152,7 @@ namespace cytnx { Cy_U.Init(outT[t], false, Tin.rowrank()); std::vector labelU(oldlabel.begin(), oldlabel.begin() + Tin.rowrank()); labelU.push_back(Cy_S.labels()[0]); - Cy_U.set_labels(labelU); + Cy_U.relabel_(labelU); t++; // U } if (is_vT) { @@ -169,7 +169,7 @@ namespace cytnx { // memcpy(&labelvT[1], &oldlabel[Tin.rowrank()], sizeof(cytnx_int64) * 
(labelvT.size() - // 1)); std::copy(oldlabel.begin() + Tin.rowrank(), oldlabel.end(), labelvT.begin() + 1); - Cy_vT.set_labels(labelvT); + Cy_vT.relabel_(labelvT); t++; // vT } // if tag, then update the tagging informations diff --git a/src/linalg/Rsvd_truncate.cpp b/src/linalg/Rsvd_truncate.cpp index d251edc3e..7f06a8641 100644 --- a/src/linalg/Rsvd_truncate.cpp +++ b/src/linalg/Rsvd_truncate.cpp @@ -191,7 +191,7 @@ namespace cytnx { Cy_U.Init(outT[t], false, Tin.rowrank()); std::vector labelU(oldlabel.begin(), oldlabel.begin() + Tin.rowrank()); labelU.push_back(Cy_S.labels()[0]); - Cy_U.set_labels(labelU); + Cy_U.relabel_(labelU); t++; // U } @@ -209,7 +209,7 @@ namespace cytnx { std::vector labelvT(shapevT.size()); labelvT[0] = Cy_S.labels()[1]; std::copy(oldlabel.begin() + Tin.rowrank(), oldlabel.end(), labelvT.begin() + 1); - Cy_vT.set_labels(labelvT); + Cy_vT.relabel_(labelvT); t++; // vT } diff --git a/src/linalg/Sub.cpp b/src/linalg/Sub.cpp index 1f5d068a4..4729904c6 100644 --- a/src/linalg/Sub.cpp +++ b/src/linalg/Sub.cpp @@ -905,7 +905,7 @@ namespace cytnx { if (Lt.dtype() > Rt.dtype()) { out = out.astype(Rt.dtype()); } - out.set_labels(vec_range(Lt.rank())); + out.relabel_(vec_range(Lt.rank())); out.set_name(""); out.Sub_(Rt); @@ -959,7 +959,7 @@ namespace cytnx { out = Lt.clone(); out.Sub_(rc); } - // out.set_labels(vec_range(Lt.rank())); + // out.relabel_(vec_range(Lt.rank())); out.set_name(""); return out; diff --git a/src/linalg/Svd.cpp b/src/linalg/Svd.cpp index db5fa338c..c819cab96 100644 --- a/src/linalg/Svd.cpp +++ b/src/linalg/Svd.cpp @@ -148,7 +148,7 @@ namespace cytnx { Cy_U.Init(outT[t], false, Tin.rowrank()); vector labelU(oldlabel.begin(), oldlabel.begin() + Tin.rowrank()); labelU.push_back(Cy_S.labels()[0]); - Cy_U.set_labels(labelU); + Cy_U.relabel_(labelU); t++; // U } if (compute_uv) { @@ -165,7 +165,7 @@ namespace cytnx { // memcpy(&labelvT[1], &oldlabel[Tin.rowrank()], sizeof(cytnx_int64) * (labelvT.size() - // 1)); 
std::copy(oldlabel.begin() + Tin.rowrank(), oldlabel.end(), labelvT.begin() + 1); - Cy_vT.set_labels(labelvT); + Cy_vT.relabel_(labelvT); t++; // vT } // if tag, then update the tagging informations diff --git a/src/linalg/Svd_truncate.cpp b/src/linalg/Svd_truncate.cpp index 19147ae2c..dc3982799 100644 --- a/src/linalg/Svd_truncate.cpp +++ b/src/linalg/Svd_truncate.cpp @@ -123,7 +123,7 @@ namespace cytnx { Cy_U.Init(outT[t], false, Tin.rowrank()); std::vector labelU(oldlabel.begin(), oldlabel.begin() + Tin.rowrank()); labelU.push_back(Cy_S.labels()[0]); - Cy_U.set_labels(labelU); + Cy_U.relabel_(labelU); t++; // U } @@ -141,7 +141,7 @@ namespace cytnx { std::vector labelvT(shapevT.size()); labelvT[0] = Cy_S.labels()[1]; std::copy(oldlabel.begin() + Tin.rowrank(), oldlabel.end(), labelvT.begin() + 1); - Cy_vT.set_labels(labelvT); + Cy_vT.relabel_(labelvT); t++; // vT } diff --git a/src/ncon.cpp b/src/ncon.cpp index 1b103a6da..acd8b44f9 100644 --- a/src/ncon.cpp +++ b/src/ncon.cpp @@ -95,7 +95,7 @@ namespace cytnx { N.setOrder(true, ""); out = N.Launch(); } - if (!out_labels.empty()) out.set_labels(out_labels); + if (!out_labels.empty()) out.relabel_(out_labels); return out; } } // namespace cytnx diff --git a/src/tn_algo/DMRG.cpp b/src/tn_algo/DMRG.cpp index 130c505a1..4f2c99cfa 100644 --- a/src/tn_algo/DMRG.cpp +++ b/src/tn_algo/DMRG.cpp @@ -57,7 +57,7 @@ namespace cytnx { Scalar c = Contract(r.Dagger(), v).item(); out += this->weight * c * r; } - out.set_labels(lbls); + out.relabel_(lbls); return out.contiguous(); } @@ -182,7 +182,7 @@ namespace cytnx { auto Lenv = this->LR[p].relabel({"-2", "-1", "-3"}); auto tA = this->mps.data()[p].relabel({"-1", "-4", "1"}); auto tAc = this->mps.data()[p].Conj(); - tAc.set_labels({"-3", "-5", "2"}); + tAc.relabel_({"-3", "-5", "2"}); auto M = this->mpo.get_op(p).relabel({"-2", "0", "-4", "-5"}); this->LR[p + 1] = Network::Contract({Lenv, tA, tAc, M}, ";0,1,2").Launch(true); } @@ -303,7 +303,7 @@ namespace cytnx { 
psi_T.reshape_(dim_l, this->mps.phys_dim(p), this->mps.phys_dim(p + 1), dim_r); // convert psi back to 4-leg form psi = UniTensor(psi_T, false, 2); - psi.set_labels(lbl); + psi.relabel_(lbl); // self.Ekeep.append(Entemp); auto outU = linalg::Svd_truncate(psi, new_dim); @@ -313,7 +313,7 @@ namespace cytnx { auto slabel = s.labels(); s = s / s.get_block_().Norm().item(); - s.set_labels(slabel); + s.relabel_(slabel); this->mps.data()[p] = Contract(this->mps.data()[p], s); // absorb s into next neighbor this->mps.S_loc() = p; @@ -419,7 +419,7 @@ namespace cytnx { psi_T.reshape_(dim_l, this->mps.phys_dim(p), this->mps.phys_dim(p + 1), dim_r); // convert psi back to 4-leg form psi = UniTensor(psi_T, false, 2); - psi.set_labels(lbl); + psi.relabel_(lbl); // self.Ekeep.append(Entemp); auto outU = linalg::Svd_truncate(psi, new_dim); @@ -430,7 +430,7 @@ namespace cytnx { auto slabel = s.labels(); s = s / s.get_block_().Norm().item(); - s.set_labels(slabel); + s.relabel_(slabel); this->mps.data()[p + 1] = Contract(s, this->mps.data()[p + 1]); // absorb s into next neighbor. 
@@ -545,7 +545,7 @@ namespace cytnx { // psi.print_diagram(); // exit(1); // psi_T.reshape_(dim_l, this->mps.phys_dim(p), this->mps.phys_dim(p+1), dim_r); //convert - // psi back to 4-leg form psi = UniTensor(psi_T,2); psi.set_labels(lbl); + // psi back to 4-leg form psi = UniTensor(psi_T,2); psi.relabel_(lbl); // self.Ekeep.append(Entemp); @@ -556,7 +556,7 @@ namespace cytnx { auto slabel = s.labels(); s = s / s.get_block_().Norm().item(); - s.set_labels(slabel); + s.relabel_(slabel); this->mps.data()[p] = Contract(this->mps.data()[p], s); // absorb s into next neighbor this->mps.S_loc() = p; @@ -660,7 +660,7 @@ namespace cytnx { psi = out[1]; Entemp = out[0].item(); // psi_T.reshape_(dim_l,this->mps.phys_dim(p),this->mps.phys_dim(p+1),dim_r);// convert psi - // back to 4-leg form psi = UniTensor(psi_T,2); psi.set_labels(lbl); + // back to 4-leg form psi = UniTensor(psi_T,2); psi.relabel_(lbl); // self.Ekeep.append(Entemp); auto outU = linalg::Svd_truncate(psi, new_dim); @@ -671,7 +671,7 @@ namespace cytnx { auto slabel = s.labels(); s = s / s.get_block_().Norm().item(); - s.set_labels(slabel); + s.relabel_(slabel); this->mps.data()[p + 1] = Contract(s, this->mps.data()[p + 1]); // absorb s into next neighbor. 
diff --git a/src/tn_algo/RegularMPS.cpp b/src/tn_algo/RegularMPS.cpp index 643e18270..ec49ce014 100644 --- a/src/tn_algo/RegularMPS.cpp +++ b/src/tn_algo/RegularMPS.cpp @@ -51,7 +51,7 @@ namespace cytnx { auto tA = Ai.relabel({"0", "1", "2"}); L = Contract(tA, tA.Dagger().relabel("0", "-2")); } else { - L.set_labels({"2", "-2"}); + L.relabel_({"2", "-2"}); auto tA = Ai.relabel({"2", "3", "4"}); L = Contract(tA, L); L = Contract(L, tA.Dagger().relabel({"-4", "-2", "3"})); @@ -107,7 +107,7 @@ namespace cytnx { dim3 = std::min(std::min(chi, cytnx_uint64(dim1 * dim2)), DR); } this->_TNs[k] = UniTensor(random::normal({dim1, dim2, dim3}, 0., 1., -1), false, 2); - this->_TNs[k].set_labels({to_string(2 * k), to_string(2 * k + 1), to_string(2 * k + 2)}); + this->_TNs[k].relabel_({to_string(2 * k), to_string(2 * k + 1), to_string(2 * k + 2)}); // vec_print(std::cout,this->_TNs[k].shape());// << endl; } this->S_loc = -1; @@ -171,7 +171,7 @@ namespace cytnx { this->_TNs[k].get_block_()(":", select[k]) = random::normal({dim1, dim3}, 0., 1.); // this->_TNs[k] = UniTensor(random::normal({dim1, dim2, dim3},0.,1.,-1,99),2); - this->_TNs[k].set_labels({to_string(2 * k), to_string(2 * k + 1), to_string(2 * k + 2)}); + this->_TNs[k].relabel_({to_string(2 * k), to_string(2 * k + 1), to_string(2 * k + 2)}); } this->S_loc = -1; this->Into_Lortho(); diff --git a/src/tn_algo/iMPS.cpp b/src/tn_algo/iMPS.cpp index 43028bf3d..adb52275e 100644 --- a/src/tn_algo/iMPS.cpp +++ b/src/tn_algo/iMPS.cpp @@ -56,7 +56,7 @@ namespace cytnx { for (cytnx_int64 k = 0; k < N; k++) { this->_TNs[k] = UniTensor(cytnx::random::normal({chi, vphys_dim[k], chi}, 0., 1.), false, 2); - this->_TNs[k].set_labels({to_string(2 * k), to_string(2 * k + 1), to_string(2 * k + 2)}); + this->_TNs[k].relabel_({to_string(2 * k), to_string(2 * k + 1), to_string(2 * k + 2)}); } } diff --git a/tests/BlockUniTensor_test.cpp b/tests/BlockUniTensor_test.cpp index e9b44e182..a0bac60ed 100644 --- a/tests/BlockUniTensor_test.cpp +++ 
b/tests/BlockUniTensor_test.cpp @@ -112,42 +112,6 @@ TEST_F(BlockUniTensorTest, clone) { } } -TEST_F(BlockUniTensorTest, relabels) { - BUT1 = BUT1.relabel({"a", "b", "cd", "d"}); - EXPECT_EQ(BUT1.labels()[0], "a"); - EXPECT_EQ(BUT1.labels()[1], "b"); - EXPECT_EQ(BUT1.labels()[2], "cd"); - EXPECT_EQ(BUT1.labels()[3], "d"); - BUT1 = BUT1.relabel({"1", "-1", "2", "1000"}); - EXPECT_EQ(BUT1.labels()[0], "1"); - EXPECT_EQ(BUT1.labels()[1], "-1"); - EXPECT_EQ(BUT1.labels()[2], "2"); - EXPECT_EQ(BUT1.labels()[3], "1000"); - - EXPECT_THROW(BUT1.relabel({"a", "a", "b", "c"}), std::logic_error); - EXPECT_THROW(BUT1.relabel({"1", "1", "0", "-1"}), std::logic_error); - EXPECT_THROW(BUT1.relabel({"a"}), std::logic_error); - EXPECT_THROW(BUT1.relabel({"1", "2"}), std::logic_error); - EXPECT_THROW(BUT1.relabel({"a", "b", "c", "d", "e"}), std::logic_error); -} -TEST_F(BlockUniTensorTest, relabels_) { - BUT1.relabel_({"a", "b", "cd", "d"}); - EXPECT_EQ(BUT1.labels()[0], "a"); - EXPECT_EQ(BUT1.labels()[1], "b"); - EXPECT_EQ(BUT1.labels()[2], "cd"); - EXPECT_EQ(BUT1.labels()[3], "d"); - BUT1.relabel_({"1", "-1", "2", "1000"}); - EXPECT_EQ(BUT1.labels()[0], "1"); - EXPECT_EQ(BUT1.labels()[1], "-1"); - EXPECT_EQ(BUT1.labels()[2], "2"); - EXPECT_EQ(BUT1.labels()[3], "1000"); - EXPECT_THROW(BUT1.relabel_({"a", "a", "b", "c"}), std::logic_error); - EXPECT_THROW(BUT1.relabel_({"1", "1", "0", "-1"}), std::logic_error); - EXPECT_THROW(BUT1.relabel_({"a"}), std::logic_error); - EXPECT_THROW(BUT1.relabel_({"1", "2"}), std::logic_error); - EXPECT_THROW(BUT1.relabel_({"a", "b", "c", "d", "e"}), std::logic_error); -} - TEST_F(BlockUniTensorTest, relabel) { auto tmp = BUT1.clone(); BUT1 = BUT1.relabel({"a", "b", "cd", "d"}); @@ -526,8 +490,8 @@ TEST_F(BlockUniTensorTest, reshape_) { EXPECT_ANY_THROW(Spf.reshape_({4, 1}, 1)) TEST_F(BlockUniTensorTest, contract1) { // two sparse matrix - UT_contract_L1.set_labels({"a", "b"}); - UT_contract_R1.set_labels({"b", "c"}); + UT_contract_L1.relabel_({"a", 
"b"}); + UT_contract_R1.relabel_({"b", "c"}); UniTensor out = UT_contract_L1.contract(UT_contract_R1); auto outbks = out.get_blocks(); auto ansbks = UT_contract_ans1.get_blocks(); @@ -541,8 +505,8 @@ TEST_F(BlockUniTensorTest, contract1) { TEST_F(BlockUniTensorTest, contract2) { // two sparse matrix with degeneracy - UT_contract_L2.set_labels({"a", "b"}); - UT_contract_R2.set_labels({"b", "c"}); + UT_contract_L2.relabel_({"a", "b"}); + UT_contract_R2.relabel_({"b", "c"}); UniTensor out = UT_contract_L2.contract(UT_contract_R2); auto outbks = out.get_blocks(); auto ansbks = UT_contract_ans2.get_blocks(); @@ -556,8 +520,8 @@ TEST_F(BlockUniTensorTest, contract2) { TEST_F(BlockUniTensorTest, contract3) { //// two 3 legs tensor - UT_contract_L3.set_labels({"a", "b", "c"}); - UT_contract_R3.set_labels({"c", "d", "e"}); + UT_contract_L3.relabel_({"a", "b", "c"}); + UT_contract_R3.relabel_({"c", "d", "e"}); UniTensor out = UT_contract_L3.contract(UT_contract_R3); auto outbks = out.get_blocks(); auto ansbks = UT_contract_ans3.get_blocks(); diff --git a/tests/Contract_test.h b/tests/Contract_test.h index 88a6d470b..077309207 100644 --- a/tests/Contract_test.h +++ b/tests/Contract_test.h @@ -24,9 +24,9 @@ class ContractTest : public ::testing::Test { // utdnB = utdnB.set_name("B"); // utdnC = utdnC.set_name("C"); - utdnA = utdnA.set_labels({"a", "b", "c"}); - utdnB = utdnB.set_labels({"c", "d"}); - utdnC = utdnC.set_labels({"d", "e"}); + utdnA = utdnA.relabel_({"a", "b", "c"}); + utdnB = utdnB.relabel_({"c", "d"}); + utdnC = utdnC.relabel_({"d", "e"}); utdnAns.at({0, 0, 0}) = 1; utdnAns.at({0, 0, 1}) = 1; diff --git a/tests/DenseUniTensor_test.cpp b/tests/DenseUniTensor_test.cpp index 41fa9cd24..530b17d7b 100644 --- a/tests/DenseUniTensor_test.cpp +++ b/tests/DenseUniTensor_test.cpp @@ -146,51 +146,6 @@ TEST_F(DenseUniTensorTest, set_label_not_exist_old_label) { EXPECT_ANY_THROW(utzero345.set_label("Not exist label", "testing label")); } -/*=====test info===== -describe:test 
set_labels. -====================*/ -TEST_F(DenseUniTensorTest, set_labels_normal) { - // vector - std::vector org_labels = {"org 1", "org 2", "org 3"}; - std::vector new_labels = {"testing 1", "testing 2", "testing 3"}; - utzero345.set_labels(org_labels); - utzero345.set_labels(new_labels); - EXPECT_EQ(utzero345.labels(), new_labels); - - // initilizer list - utzero345.set_labels({"org 1", "org 2", "org 3"}); - utzero345.set_labels({"testing 1", "testing 2", "testing 3"}); - EXPECT_EQ(utzero345.labels(), new_labels); -} - -/*=====test info===== -describe:set_labels to uninitialized unitensor -====================*/ -TEST_F(DenseUniTensorTest, set_labels_un_init) { - std::vector new_labels = {}; - ut_uninit.set_labels(new_labels); - EXPECT_EQ(ut_uninit.labels(), new_labels); -} - -/*=====test info===== -describe:test set_labels length not match. -====================*/ -TEST_F(DenseUniTensorTest, set_labels_len_not_match) { - // too long - std::vector new_labels_long = {"test1", "test2", "test3", "test4"}; - EXPECT_ANY_THROW(utzero345.set_labels(new_labels_long)); - std::vector new_labels_short = {"test1", "test2"}; - EXPECT_ANY_THROW(utzero345.set_labels(new_labels_short)); -} - -/*=====test info===== -describe:test set_labels duplicated. 
-====================*/ -TEST_F(DenseUniTensorTest, set_labels_duplicated) { - std::vector new_labels = {"test1", "test2", "test2", "test3"}; - EXPECT_ANY_THROW(utzero345.set_labels(new_labels)); -} - TEST_F(DenseUniTensorTest, set_rowrank) { // Spf is a rank-3 tensor const auto org_rowrank = Spf.rowrank(); @@ -538,44 +493,6 @@ TEST_F(DenseUniTensorTest, to_) { EXPECT_ANY_THROW(ut_uninit.to_(Device.cpu)); } -TEST_F(DenseUniTensorTest, relabels) { - auto ut = utzero3456.relabel({"a", "b", "cd", "d"}); - EXPECT_EQ(utzero3456.labels()[0], "0"); - EXPECT_EQ(utzero3456.labels()[1], "1"); - EXPECT_EQ(utzero3456.labels()[2], "2"); - EXPECT_EQ(utzero3456.labels()[3], "3"); - EXPECT_EQ(ut.labels()[0], "a"); - EXPECT_EQ(ut.labels()[1], "b"); - EXPECT_EQ(ut.labels()[2], "cd"); - EXPECT_EQ(ut.labels()[3], "d"); - ut = utzero3456.relabel({"1", "-1", "2", "1000"}); - EXPECT_THROW(ut.relabel({"a", "a", "b", "c"}), std::logic_error); - EXPECT_THROW(ut.relabel({"1", "1", "0", "-1"}), std::logic_error); - EXPECT_THROW(ut.relabel({"a"}), std::logic_error); - EXPECT_THROW(ut.relabel({"1", "2"}), std::logic_error); - EXPECT_THROW(ut.relabel({"a", "b", "c", "d", "e"}), std::logic_error); - EXPECT_THROW(ut_uninit.relabel({"a", "b", "c", "d", "e"}), std::logic_error); -} - -TEST_F(DenseUniTensorTest, relabels_) { - auto ut = utzero3456.relabel_({"a", "b", "cd", "d"}); - EXPECT_EQ(utzero3456.labels()[0], "a"); - EXPECT_EQ(utzero3456.labels()[1], "b"); - EXPECT_EQ(utzero3456.labels()[2], "cd"); - EXPECT_EQ(utzero3456.labels()[3], "d"); - EXPECT_EQ(ut.labels()[0], "a"); - EXPECT_EQ(ut.labels()[1], "b"); - EXPECT_EQ(ut.labels()[2], "cd"); - EXPECT_EQ(ut.labels()[3], "d"); - ut = utzero3456.relabel_({"1", "-1", "2", "1000"}); - EXPECT_THROW(ut.relabel_({"a", "a", "b", "c"}), std::logic_error); - EXPECT_THROW(ut.relabel_({"1", "1", "0", "-1"}), std::logic_error); - EXPECT_THROW(ut.relabel_({"a"}), std::logic_error); - EXPECT_THROW(ut.relabel_({"1", "2"}), std::logic_error); - 
EXPECT_THROW(ut.relabel_({"a", "b", "c", "d", "e"}), std::logic_error); - EXPECT_THROW(ut_uninit.relabel_({"a", "b", "c", "d", "e"}), std::logic_error); -} - TEST_F(DenseUniTensorTest, relabel) { auto tmp = utzero3456.clone(); auto ut = utzero3456.relabel({"a", "b", "cd", "d"}); @@ -637,6 +554,7 @@ TEST_F(DenseUniTensorTest, relabel) { // EXPECT_THROW(utzero3456.relabel(5,'a'),std::logic_error); EXPECT_THROW(ut_uninit.relabel(0, ""), std::logic_error); } + TEST_F(DenseUniTensorTest, relabel_) { auto tmp = utzero3456.clone(); auto ut = utzero3456.relabel_({"a", "b", "cd", "d"}); @@ -687,6 +605,52 @@ TEST_F(DenseUniTensorTest, relabel_) { EXPECT_THROW(ut_uninit.relabel_(0, ""), std::logic_error); } +/*=====test info===== +describe:test relabel_. +====================*/ +TEST_F(DenseUniTensorTest, relabel_normal) { + // vector + std::vector org_labels = {"org 1", "org 2", "org 3"}; + std::vector new_labels = {"testing 1", "testing 2", "testing 3"}; + utzero345.relabel_(org_labels); + utzero345.relabel_(new_labels); + EXPECT_EQ(utzero345.labels(), new_labels); + + // initializer list + utzero345.relabel_({"org 1", "org 2", "org 3"}); + utzero345.relabel_({"testing 1", "testing 2", "testing 3"}); + EXPECT_EQ(utzero345.labels(), new_labels); +} + +/*=====test info===== +describe:relabel_ to uninitialized unitensor +====================*/ +TEST_F(DenseUniTensorTest, relabel_un_init) { + std::vector new_labels = {}; + ut_uninit.relabel_(new_labels); + EXPECT_EQ(ut_uninit.labels(), new_labels); +} + +/*=====test info===== +describe:test relabel_ length not match. +====================*/ +TEST_F(DenseUniTensorTest, relabel_len_not_match) { + // too long + std::vector new_labels_long = {"test1", "test2", "test3", "test4"}; + EXPECT_ANY_THROW(utzero345.relabel_(new_labels_long)); + std::vector new_labels_short = {"test1", "test2"}; + EXPECT_ANY_THROW(utzero345.relabel_(new_labels_short)); +} + +/*=====test info===== +describe:test relabel_ duplicated. 
+====================*/ +TEST_F(DenseUniTensorTest, relabel_duplicated) { + std::vector new_labels = {"test1", "test2", "test2", "test3"}; + EXPECT_ANY_THROW(utzero345.relabel_(new_labels)); +} + + /*=====test info===== describe:test astype, input all possible dtype. ====================*/ @@ -1960,8 +1924,8 @@ TEST_F(DenseUniTensorTest, combineBond_ut_uninit) { } TEST_F(DenseUniTensorTest, contract1) { - ut1.set_labels({"a", "b", "c", "d"}); - ut2.set_labels({"a", "aa", "bb", "cc"}); + ut1.relabel_({"a", "b", "c", "d"}); + ut2.relabel_({"a", "aa", "bb", "cc"}); UniTensor out = ut1.contract(ut2); auto outbk = out.get_block_(); auto ansbk = contres1.get_block_(); @@ -1969,8 +1933,8 @@ TEST_F(DenseUniTensorTest, contract1) { } TEST_F(DenseUniTensorTest, contract2) { - ut1.set_labels({"a", "b", "c", "d"}); - ut2.set_labels({"a", "b", "bb", "cc"}); + ut1.relabel_({"a", "b", "c", "d"}); + ut2.relabel_({"a", "b", "bb", "cc"}); UniTensor out = ut1.contract(ut2); auto outbk = out.get_block_(); auto ansbk = contres2.get_block_(); @@ -1978,8 +1942,8 @@ TEST_F(DenseUniTensorTest, contract2) { } TEST_F(DenseUniTensorTest, contract3) { - ut1.set_labels({"a", "b", "c", "d"}); - ut2.set_labels({"a", "b", "c", "cc"}); + ut1.relabel_({"a", "b", "c", "d"}); + ut2.relabel_({"a", "b", "c", "cc"}); UniTensor out = ut1.contract(ut2); auto outbk = out.get_block_(); auto ansbk = contres3.get_block_(); diff --git a/tests/OLDtest.cpp b/tests/OLDtest.cpp index b1452299a..5afd6d2d0 100644 --- a/tests/OLDtest.cpp +++ b/tests/OLDtest.cpp @@ -49,7 +49,7 @@ class MyOp2 : public LinOp { auto H = UniTensor(T, 1); auto out = Contract(H, in); - out.set_labels(in.labels()); + out.relabel_(in.labels()); return out; } diff --git a/tests/UniTensor_base_test.cpp b/tests/UniTensor_base_test.cpp index 692a5e51b..ab824cb1e 100644 --- a/tests/UniTensor_base_test.cpp +++ b/tests/UniTensor_base_test.cpp @@ -1,7 +1,7 @@ #include "UniTensor_base_test.h" TEST_F(UniTensor_baseTest, get_index) { - 
utzero345.set_labels({"abc", "ABC", "CBA"}); + utzero345.relabel_({"abc", "ABC", "CBA"}); EXPECT_EQ(utzero345.get_index("abc"), 0); EXPECT_EQ(utzero345.get_index("ABC"), 1); EXPECT_EQ(utzero345.get_index("CBA"), 2); diff --git a/tests/gpu/BlockUniTensor_test.cpp b/tests/gpu/BlockUniTensor_test.cpp index 45e89c3bb..e678e2b4e 100644 --- a/tests/gpu/BlockUniTensor_test.cpp +++ b/tests/gpu/BlockUniTensor_test.cpp @@ -38,42 +38,6 @@ TEST_F(BlockUniTensorTest, gpu_Trace) { EXPECT_THROW(BUT1.Trace("-1", "5"), std::logic_error); } -TEST_F(BlockUniTensorTest, gpu_relabels) { - BUT1 = BUT1.relabel({"a", "b", "cd", "d"}); - EXPECT_EQ(BUT1.labels()[0], "a"); - EXPECT_EQ(BUT1.labels()[1], "b"); - EXPECT_EQ(BUT1.labels()[2], "cd"); - EXPECT_EQ(BUT1.labels()[3], "d"); - BUT1 = BUT1.relabel({"1", "-1", "2", "1000"}); - EXPECT_EQ(BUT1.labels()[0], "1"); - EXPECT_EQ(BUT1.labels()[1], "-1"); - EXPECT_EQ(BUT1.labels()[2], "2"); - EXPECT_EQ(BUT1.labels()[3], "1000"); - - EXPECT_THROW(BUT1.relabel({"a", "a", "b", "c"}), std::logic_error); - EXPECT_THROW(BUT1.relabel({"1", "1", "0", "-1"}), std::logic_error); - EXPECT_THROW(BUT1.relabel({"a"}), std::logic_error); - EXPECT_THROW(BUT1.relabel({"1", "2"}), std::logic_error); - EXPECT_THROW(BUT1.relabel({"a", "b", "c", "d", "e"}), std::logic_error); -} -TEST_F(BlockUniTensorTest, gpu_relabels_) { - BUT1.relabel_({"a", "b", "cd", "d"}); - EXPECT_EQ(BUT1.labels()[0], "a"); - EXPECT_EQ(BUT1.labels()[1], "b"); - EXPECT_EQ(BUT1.labels()[2], "cd"); - EXPECT_EQ(BUT1.labels()[3], "d"); - BUT1.relabel_({"1", "-1", "2", "1000"}); - EXPECT_EQ(BUT1.labels()[0], "1"); - EXPECT_EQ(BUT1.labels()[1], "-1"); - EXPECT_EQ(BUT1.labels()[2], "2"); - EXPECT_EQ(BUT1.labels()[3], "1000"); - EXPECT_THROW(BUT1.relabel_({"a", "a", "b", "c"}), std::logic_error); - EXPECT_THROW(BUT1.relabel_({"1", "1", "0", "-1"}), std::logic_error); - EXPECT_THROW(BUT1.relabel_({"a"}), std::logic_error); - EXPECT_THROW(BUT1.relabel_({"1", "2"}), std::logic_error); - 
EXPECT_THROW(BUT1.relabel_({"a", "b", "c", "d", "e"}), std::logic_error); -} - TEST_F(BlockUniTensorTest, gpu_relabel) { auto tmp = BUT1.clone(); BUT1 = BUT1.relabel({"a", "b", "cd", "d"}); @@ -776,8 +740,8 @@ TEST_F(BlockUniTensorTest, gpu_permute_2) { TEST_F(BlockUniTensorTest, gpu_contract1) { // two sparse matrix - UT_contract_L1.set_labels({"a", "b"}); - UT_contract_R1.set_labels({"b", "c"}); + UT_contract_L1.relabel_({"a", "b"}); + UT_contract_R1.relabel_({"b", "c"}); UniTensor out = UT_contract_L1.contract(UT_contract_R1); auto outbks = out.get_blocks(); auto ansbks = UT_contract_ans1.get_blocks(); @@ -788,8 +752,8 @@ TEST_F(BlockUniTensorTest, gpu_contract1) { TEST_F(BlockUniTensorTest, gpu_contract2) { // two sparse matrix with degeneracy - UT_contract_L2.set_labels({"a", "b"}); - UT_contract_R2.set_labels({"b", "c"}); + UT_contract_L2.relabel_({"a", "b"}); + UT_contract_R2.relabel_({"b", "c"}); UniTensor out = UT_contract_L2.contract(UT_contract_R2); auto outbks = out.get_blocks(); auto ansbks = UT_contract_ans2.get_blocks(); @@ -800,8 +764,8 @@ TEST_F(BlockUniTensorTest, gpu_contract2) { TEST_F(BlockUniTensorTest, gpu_contract3) { //// two 3 legs tensor - UT_contract_L3.set_labels({"a", "b", "c"}); - UT_contract_R3.set_labels({"c", "d", "e"}); + UT_contract_L3.relabel_({"a", "b", "c"}); + UT_contract_R3.relabel_({"c", "d", "e"}); UniTensor out = UT_contract_L3.contract(UT_contract_R3); auto outbks = out.get_blocks(); auto ansbks = UT_contract_ans3.get_blocks(); diff --git a/tests/gpu/Contract_test.h b/tests/gpu/Contract_test.h index 65edc5799..5933cab27 100644 --- a/tests/gpu/Contract_test.h +++ b/tests/gpu/Contract_test.h @@ -26,9 +26,9 @@ class ContractTest : public ::testing::Test { // utdnB = utdnB.set_name("B"); // utdnC = utdnC.set_name("C"); - utdnA = utdnA.set_labels({"a", "b", "c"}); - utdnB = utdnB.set_labels({"c", "d"}); - utdnC = utdnC.set_labels({"d", "e"}); + utdnA = utdnA.relabel_({"a", "b", "c"}); + utdnB = utdnB.relabel_({"c", "d"}); + 
utdnC = utdnC.relabel_({"d", "e"}); utdnAns.at({0, 0, 0}) = 1; utdnAns.at({0, 0, 1}) = 1; diff --git a/tests/gpu/DenseUniTensor_test.cpp b/tests/gpu/DenseUniTensor_test.cpp index 2e6864651..c2aad402c 100644 --- a/tests/gpu/DenseUniTensor_test.cpp +++ b/tests/gpu/DenseUniTensor_test.cpp @@ -20,33 +20,6 @@ TEST_F(DenseUniTensorTest, gpu_Trace) { // EXPECT_THROW(utzero3456.Trace(-1,5),std::logic_error); } -TEST_F(DenseUniTensorTest, gpu_relabels) { - utzero3456 = utzero3456.relabel({"a", "b", "cd", "d"}); - EXPECT_EQ(utzero3456.labels()[0], "a"); - EXPECT_EQ(utzero3456.labels()[1], "b"); - EXPECT_EQ(utzero3456.labels()[2], "cd"); - EXPECT_EQ(utzero3456.labels()[3], "d"); - utzero3456 = utzero3456.relabel({"1", "-1", "2", "1000"}); - EXPECT_THROW(utzero3456.relabel({"a", "a", "b", "c"}), std::logic_error); - EXPECT_THROW(utzero3456.relabel({"1", "1", "0", "-1"}), std::logic_error); - EXPECT_THROW(utzero3456.relabel({"a"}), std::logic_error); - EXPECT_THROW(utzero3456.relabel({"1", "2"}), std::logic_error); - EXPECT_THROW(utzero3456.relabel({"a", "b", "c", "d", "e"}), std::logic_error); -} -TEST_F(DenseUniTensorTest, gpu_relabels_) { - utzero3456.relabel_({"a", "b", "cd", "d"}); - EXPECT_EQ(utzero3456.labels()[0], "a"); - EXPECT_EQ(utzero3456.labels()[1], "b"); - EXPECT_EQ(utzero3456.labels()[2], "cd"); - EXPECT_EQ(utzero3456.labels()[3], "d"); - utzero3456.relabel_({"1", "-1", "2", "1000"}); - EXPECT_THROW(utzero3456.relabel_({"a", "a", "b", "c"}), std::logic_error); - EXPECT_THROW(utzero3456.relabel_({"1", "1", "0", "-1"}), std::logic_error); - EXPECT_THROW(utzero3456.relabel_({"a"}), std::logic_error); - EXPECT_THROW(utzero3456.relabel_({"1", "2"}), std::logic_error); - EXPECT_THROW(utzero3456.relabel_({"a", "b", "c", "d", "e"}), std::logic_error); -} - TEST_F(DenseUniTensorTest, gpu_relabel) { auto tmp = utzero3456.clone(); utzero3456 = utzero3456.relabel({"a", "b", "cd", "d"}); @@ -478,8 +451,8 @@ TEST_F(DenseUniTensorTest, gpu_permute_2) { } 
TEST_F(DenseUniTensorTest, gpu_contract1) { - ut1.set_labels({"a", "b", "c", "d"}); - ut2.set_labels({"a", "aa", "bb", "cc"}); + ut1.relabel_({"a", "b", "c", "d"}); + ut2.relabel_({"a", "aa", "bb", "cc"}); UniTensor out = ut1.contract(ut2); auto outbk = out.get_block_(); auto ansbk = contres1.get_block_(); @@ -487,8 +460,8 @@ TEST_F(DenseUniTensorTest, gpu_contract1) { } TEST_F(DenseUniTensorTest, gpu_contract2) { - ut1.set_labels({"a", "b", "c", "d"}); - ut2.set_labels({"a", "b", "bb", "cc"}); + ut1.relabel_({"a", "b", "c", "d"}); + ut2.relabel_({"a", "b", "bb", "cc"}); UniTensor out = ut1.contract(ut2); auto outbk = out.get_block_(); auto ansbk = contres2.get_block_(); @@ -496,8 +469,8 @@ TEST_F(DenseUniTensorTest, gpu_contract2) { } TEST_F(DenseUniTensorTest, gpu_contract3) { - ut1.set_labels({"a", "b", "c", "d"}); - ut2.set_labels({"a", "b", "c", "cc"}); + ut1.relabel_({"a", "b", "c", "d"}); + ut2.relabel_({"a", "b", "c", "cc"}); UniTensor out = ut1.contract(ut2); auto outbk = out.get_block_(); auto ansbk = contres3.get_block_(); diff --git a/tests/gpu/OLDtest.cpp b/tests/gpu/OLDtest.cpp index b1452299a..5afd6d2d0 100644 --- a/tests/gpu/OLDtest.cpp +++ b/tests/gpu/OLDtest.cpp @@ -49,7 +49,7 @@ class MyOp2 : public LinOp { auto H = UniTensor(T, 1); auto out = Contract(H, in); - out.set_labels(in.labels()); + out.relabel_(in.labels()); return out; } diff --git a/tests/gpu/UniTensor_base_test.cpp b/tests/gpu/UniTensor_base_test.cpp index 560ac3b00..37ae444da 100644 --- a/tests/gpu/UniTensor_base_test.cpp +++ b/tests/gpu/UniTensor_base_test.cpp @@ -1,7 +1,7 @@ #include "UniTensor_base_test.h" TEST_F(UniTensor_baseTest, gpu_get_index) { - utzero345.set_labels({"abc", "ABC", "CBA"}); + utzero345.relabel_({"abc", "ABC", "CBA"}); EXPECT_EQ(utzero345.get_index("abc"), 0); EXPECT_EQ(utzero345.get_index("ABC"), 1); EXPECT_EQ(utzero345.get_index("CBA"), 2); diff --git a/tests/gpu/linalg_test/ExpH_test.cpp b/tests/gpu/linalg_test/ExpH_test.cpp index be0268791..cb8af0d63 
100644 --- a/tests/gpu/linalg_test/ExpH_test.cpp +++ b/tests/gpu/linalg_test/ExpH_test.cpp @@ -54,7 +54,7 @@ TEST(ExpH_UT, gpu_UTExpH_test) { // convert to UT UniTensor UT = UniTensor(t_i).to(cytnx::Device.cuda); UT.set_rowrank(2); - UT.set_labels({"a", "b", "c", "d"}); + UT.relabel_({"a", "b", "c", "d"}); double dt = 0.05; UniTensor UTFin = linalg::ExpH(UT, -dt).to(cytnx::Device.cuda); diff --git a/tests/gpu/linalg_test/GeSvd_test.cpp b/tests/gpu/linalg_test/GeSvd_test.cpp index b77d065a5..51691ee08 100644 --- a/tests/gpu/linalg_test/GeSvd_test.cpp +++ b/tests/gpu/linalg_test/GeSvd_test.cpp @@ -386,7 +386,7 @@ namespace GesvdTest { const UniTensor& U = Tout[1]; const UniTensor& V = Tout[2]; auto UD = U.Dagger(); - UD.set_labels({"0", "1", "9"}); + UD.relabel_({"0", "1", "9"}); UD.permute_({2, 0, 1}, 1); auto UUD = Contract(U, UD); } diff --git a/tests/gpu/linalg_test/Lanczos_Gnd_test.cpp b/tests/gpu/linalg_test/Lanczos_Gnd_test.cpp index 2fbfe6056..4bfcafd67 100644 --- a/tests/gpu/linalg_test/Lanczos_Gnd_test.cpp +++ b/tests/gpu/linalg_test/Lanczos_Gnd_test.cpp @@ -33,14 +33,14 @@ class MyOp2 : public LinOp { H.put_block(A, 0); H.put_block(B, 1); H.put_block(C, 2); - H.set_labels({"a", "b"}); + H.relabel_({"a", "b"}); H.to_(cytnx::Device.cuda); // H.print_diagram(); // H.print_blocks(); } UniTensor matvec(const UniTensor& psi) override { auto out = H.contract(psi); - out.set_labels({"b", "c"}); + out.relabel_({"b", "c"}); return out; } }; @@ -74,7 +74,7 @@ TEST(Lanczos_Gnd, gpu_Bk_Lanczos_Gnd_test) { lan_guess.put_block(random::normal(9, 1, 1).reshape({9, 1}), 0); lan_guess.put_block(random::normal(9, 1, 1).reshape({9, 1}), 1); lan_guess.put_block(random::normal(9, 1, 1).reshape({9, 1}), 2); - lan_guess.set_labels({"b", "c"}); + lan_guess.relabel_({"b", "c"}); lan_guess.to_(cytnx::Device.cuda); // lan_guess.print_diagram(); // lan_guess.print_blocks(); diff --git a/tests/gpu/linalg_test/Svd_test.cpp b/tests/gpu/linalg_test/Svd_test.cpp index 139939fa8..8bd12a532 
100644 --- a/tests/gpu/linalg_test/Svd_test.cpp +++ b/tests/gpu/linalg_test/Svd_test.cpp @@ -387,7 +387,7 @@ namespace SvdTest { const UniTensor& U = Tout[1]; const UniTensor& V = Tout[2]; auto UD = U.Dagger(); - UD.set_labels({"0", "1", "9"}); + UD.relabel_({"0", "1", "9"}); UD.permute_({2, 0, 1}, 1); auto UUD = Contract(U, UD); } diff --git a/tests/gpu/linalg_test/linalg_test.h b/tests/gpu/linalg_test/linalg_test.h index 183cc1800..109aeb439 100644 --- a/tests/gpu/linalg_test/linalg_test.h +++ b/tests/gpu/linalg_test/linalg_test.h @@ -74,7 +74,7 @@ class linalg_Test : public ::testing::Test { H.put_block(A, 0); H.put_block(B, 1); H.put_block(C, 2); - H.set_labels({"a", "b"}); + H.relabel_({"a", "b"}); } void TearDown() override {} }; diff --git a/tests/linalg_test/Arnoldi_Ut_test.cpp b/tests/linalg_test/Arnoldi_Ut_test.cpp index 9f8fe1196..e35413c48 100644 --- a/tests/linalg_test/Arnoldi_Ut_test.cpp +++ b/tests/linalg_test/Arnoldi_Ut_test.cpp @@ -77,13 +77,13 @@ namespace { double low = -1.0, high = 1.0; int seed = 0; H.uniform_(low, high, seed); - H.set_labels({"a", "b"}); + H.relabel_({"a", "b"}); // H.print_diagram(); // H.print_blocks(); } UniTensor matvec(const UniTensor& psi) override { auto out = (H.astype(psi.dtype())).contract(psi); - out.set_labels({"b", "c"}); + out.relabel_({"b", "c"}); return out; } }; diff --git a/tests/linalg_test/ExpH_test.cpp b/tests/linalg_test/ExpH_test.cpp index 0b035e7e0..5a79f15c7 100644 --- a/tests/linalg_test/ExpH_test.cpp +++ b/tests/linalg_test/ExpH_test.cpp @@ -54,7 +54,7 @@ TEST(ExpH_UT, UTExpH_test) { // convert to UT UniTensor UT = UniTensor(t_i); UT.set_rowrank(2); - UT.set_labels({"a", "b", "c", "d"}); + UT.relabel_({"a", "b", "c", "d"}); double dt = 0.05; UniTensor UTFin = linalg::ExpH(UT, -dt); diff --git a/tests/linalg_test/GeSvd_test.cpp b/tests/linalg_test/GeSvd_test.cpp index 1c4974734..9f4560737 100644 --- a/tests/linalg_test/GeSvd_test.cpp +++ b/tests/linalg_test/GeSvd_test.cpp @@ -371,7 +371,7 @@ namespace 
GesvdTest { const UniTensor& U = Tout[1]; const UniTensor& V = Tout[2]; auto UD = U.Dagger(); - UD.set_labels({"0", "1", "9"}); + UD.relabel_({"0", "1", "9"}); UD.permute_({2, 0, 1}, 1); auto UUD = Contract(U, UD); } diff --git a/tests/linalg_test/Gesvd_truncate_test.cpp b/tests/linalg_test/Gesvd_truncate_test.cpp index ab2998acd..dc6009eb0 100644 --- a/tests/linalg_test/Gesvd_truncate_test.cpp +++ b/tests/linalg_test/Gesvd_truncate_test.cpp @@ -205,7 +205,7 @@ namespace GesvdTruncateTest { const UniTensor& U = Tout[1]; const UniTensor& V = Tout[2]; auto UD = U.Dagger(); - UD.set_labels({"0", "1", "9"}); + UD.relabel_({"0", "1", "9"}); UD.permute_({2, 0, 1}, 1); auto UUD = Contract(U, UD); } diff --git a/tests/linalg_test/Lanczos_Exp_test.cpp b/tests/linalg_test/Lanczos_Exp_test.cpp index c26272f6c..6052708d4 100644 --- a/tests/linalg_test/Lanczos_Exp_test.cpp +++ b/tests/linalg_test/Lanczos_Exp_test.cpp @@ -31,7 +31,7 @@ namespace Lanczos_Exp_Ut_Test { * | | + | | "vol"--A--"vor" | | + | | * |_|--"vol" "po" "vor"--|_| |_|---------A----------|_| * - * Then relabels ["vil", "pi", "vir"] -> ["vol", "po", "vor"] + * Then relabel ["vil", "pi", "vir"] -> ["vol", "po", "vor"] * * "vil":virtual in bond left * "po":physical out bond @@ -157,11 +157,11 @@ namespace Lanczos_Exp_Ut_Test { // Let H can be converge in ExpM auto eigs = HEff_mat.Eigh(); auto e = UniTensor(eigs[0], true) * 0.01; - e.set_labels({"a", "b"}); + e.relabel_({"a", "b"}); auto v = UniTensor(eigs[1]); - v.set_labels({"i", "a"}); + v.relabel_({"i", "a"}); auto vt = UniTensor(linalg::InvM(v.get_block())); - vt.set_labels({"b", "j"}); + vt.relabel_({"b", "j"}); HEff_mat = Contract(Contract(e, v), vt).get_block(); // HEff_mat = linalg::Matmul(HEff_mat, HEff_mat.permute({1, 0}).Conj()); // positive definete diff --git a/tests/linalg_test/Lanczos_Gnd_test.cpp b/tests/linalg_test/Lanczos_Gnd_test.cpp index 7fd0ce0fb..67b3c0a69 100644 --- a/tests/linalg_test/Lanczos_Gnd_test.cpp +++ 
b/tests/linalg_test/Lanczos_Gnd_test.cpp @@ -32,7 +32,7 @@ namespace { * | | + | | "pi" * |_|--"vol" "po" "vor"--|_| * - * Then relabels ["vil", "pi", "vir"] -> ["vol", "po", "vor"] + * Then relabel ["vil", "pi", "vir"] -> ["vol", "po", "vor"] * * "vil":virtual in bond left * "po":physical out bond @@ -105,13 +105,13 @@ namespace { H.put_block(A, 0); H.put_block(B, 1); H.put_block(C, 2); - H.set_labels({"a", "b"}); + H.relabel_({"a", "b"}); // H.print_diagram(); // H.print_blocks(); } UniTensor matvec(const UniTensor& psi) override { auto out = H.contract(psi); - out.set_labels({"b", "c"}); + out.relabel_({"b", "c"}); // out.print_diagram(); return out; } @@ -397,7 +397,7 @@ TEST(Lanczos_Gnd, Bk_Lanczos_Gnd_test) { lan_guess.put_block(random::normal(9, 1, 1).reshape({9, 1}), 0); lan_guess.put_block(random::normal(9, 1, 1).reshape({9, 1}), 1); lan_guess.put_block(random::normal(9, 1, 1).reshape({9, 1}), 2); - lan_guess.set_labels({"b", "c"}); + lan_guess.relabel_({"b", "c"}); // lan_guess.print_diagram(); // std::cout << lan_guess.shape() << std::endl; // lan_guess.print_blocks(); @@ -427,7 +427,7 @@ TEST(Lanczos_Gnd, Bk_Lanczos_test) { lan_guess.put_block(random::normal(9, 1, 1).reshape({9, 1}), 0); lan_guess.put_block(random::normal(9, 1, 1).reshape({9, 1}), 1); lan_guess.put_block(random::normal(9, 1, 1).reshape({9, 1}), 2); - lan_guess.set_labels({"b", "c"}); + lan_guess.relabel_({"b", "c"}); MyOp2 H = MyOp2(27); diff --git a/tests/linalg_test/Rsvd_test.cpp b/tests/linalg_test/Rsvd_test.cpp index 735fac9ff..6ae918462 100644 --- a/tests/linalg_test/Rsvd_test.cpp +++ b/tests/linalg_test/Rsvd_test.cpp @@ -186,7 +186,7 @@ namespace RsvdTest { const UniTensor& U = Tout[1]; const UniTensor& V = Tout[2]; auto UD = U.Dagger(); - UD.set_labels({"0", "1", "9"}); + UD.relabel_({"0", "1", "9"}); UD.permute_({2, 0, 1}, 1); auto UUD = Contract(U, UD); } diff --git a/tests/linalg_test/Rsvd_truncate_test.cpp b/tests/linalg_test/Rsvd_truncate_test.cpp index 7c2b1fdcc..6778c2bcb 
100644 --- a/tests/linalg_test/Rsvd_truncate_test.cpp +++ b/tests/linalg_test/Rsvd_truncate_test.cpp @@ -186,7 +186,7 @@ namespace RsvdTruncateTest { const UniTensor& U = Tout[1]; const UniTensor& V = Tout[2]; auto UD = U.Dagger(); - UD.set_labels({"0", "1", "9"}); + UD.relabel_({"0", "1", "9"}); UD.permute_({2, 0, 1}, 1); auto UUD = Contract(U, UD); } diff --git a/tests/linalg_test/Svd_test.cpp b/tests/linalg_test/Svd_test.cpp index 7bdf16369..b99fcd4fe 100644 --- a/tests/linalg_test/Svd_test.cpp +++ b/tests/linalg_test/Svd_test.cpp @@ -372,7 +372,7 @@ namespace SvdTest { const UniTensor& U = Tout[1]; const UniTensor& V = Tout[2]; auto UD = U.Dagger(); - UD.set_labels({"0", "1", "9"}); + UD.relabel_({"0", "1", "9"}); UD.permute_({2, 0, 1}, 1); auto UUD = Contract(U, UD); } diff --git a/tests/linalg_test/Svd_truncate_test.cpp b/tests/linalg_test/Svd_truncate_test.cpp index 962fa926e..2c972ce42 100644 --- a/tests/linalg_test/Svd_truncate_test.cpp +++ b/tests/linalg_test/Svd_truncate_test.cpp @@ -189,7 +189,7 @@ namespace SvdTruncateTest { const UniTensor& U = Tout[1]; const UniTensor& V = Tout[2]; auto UD = U.Dagger(); - UD.set_labels({"0", "1", "9"}); + UD.relabel_({"0", "1", "9"}); UD.permute_({2, 0, 1}, 1); auto UUD = Contract(U, UD); } diff --git a/tests/linalg_test/linalg_test.cpp b/tests/linalg_test/linalg_test.cpp index e4deeaf49..87ea422ee 100644 --- a/tests/linalg_test/linalg_test.cpp +++ b/tests/linalg_test/linalg_test.cpp @@ -348,11 +348,11 @@ TEST_F(linalg_Test, BkUt_Norm) { TEST_F(linalg_Test, Tensor_Eig) { auto res = linalg::Eig(arange3x3cd); auto e = UniTensor(res[0], true); - e.set_labels({"a", "b"}); + e.relabel_({"a", "b"}); auto v = UniTensor(res[1]); - v.set_labels({"i", "a"}); + v.relabel_({"i", "a"}); auto vt = UniTensor(linalg::InvM(v.get_block())); - vt.set_labels({"b", "j"}); + vt.relabel_({"b", "j"}); EXPECT_TRUE((UniTensor(arange3x3cd) - Contract(Contract(e, v), vt)).Norm().item() < 1e-13); } @@ -360,22 +360,22 @@ TEST_F(linalg_Test, 
Tensor_Eigh) { auto her = arange3x3cd + arange3x3cd.Conj().permute({1, 0}); auto res = linalg::Eigh(her); auto e = UniTensor(res[0], true); - e.set_labels({"a", "b"}); + e.relabel_({"a", "b"}); auto v = UniTensor(res[1]); - v.set_labels({"i", "a"}); + v.relabel_({"i", "a"}); auto vt = UniTensor(linalg::InvM(v.get_block())); - vt.set_labels({"b", "j"}); + vt.relabel_({"b", "j"}); EXPECT_TRUE((UniTensor(her) - Contract(Contract(e, v), vt)).Norm().item() < 1e-13); } TEST_F(linalg_Test, DenseUt_Eig) { auto res = linalg::Eig(arange3x3cd_ut); auto e = res[0]; - e.set_labels({"a", "b"}); + e.relabel_({"a", "b"}); auto v = res[1]; - v.set_labels({"i", "a"}); + v.relabel_({"i", "a"}); auto vt = UniTensor(linalg::InvM(v.get_block())); - vt.set_labels({"b", "j"}); + vt.relabel_({"b", "j"}); EXPECT_TRUE((UniTensor(arange3x3cd) - Contract(Contract(e, v), vt)).Norm().item() < 1e-13); } @@ -383,11 +383,11 @@ TEST_F(linalg_Test, DenseUt_Eigh) { auto her = arange3x3cd + arange3x3cd.Conj().permute({1, 0}); auto res = linalg::Eigh(UniTensor(her)); auto e = res[0]; - e.set_labels({"a", "b"}); + e.relabel_({"a", "b"}); auto v = res[1]; - v.set_labels({"i", "a"}); + v.relabel_({"i", "a"}); auto vt = UniTensor(linalg::InvM(v.get_block())); - vt.set_labels({"b", "j"}); + vt.relabel_({"b", "j"}); EXPECT_TRUE((UniTensor(her) - Contract(Contract(e, v), vt)).Norm().item() < 1e-13); } @@ -412,13 +412,13 @@ TEST_F(linalg_Test, Tensor_InvM_) { TEST_F(linalg_Test, DenseUt_InvM) { auto inv = linalg::InvM(invertable3x3cd_ut); - inv.set_labels({"1", "2"}); // invertable3x3cd_ut is labeled "0","1". + inv.relabel_({"1", "2"}); // invertable3x3cd_ut is labeled "0","1". EXPECT_TRUE((invertable3x3cd_ut.contract(inv) - UniTensor(eye3x3cd)).Norm().item() < 1e-13); } TEST_F(linalg_Test, DenseUt_InvM_) { auto inv = invertable3x3cd_ut.clone(); - inv.set_labels({"1", "2"}); // invertable3x3cd_ut is labeled "0","1". + inv.relabel_({"1", "2"}); // invertable3x3cd_ut is labeled "0","1". 
linalg::InvM_(inv); EXPECT_TRUE((invertable3x3cd_ut.contract(inv) - UniTensor(eye3x3cd)).Norm().item() < 1e-13); } diff --git a/tests/linalg_test/linalg_test.h b/tests/linalg_test/linalg_test.h index e45b0da31..7816d9427 100644 --- a/tests/linalg_test/linalg_test.h +++ b/tests/linalg_test/linalg_test.h @@ -76,7 +76,7 @@ class linalg_Test : public ::testing::Test { H.put_block(A, 0); H.put_block(B, 1); H.put_block(C, 2); - H.set_labels({"a", "b"}); + H.relabel_({"a", "b"}); invertable3x3cd.at({0, 0}) = 2; // just to make it invertable. invertable3x3cd_ut.at({0, 0}) = 2; // just to make it invertable. From 1ef7e9988d00770ad31aa2bc674072cf961ba8ea Mon Sep 17 00:00:00 2001 From: Manuel Schneider Date: Fri, 5 Dec 2025 17:41:59 +0800 Subject: [PATCH 03/10] replaced .set_label( by .relabel( --- src/backend/linalg_internal_gpu/cuTrace_internal.cu | 3 +-- tests/DenseUniTensor_test.cpp | 1 - 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/src/backend/linalg_internal_gpu/cuTrace_internal.cu b/src/backend/linalg_internal_gpu/cuTrace_internal.cu index f7ae723e7..3ea855699 100644 --- a/src/backend/linalg_internal_gpu/cuTrace_internal.cu +++ b/src/backend/linalg_internal_gpu/cuTrace_internal.cu @@ -32,8 +32,7 @@ namespace cytnx { I_UT.relabel_({"0", "1"}); UniTensor UTn = UniTensor(Tn, false, 2); - UTn.relabel_( - vec_cast(vec_range(100, 100 + UTn.labels().size()))); + UTn.relabel_(vec_cast(vec_range(100, 100 + UTn.labels().size()))); UTn._impl->_labels[ax1] = "0"; UTn._impl->_labels[ax2] = "1"; out = Contract(I_UT, UTn).get_block_(); diff --git a/tests/DenseUniTensor_test.cpp b/tests/DenseUniTensor_test.cpp index 530b17d7b..4da33e205 100644 --- a/tests/DenseUniTensor_test.cpp +++ b/tests/DenseUniTensor_test.cpp @@ -650,7 +650,6 @@ TEST_F(DenseUniTensorTest, relabel_duplicated) { EXPECT_ANY_THROW(utzero345.relabel_(new_labels)); } - /*=====test info===== describe:test astype, input all possible dtype. 
====================*/ From 98afa33974242a6ce7db575f7ca2ed6a05bd3eca Mon Sep 17 00:00:00 2001 From: Manuel Schneider Date: Mon, 23 Feb 2026 15:34:11 +0900 Subject: [PATCH 04/10] fixed missing underscore --- example/iTEBD/iTEBD_tag.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/example/iTEBD/iTEBD_tag.py b/example/iTEBD/iTEBD_tag.py index 94609b254..857f0abd2 100644 --- a/example/iTEBD/iTEBD_tag.py +++ b/example/iTEBD/iTEBD_tag.py @@ -66,10 +66,10 @@ def itebd_tfim_tag(chi = 20, J = 1.0, Hx = 1.0, dt = 0.1, CvgCrit = 1.0e-10): Elast = 0 for i in range(10000): - A.relabel(['a','0','b']) - B.relabel(['c','1','d']) - la.relabel(['b','c']) - lb.relabel(['d','e']) + A.relabel_(['a','0','b']) + B.relabel_(['c','1','d']) + la.relabel_(['b','c']) + lb.relabel_(['d','e']) From 10f8896abbf368bd1568ea1d1ed2bea862ab2c11 Mon Sep 17 00:00:00 2001 From: Manuel Schneider Date: Mon, 23 Feb 2026 15:38:23 +0900 Subject: [PATCH 05/10] minor style fix --- example/TDVP/tdvp1_dense.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/example/TDVP/tdvp1_dense.py b/example/TDVP/tdvp1_dense.py index 480f5535c..b3f2159ff 100644 --- a/example/TDVP/tdvp1_dense.py +++ b/example/TDVP/tdvp1_dense.py @@ -250,7 +250,7 @@ def Local_meas(A, B, Op, site): l = anet.Launch() else: tmp = A[i].relabel(1, "_aux_up") - Op = Op.relabel(["_aux_up", "_aux_low"]) + Op.relabel_(["_aux_up", "_aux_low"]) tmp = cytnx.Contract(tmp, Op) tmp.relabel_("_aux_low", A[i].labels()[1]) tmp.permute_(A[i].labels()) From 70c200662ebcbe239efe167c22f58cade86567be Mon Sep 17 00:00:00 2001 From: pcchen Date: Fri, 20 Mar 2026 10:29:33 +0800 Subject: [PATCH 06/10] fix relabels_(new_labels) deprecated message: point to 1-param relabel_ The [[deprecated]] attribute and Doxygen comment on the 1-parameter relabels_() overload incorrectly directed users to the 2-parameter relabel_(old_labels, new_labels). Corrected to relabel_(new_labels). 
Co-Authored-By: Claude Sonnet 4.6 --- include/UniTensor.hpp | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/include/UniTensor.hpp b/include/UniTensor.hpp index 088637a1c..a27a87cb8 100644 --- a/include/UniTensor.hpp +++ b/include/UniTensor.hpp @@ -3191,14 +3191,12 @@ namespace cytnx { } /** @deprecated This function is deprecated. Please use \n - UniTensor &relabel_(const std::vector &old_labels, - const std::vector &new_labels)\n + UniTensor &relabel_(const std::vector &new_labels)\n instead. */ [[deprecated( "Please use " - "UniTensor &relabel_(const std::vector &old_labels, const " - "std::vector &new_labels) " + "UniTensor &relabel_(const std::vector &new_labels) " "instead.")]] UniTensor & relabels_(const std::vector &new_labels) { this->_impl->relabels_(new_labels); From a33450c0a12c8f2a8c8f8e126c4d81ee50b98a26 Mon Sep 17 00:00:00 2001 From: pcchen Date: Fri, 20 Mar 2026 10:31:03 +0800 Subject: [PATCH 07/10] mark set_labels() overloads as [[deprecated]] in public UniTensor API Both the vector and initializer_list overloads of set_labels() were missing [[deprecated]] attributes despite all call sites having been replaced with relabel_(). Users will now get a compiler warning directing them to relabel_(new_labels). Co-Authored-By: Claude Sonnet 4.6 --- include/UniTensor.hpp | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/include/UniTensor.hpp b/include/UniTensor.hpp index a27a87cb8..2003e77c7 100644 --- a/include/UniTensor.hpp +++ b/include/UniTensor.hpp @@ -2910,24 +2910,29 @@ namespace cytnx { */ /** - @brief Set new labels for all the bonds. - @param[in] new_labels the new labels for each bond. - @note - 1. the new assign label cannot be the same as the label of any other bonds in the - UniTensor. ( cannot have duplicate labels ) - 2. Compared to relabels(const std::vector &new_labels) const, this - function set the new label and return self. + @deprecated This function is deprecated. 
Please use \n + UniTensor &relabel_(const std::vector &new_labels)\n + instead. */ - UniTensor &set_labels(const std::vector &new_labels) { + [[deprecated( + "Please use " + "UniTensor &relabel_(const std::vector &new_labels) " + "instead.")]] UniTensor & + set_labels(const std::vector &new_labels) { this->_impl->set_labels(new_labels); return *this; } /** - @see - set_labels(const std::vector &new_labels) - */ - UniTensor &set_labels(const std::initializer_list &new_labels) { + @deprecated This function is deprecated. Please use \n + UniTensor &relabel_(const std::initializer_list &new_labels)\n + instead. + */ + [[deprecated( + "Please use " + "UniTensor &relabel_(const std::initializer_list &new_labels) " + "instead.")]] UniTensor & + set_labels(const std::initializer_list &new_labels) { std::vector new_lbls(new_labels); std::vector vs(new_lbls.size()); transform(new_lbls.begin(), new_lbls.end(), vs.begin(), From 7179bbf109ce4f3b5cd1acbbb165d26a91e65b9c Mon Sep 17 00:00:00 2001 From: pcchen Date: Fri, 20 Mar 2026 10:31:58 +0800 Subject: [PATCH 08/10] mark set_labels and relabels_ as [[deprecated]] in UniTensor_base The three base class declarations (set_labels, relabels_(new_labels), relabels_(old_labels, new_labels)) were missing [[deprecated]] attributes, so callers going through the base class pointer would get no compiler warning. Each now points to its relabel_ replacement. 
Co-Authored-By: Claude Sonnet 4.6 --- include/UniTensor.hpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/include/UniTensor.hpp b/include/UniTensor.hpp index 2003e77c7..cedc8bae3 100644 --- a/include/UniTensor.hpp +++ b/include/UniTensor.hpp @@ -196,11 +196,14 @@ namespace cytnx { this->_labels[inx] = new_label; } + [[deprecated("Please use relabel_(const std::vector &new_labels) instead.")]] void set_labels(const std::vector &new_labels); void relabel_(const std::vector &new_labels); // implemented + [[deprecated("Please use relabel_(const std::vector &new_labels) instead.")]] void relabels_(const std::vector &new_labels); // implemented void relabel_(const std::vector &old_labels, const std::vector &new_labels); // implemented + [[deprecated("Please use relabel_(const std::vector &old_labels, const std::vector &new_labels) instead.")]] void relabels_(const std::vector &old_labels, const std::vector &new_labels); // implemented void relabel_(const std::string &old_label, const std::string &new_label) { From 3e84432bb8683a6ed57c16483ebcbaf96d7762de Mon Sep 17 00:00:00 2001 From: pcchen Date: Fri, 20 Mar 2026 11:49:45 +0800 Subject: [PATCH 09/10] restore deprecated-function tests with pragma suppression Tests for set_labels, relabels, and relabels_ were deleted by the PR, leaving no automated check that the deprecated wrappers still work. Restored all removed tests and wrapped them in: #pragma GCC diagnostic ignored "-Wdeprecated-declarations" so they compile cleanly while still exercising the deprecated API. 
Co-Authored-By: Claude Sonnet 4.6 --- tests/BlockUniTensor_test.cpp | 44 ++++++++++++++ tests/DenseUniTensor_test.cpp | 97 +++++++++++++++++++++++++++++++ tests/gpu/BlockUniTensor_test.cpp | 43 ++++++++++++++ tests/gpu/DenseUniTensor_test.cpp | 34 +++++++++++ 4 files changed, 218 insertions(+) diff --git a/tests/BlockUniTensor_test.cpp b/tests/BlockUniTensor_test.cpp index a0bac60ed..01e6f50c4 100644 --- a/tests/BlockUniTensor_test.cpp +++ b/tests/BlockUniTensor_test.cpp @@ -112,6 +112,50 @@ TEST_F(BlockUniTensorTest, clone) { } } +// Deprecated-function tests: suppress warnings so the compiler does not error +// on [[deprecated]] calls. These tests verify backward compatibility. +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + +TEST_F(BlockUniTensorTest, relabels) { + BUT1 = BUT1.relabels({"a", "b", "cd", "d"}); + EXPECT_EQ(BUT1.labels()[0], "a"); + EXPECT_EQ(BUT1.labels()[1], "b"); + EXPECT_EQ(BUT1.labels()[2], "cd"); + EXPECT_EQ(BUT1.labels()[3], "d"); + BUT1 = BUT1.relabels({"1", "-1", "2", "1000"}); + EXPECT_EQ(BUT1.labels()[0], "1"); + EXPECT_EQ(BUT1.labels()[1], "-1"); + EXPECT_EQ(BUT1.labels()[2], "2"); + EXPECT_EQ(BUT1.labels()[3], "1000"); + + EXPECT_THROW(BUT1.relabels({"a", "a", "b", "c"}), std::logic_error); + EXPECT_THROW(BUT1.relabels({"1", "1", "0", "-1"}), std::logic_error); + EXPECT_THROW(BUT1.relabels({"a"}), std::logic_error); + EXPECT_THROW(BUT1.relabels({"1", "2"}), std::logic_error); + EXPECT_THROW(BUT1.relabels({"a", "b", "c", "d", "e"}), std::logic_error); +} + +TEST_F(BlockUniTensorTest, relabels_) { + BUT1.relabels_({"a", "b", "cd", "d"}); + EXPECT_EQ(BUT1.labels()[0], "a"); + EXPECT_EQ(BUT1.labels()[1], "b"); + EXPECT_EQ(BUT1.labels()[2], "cd"); + EXPECT_EQ(BUT1.labels()[3], "d"); + BUT1.relabels_({"1", "-1", "2", "1000"}); + EXPECT_EQ(BUT1.labels()[0], "1"); + EXPECT_EQ(BUT1.labels()[1], "-1"); + EXPECT_EQ(BUT1.labels()[2], "2"); + EXPECT_EQ(BUT1.labels()[3], "1000"); + 
EXPECT_THROW(BUT1.relabels_({"a", "a", "b", "c"}), std::logic_error); + EXPECT_THROW(BUT1.relabels_({"1", "1", "0", "-1"}), std::logic_error); + EXPECT_THROW(BUT1.relabels_({"a"}), std::logic_error); + EXPECT_THROW(BUT1.relabels_({"1", "2"}), std::logic_error); + EXPECT_THROW(BUT1.relabels_({"a", "b", "c", "d", "e"}), std::logic_error); +} + +#pragma GCC diagnostic pop + TEST_F(BlockUniTensorTest, relabel) { auto tmp = BUT1.clone(); BUT1 = BUT1.relabel({"a", "b", "cd", "d"}); diff --git a/tests/DenseUniTensor_test.cpp b/tests/DenseUniTensor_test.cpp index 4da33e205..8958d19e4 100644 --- a/tests/DenseUniTensor_test.cpp +++ b/tests/DenseUniTensor_test.cpp @@ -146,6 +146,58 @@ TEST_F(DenseUniTensorTest, set_label_not_exist_old_label) { EXPECT_ANY_THROW(utzero345.set_label("Not exist label", "testing label")); } +// Deprecated-function tests: suppress warnings so the compiler does not error +// on [[deprecated]] calls. These tests verify backward compatibility. +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + +/*=====test info===== +describe:test set_labels. +====================*/ +TEST_F(DenseUniTensorTest, set_labels_normal) { + // vector + std::vector org_labels = {"org 1", "org 2", "org 3"}; + std::vector new_labels = {"testing 1", "testing 2", "testing 3"}; + utzero345.set_labels(org_labels); + utzero345.set_labels(new_labels); + EXPECT_EQ(utzero345.labels(), new_labels); + + // initilizer list + utzero345.set_labels({"org 1", "org 2", "org 3"}); + utzero345.set_labels({"testing 1", "testing 2", "testing 3"}); + EXPECT_EQ(utzero345.labels(), new_labels); +} + +/*=====test info===== +describe:set_labels to uninitialized unitensor +====================*/ +TEST_F(DenseUniTensorTest, set_labels_un_init) { + std::vector new_labels = {}; + ut_uninit.set_labels(new_labels); + EXPECT_EQ(ut_uninit.labels(), new_labels); +} + +/*=====test info===== +describe:test set_labels length not match. 
+====================*/ +TEST_F(DenseUniTensorTest, set_labels_len_not_match) { + // too long + std::vector new_labels_long = {"test1", "test2", "test3", "test4"}; + EXPECT_ANY_THROW(utzero345.set_labels(new_labels_long)); + std::vector new_labels_short = {"test1", "test2"}; + EXPECT_ANY_THROW(utzero345.set_labels(new_labels_short)); +} + +/*=====test info===== +describe:test set_labels duplicated. +====================*/ +TEST_F(DenseUniTensorTest, set_labels_duplicated) { + std::vector new_labels = {"test1", "test2", "test2", "test3"}; + EXPECT_ANY_THROW(utzero345.set_labels(new_labels)); +} + +#pragma GCC diagnostic pop + TEST_F(DenseUniTensorTest, set_rowrank) { // Spf is a rank-3 tensor const auto org_rowrank = Spf.rowrank(); @@ -605,6 +657,51 @@ TEST_F(DenseUniTensorTest, relabel_) { EXPECT_THROW(ut_uninit.relabel_(0, ""), std::logic_error); } +// Deprecated-function tests: suppress warnings so the compiler does not error +// on [[deprecated]] calls. These tests verify backward compatibility. 
+#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + +TEST_F(DenseUniTensorTest, relabels) { + auto ut = utzero3456.relabels({"a", "b", "cd", "d"}); + EXPECT_EQ(utzero3456.labels()[0], "0"); + EXPECT_EQ(utzero3456.labels()[1], "1"); + EXPECT_EQ(utzero3456.labels()[2], "2"); + EXPECT_EQ(utzero3456.labels()[3], "3"); + EXPECT_EQ(ut.labels()[0], "a"); + EXPECT_EQ(ut.labels()[1], "b"); + EXPECT_EQ(ut.labels()[2], "cd"); + EXPECT_EQ(ut.labels()[3], "d"); + ut = utzero3456.relabels({"1", "-1", "2", "1000"}); + EXPECT_THROW(ut.relabels({"a", "a", "b", "c"}), std::logic_error); + EXPECT_THROW(ut.relabels({"1", "1", "0", "-1"}), std::logic_error); + EXPECT_THROW(ut.relabels({"a"}), std::logic_error); + EXPECT_THROW(ut.relabels({"1", "2"}), std::logic_error); + EXPECT_THROW(ut.relabels({"a", "b", "c", "d", "e"}), std::logic_error); + EXPECT_THROW(ut_uninit.relabels({"a", "b", "c", "d", "e"}), std::logic_error); +} + +TEST_F(DenseUniTensorTest, relabels_) { + auto ut = utzero3456.relabels_({"a", "b", "cd", "d"}); + EXPECT_EQ(utzero3456.labels()[0], "a"); + EXPECT_EQ(utzero3456.labels()[1], "b"); + EXPECT_EQ(utzero3456.labels()[2], "cd"); + EXPECT_EQ(utzero3456.labels()[3], "d"); + EXPECT_EQ(ut.labels()[0], "a"); + EXPECT_EQ(ut.labels()[1], "b"); + EXPECT_EQ(ut.labels()[2], "cd"); + EXPECT_EQ(ut.labels()[3], "d"); + ut = utzero3456.relabels_({"1", "-1", "2", "1000"}); + EXPECT_THROW(ut.relabels_({"a", "a", "b", "c"}), std::logic_error); + EXPECT_THROW(ut.relabels_({"1", "1", "0", "-1"}), std::logic_error); + EXPECT_THROW(ut.relabels_({"a"}), std::logic_error); + EXPECT_THROW(ut.relabels_({"1", "2"}), std::logic_error); + EXPECT_THROW(ut.relabels_({"a", "b", "c", "d", "e"}), std::logic_error); + EXPECT_THROW(ut_uninit.relabels_({"a", "b", "c", "d", "e"}), std::logic_error); +} + +#pragma GCC diagnostic pop + /*=====test info===== describe:test relabel_. 
====================*/ diff --git a/tests/gpu/BlockUniTensor_test.cpp b/tests/gpu/BlockUniTensor_test.cpp index e678e2b4e..165630f73 100644 --- a/tests/gpu/BlockUniTensor_test.cpp +++ b/tests/gpu/BlockUniTensor_test.cpp @@ -38,6 +38,49 @@ TEST_F(BlockUniTensorTest, gpu_Trace) { EXPECT_THROW(BUT1.Trace("-1", "5"), std::logic_error); } +// Deprecated-function tests: suppress warnings so the compiler does not error +// on [[deprecated]] calls. These tests verify backward compatibility. +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + +TEST_F(BlockUniTensorTest, gpu_relabels) { + BUT1 = BUT1.relabels({"a", "b", "cd", "d"}); + EXPECT_EQ(BUT1.labels()[0], "a"); + EXPECT_EQ(BUT1.labels()[1], "b"); + EXPECT_EQ(BUT1.labels()[2], "cd"); + EXPECT_EQ(BUT1.labels()[3], "d"); + BUT1 = BUT1.relabels({"1", "-1", "2", "1000"}); + EXPECT_EQ(BUT1.labels()[0], "1"); + EXPECT_EQ(BUT1.labels()[1], "-1"); + EXPECT_EQ(BUT1.labels()[2], "2"); + EXPECT_EQ(BUT1.labels()[3], "1000"); + + EXPECT_THROW(BUT1.relabels({"a", "a", "b", "c"}), std::logic_error); + EXPECT_THROW(BUT1.relabels({"1", "1", "0", "-1"}), std::logic_error); + EXPECT_THROW(BUT1.relabels({"a"}), std::logic_error); + EXPECT_THROW(BUT1.relabels({"1", "2"}), std::logic_error); + EXPECT_THROW(BUT1.relabels({"a", "b", "c", "d", "e"}), std::logic_error); +} +TEST_F(BlockUniTensorTest, gpu_relabels_) { + BUT1.relabels_({"a", "b", "cd", "d"}); + EXPECT_EQ(BUT1.labels()[0], "a"); + EXPECT_EQ(BUT1.labels()[1], "b"); + EXPECT_EQ(BUT1.labels()[2], "cd"); + EXPECT_EQ(BUT1.labels()[3], "d"); + BUT1.relabels_({"1", "-1", "2", "1000"}); + EXPECT_EQ(BUT1.labels()[0], "1"); + EXPECT_EQ(BUT1.labels()[1], "-1"); + EXPECT_EQ(BUT1.labels()[2], "2"); + EXPECT_EQ(BUT1.labels()[3], "1000"); + EXPECT_THROW(BUT1.relabels_({"a", "a", "b", "c"}), std::logic_error); + EXPECT_THROW(BUT1.relabels_({"1", "1", "0", "-1"}), std::logic_error); + EXPECT_THROW(BUT1.relabels_({"a"}), std::logic_error); + 
EXPECT_THROW(BUT1.relabels_({"1", "2"}), std::logic_error); + EXPECT_THROW(BUT1.relabels_({"a", "b", "c", "d", "e"}), std::logic_error); +} + +#pragma GCC diagnostic pop + TEST_F(BlockUniTensorTest, gpu_relabel) { auto tmp = BUT1.clone(); BUT1 = BUT1.relabel({"a", "b", "cd", "d"}); diff --git a/tests/gpu/DenseUniTensor_test.cpp b/tests/gpu/DenseUniTensor_test.cpp index c2aad402c..c562c051d 100644 --- a/tests/gpu/DenseUniTensor_test.cpp +++ b/tests/gpu/DenseUniTensor_test.cpp @@ -20,6 +20,40 @@ TEST_F(DenseUniTensorTest, gpu_Trace) { // EXPECT_THROW(utzero3456.Trace(-1,5),std::logic_error); } +// Deprecated-function tests: suppress warnings so the compiler does not error +// on [[deprecated]] calls. These tests verify backward compatibility. +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + +TEST_F(DenseUniTensorTest, gpu_relabels) { + utzero3456 = utzero3456.relabels({"a", "b", "cd", "d"}); + EXPECT_EQ(utzero3456.labels()[0], "a"); + EXPECT_EQ(utzero3456.labels()[1], "b"); + EXPECT_EQ(utzero3456.labels()[2], "cd"); + EXPECT_EQ(utzero3456.labels()[3], "d"); + utzero3456 = utzero3456.relabels({"1", "-1", "2", "1000"}); + EXPECT_THROW(utzero3456.relabels({"a", "a", "b", "c"}), std::logic_error); + EXPECT_THROW(utzero3456.relabels({"1", "1", "0", "-1"}), std::logic_error); + EXPECT_THROW(utzero3456.relabels({"a"}), std::logic_error); + EXPECT_THROW(utzero3456.relabels({"1", "2"}), std::logic_error); + EXPECT_THROW(utzero3456.relabels({"a", "b", "c", "d", "e"}), std::logic_error); +} +TEST_F(DenseUniTensorTest, gpu_relabels_) { + utzero3456.relabels_({"a", "b", "cd", "d"}); + EXPECT_EQ(utzero3456.labels()[0], "a"); + EXPECT_EQ(utzero3456.labels()[1], "b"); + EXPECT_EQ(utzero3456.labels()[2], "cd"); + EXPECT_EQ(utzero3456.labels()[3], "d"); + utzero3456.relabels_({"1", "-1", "2", "1000"}); + EXPECT_THROW(utzero3456.relabels_({"a", "a", "b", "c"}), std::logic_error); + EXPECT_THROW(utzero3456.relabels_({"1", "1", "0", "-1"}), 
std::logic_error); + EXPECT_THROW(utzero3456.relabels_({"a"}), std::logic_error); + EXPECT_THROW(utzero3456.relabels_({"1", "2"}), std::logic_error); + EXPECT_THROW(utzero3456.relabels_({"a", "b", "c", "d", "e"}), std::logic_error); +} + +#pragma GCC diagnostic pop + TEST_F(DenseUniTensorTest, gpu_relabel) { auto tmp = utzero3456.clone(); utzero3456 = utzero3456.relabel({"a", "b", "cd", "d"}); From 227a9b060dbf4a0f0c28cb87fed00e07228912fc Mon Sep 17 00:00:00 2001 From: pcchen Date: Fri, 20 Mar 2026 11:52:07 +0800 Subject: [PATCH 10/10] emit DeprecationWarning from deprecated Python pybind bindings c_set_labels, relabels (both overloads), and c_relabels_ (both overloads) now call PyErr_WarnEx(PyExc_DeprecationWarning, ...) before delegating to their relabel/relabel_ replacements, matching Python convention. Co-Authored-By: Claude Sonnet 4.6 --- pybind/unitensor_py.cpp | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pybind/unitensor_py.cpp b/pybind/unitensor_py.cpp index 8a60e9373..e2adb55cd 100644 --- a/pybind/unitensor_py.cpp +++ b/pybind/unitensor_py.cpp @@ -164,6 +164,8 @@ void unitensor_binding(py::module &m) { .def("c_set_labels",[](UniTensor &self, const std::vector &new_labels){ + PyErr_WarnEx(PyExc_DeprecationWarning, + "c_set_labels() is deprecated, use relabel_() instead.", 1); return self.relabel_(new_labels); },py::arg("new_labels")) @@ -175,6 +177,8 @@ void unitensor_binding(py::module &m) { return self.relabel(new_labels); }, py::arg("new_labels")) .def("relabels",[](UniTensor &self, const std::vector &new_labels){ + PyErr_WarnEx(PyExc_DeprecationWarning, + "relabels() is deprecated, use relabel() instead.", 1); return self.relabel(new_labels); }, py::arg("new_labels")) @@ -182,6 +186,8 @@ void unitensor_binding(py::module &m) { self.relabel_(new_labels); }, py::arg("new_labels")) .def("c_relabels_",[](UniTensor &self, const std::vector &new_labels){ + PyErr_WarnEx(PyExc_DeprecationWarning, + "c_relabels_() is deprecated, use relabel_() 
instead.", 1); self.relabel_(new_labels); }, py::arg("new_labels")) @@ -211,10 +217,14 @@ void unitensor_binding(py::module &m) { } ,py::arg("old_labels"), py::arg("new_labels")) .def("relabels",[](UniTensor &self, const std::vector &old_labels, const std::vector &new_labels){ + PyErr_WarnEx(PyExc_DeprecationWarning, + "relabels() is deprecated, use relabel() instead.", 1); return self.relabel(old_labels,new_labels); } ,py::arg("old_labels"), py::arg("new_labels")) .def("c_relabels_",[](UniTensor &self, const std::vector &old_labels, const std::vector &new_labels){ + PyErr_WarnEx(PyExc_DeprecationWarning, + "c_relabels_() is deprecated, use relabel_() instead.", 1); self.relabel_(old_labels,new_labels); } ,py::arg("old_labels"), py::arg("new_labels"))