mptensor v0.3.0
Parallel Library for Tensor Network Methods
load.hpp
/*
  mptensor - Parallel Library for Tensor Network Methods

  Copyright 2016 Satoshi Morita

  mptensor is free software: you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as
  published by the Free Software Foundation, either version 3 of the
  License, or (at your option) any later version.

  mptensor is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with mptensor. If not, see
  <https://www.gnu.org/licenses/>.
*/

#ifndef _MPTENSOR_LOAD_HPP_
#define _MPTENSOR_LOAD_HPP_

#include <cassert>
#include <cstdio>
#include <cstring>  // needed for std::strlen in load_ver_0_2
#include <fstream>
#include <string>

#include "mptensor/tensor.hpp"

namespace mptensor {

template <template <typename> class Matrix, typename C>
void Tensor<Matrix, C>::load(const std::string &filename) {
  const bool comm_root = (get_comm_rank() == 0);
  std::ifstream fin;
  std::string dummy;
  std::string version;
  size_t ibuf[8] = {0};
  size_t& loaded_version_major = ibuf[0];
  size_t& loaded_version_minor = ibuf[1];
  size_t& loaded_version_patch = ibuf[2];
  size_t& loaded_matrix_type = ibuf[3];
  size_t& loaded_value_type = ibuf[4];
  size_t& loaded_comm_size = ibuf[5];
  size_t& loaded_ndim = ibuf[6];
  size_t& loaded_urank = ibuf[7];
  const size_t this_matrix_type = Matrix<C>::matrix_type_tag;
  const size_t this_value_type = value_type_tag<C>();
  const size_t this_comm_size = get_comm_size();
  Shape loaded_shape;
  Axes loaded_map;

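  // The text header written by save() is parsed field by field below.
  // Judging from the reads, it carries (labels inferred, exact wording may
  // differ): "mptensor <major>.<minor>.<patch>", the matrix-type tag, the
  // value-type tag, the communicator size, ndim, the upper rank, and then
  // ndim shape extents followed by ndim axes-map entries.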
  // Read the base file
  {
    if (comm_root) {
      fin.open(filename);
      fin >> dummy >> version;

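      // Files written before v0.3 do not start with the "mptensor" magic
      // word; report version 0.2.0 so the legacy loader below takes over.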
      if (dummy != "mptensor") {
        ibuf[0] = 0;
        ibuf[1] = 2;
        ibuf[2] = 0;
        fin.close();
      } else {
        sscanf(version.c_str(), "%lu.%lu.%lu", &(ibuf[0]), &(ibuf[1]),
               &(ibuf[2]));
        fin >> dummy >> ibuf[3] >> dummy;  // matrix_type
        fin >> dummy >> ibuf[4] >> dummy;  // value_type
        fin >> dummy >> ibuf[5];           // comm_size
        fin >> dummy >> ibuf[6];           // ndim
        fin >> dummy >> ibuf[7];           // upper_rank
      }
    }
    Mat.bcast(ibuf, 8, 0);

    if (ibuf[0] == 0 && ibuf[1] <= 2) {
      load_ver_0_2(filename.c_str());
      return;
    }

    const size_t count = 2 * loaded_ndim;
    size_t* buffer = new size_t[count];

    if (comm_root) {
      size_t k = 0;
      fin >> dummy;
      for (size_t i = 0; i < loaded_ndim; ++i) fin >> buffer[k++];
      fin >> dummy;
      for (size_t i = 0; i < loaded_ndim; ++i) fin >> buffer[k++];
      fin.close();
    }
    Mat.bcast(buffer, count, 0);

    loaded_shape.assign(loaded_ndim, buffer);
    loaded_map.assign(loaded_ndim, (buffer + loaded_ndim));

    delete[] buffer;
  }

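  // The element type (real vs. complex) must match; no conversion is done.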
  assert(loaded_value_type == this_value_type);

  // Initialize tensor shape
  init(loaded_shape, loaded_urank, loaded_map);

  // Read tensor elements
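  // Three cases: the file matches this backend and communicator size (each
  // rank reads its own block directly); it was saved by the non-distributed
  // LAPACK backend; or it is a ScaLAPACK file written from a communicator
  // of a different size.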
  if (loaded_matrix_type == this_matrix_type &&
      loaded_comm_size == this_comm_size) {
    if (io_helper::debug && comm_root) {
      std::clog << "Info: Load a tensor directly." << std::endl;
    }
    io_helper::load_binary(filename, get_comm_rank(), get_matrix().head(),
                           local_size());
    return;
  } else if (loaded_matrix_type == MATRIX_TYPE_TAG_LAPACK) {
    if (io_helper::debug && comm_root) {
      std::clog << "Info: Load a non-distributed tensor." << std::endl;
    }
    io_helper::load_scalapack(filename, loaded_comm_size, Mat);
    return;
  } else if (loaded_matrix_type == MATRIX_TYPE_TAG_SCALAPACK) {
    if (io_helper::debug && comm_root) {
      std::clog
          << "Info: Load a tensor distributed on different-size communicator."
          << std::endl;
    }
    io_helper::load_scalapack(filename, loaded_comm_size, Mat);
    return;
  }
}

template <template <typename> class Matrix, typename C>
void Tensor<Matrix, C>::load_ver_0_2(const char* filename) {
  std::ifstream fin;
  size_t n;
  size_t urank;
  Shape shape;
  Axes map;

  if (get_comm_rank() == 0) {
    std::clog << "Warning: \"" << filename
              << "\" will be loaded using v0.2 interface." << std::endl;
  }

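  // Legacy v0.2 layout (inferred from the reads below): the text header
  // stores ndim, then the upper rank, the shape, and the axes map as plain
  // integers; each rank keeps its elements in a raw binary file named
  // "<filename>.RRRR" (zero-padded rank).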
  // Read the base file
  {
    if (get_comm_rank() == 0) {
      fin.open(filename);
      fin >> n;
    }
    Mat.bcast(&n, 1, 0);

    const size_t count = 2 * n + 1;
    size_t* buffer = new size_t[count];

    if (get_comm_rank() == 0) {
      for (size_t i = 0; i < count; ++i) fin >> buffer[i];
      fin.close();
    }
    Mat.bcast(buffer, count, 0);

    urank = buffer[0];
    shape.assign(n, (buffer + 1));
    map.assign(n, (buffer + n + 1));

    delete[] buffer;
  }

  // Initialize tensor shape
  init(shape, urank, map);

  // Read tensor elements
  {
    char* datafile = new char[std::strlen(filename) + 16];
    sprintf(datafile, "%s.%04d", filename, get_comm_rank());

    // load_binary(datafile,get_matrix().head(),local_size());
    fin.open(datafile, std::ifstream::binary);
    fin.read(reinterpret_cast<char*>(get_matrix().head()),
             sizeof(C) * local_size());
    fin.close();

    delete[] datafile;
  }
}

}  // namespace mptensor

#endif  // _MPTENSOR_LOAD_HPP_
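
For context, a minimal usage sketch (not part of load.hpp) of saving a tensor and loading it back. It assumes an MPI build with the ScaLAPACK backend, the umbrella header mptensor/mptensor.hpp, and the companion Tensor::save() defined in save.hpp; adjust the include path, Matrix backend, and file prefix ("tensor_A") to your setup.

#include <mpi.h>
#include <mptensor/mptensor.hpp>  // assumed umbrella header of mptensor

using namespace mptensor;

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);
  {
    // Create a small distributed tensor and fill it.
    Tensor<scalapack::Matrix, double> t(Shape(4, 4, 4));
    // ... set elements of t ...

    // save() writes a text header plus per-process binary element files.
    t.save("tensor_A");

    // load() restores the shape, the axes map, and the elements, choosing
    // the reading strategy from the header written by save().
    Tensor<scalapack::Matrix, double> u;
    u.load("tensor_A");
  }
  MPI_Finalize();
  return 0;
}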