Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 12 additions & 0 deletions include/RI/global/Shared_Vector.h
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,18 @@ class Shape_Vector
for(auto ptr_in=v_in.begin(); ptr_in<v_in.end(); )
*(ptr_this++) = *(ptr_in++);
}
// Construct from a std::vector of dimension sizes.
// The elements are copied into the fixed-size internal buffer v;
// the vector therefore must not hold more entries than v can store.
Shape_Vector(const std::vector<std::size_t>& v_in)
:size_(v_in.size())
{
// the source vector must fit into the fixed-size internal buffer
assert(size_ <= sizeof(v) / sizeof(*v));
std::size_t pos = 0;
for (const std::size_t value : v_in)
this->v[pos++] = value;
}
// Construct from an rvalue vector of dimension sizes.
// Taking ownership gains nothing here: the std::size_t elements are copied
// into the fixed-size buffer either way, so delegate to the lvalue overload
// instead of duplicating its body.
Shape_Vector(std::vector<std::size_t>&& v_in)
:Shape_Vector(static_cast<const std::vector<std::size_t>&>(v_in))
{}

// iterator to the first stored dimension size (raw pointer into the internal buffer)
const std::size_t* begin() const noexcept { return this->v; }
// iterator one past the last stored dimension size
const std::size_t* end() const noexcept { return this->v+size_; }
Expand Down
5 changes: 5 additions & 0 deletions include/RI/global/Tensor.h
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,11 @@ class Tensor
Tensor transpose() const;
Tensor dagger() const;

/// Permute the tensor axes: result axis d takes the data of input axis order[d].
/// order must be a permutation of {0, 1, 2, ..., N-1} where N == shape.size().
Tensor permute_from(const std::vector<std::size_t>& order) const;
/// Inverse mapping: permute from {0, 1, 2, ..., N-1} to the given new index order (not yet implemented).
// Tensor permute_to(const std::vector<std::size_t>) const;

// ||d||_p = (|d_1|^p+|d_2|^p+...)^{1/p}
// if(p==std::numeric_limits<double>::max()) ||d||_max = max_i |d_i|
Global_Func::To_Real_t<T> norm(const double p) const;
Expand Down
70 changes: 70 additions & 0 deletions include/RI/global/Tensor.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -324,4 +324,74 @@ std::array<std::array<std::array<std::array<T,N3>,N2>,N1>,N0> to_array(const Ten
return a;
}

// Check whether arr is a permutation of the integer range {from, from+1, ..., to}.
// Degenerate ranges (from > to, or a range so large that its element count
// wraps around std::size_t) are rejected, as is any size mismatch, out-of-range
// value, or duplicate.
inline bool is_permutation(const std::vector<std::size_t>& arr, const std::size_t from, const std::size_t to)
{
if (from > to) return false;
const std::size_t count = to - from + 1;
// count == 0 can only happen via unsigned wraparound (to == SIZE_MAX, from == 0)
if (count == 0 || arr.size() != count) return false;
std::vector<bool> seen(count, false); // seen[i] <=> value from+i already encountered
for (const std::size_t a : arr) {
if (a < from || a > to) return false;
const std::size_t idx = a - from;
if (seen[idx]) return false;
seen[idx] = true;
}
return true;
}

// Permute the tensor axes: result axis d takes the data of original axis order[d], i.e.
//   t(i_0, ..., i_{N-1}) = (*this)(j_0, ..., j_{N-1})  with  j_{order[d]} = i_d
// order must be a permutation of {0, 1, ..., N-1} with N == shape.size().
template<typename T>
Tensor<T> Tensor<T>::permute_from(const std::vector<std::size_t>& order) const
{
const std::size_t N = order.size();
assert(this->shape.size() == N);
// N == 0 would underflow N-1 below and index strides[N-1] out of bounds
assert(N > 0);
assert(is_permutation(order, 0, N-1));
Shape_Vector new_shape(this->shape);
for (std::size_t d = 0; d < N; ++d)
new_shape[d] = this->shape[order[d]];

Tensor<T> t(new_shape);

// Row-major strides of the original (strides) and the permuted (new_strides) layout.
// Count down with the unsigned "d-- > 0" idiom instead of converting N-2 to int,
// which relied on implementation-defined unsigned->signed wraparound for N == 1.
std::vector<std::size_t> strides(N), new_strides(N);
strides[N - 1] = 1;
new_strides[N - 1] = 1;
for (std::size_t d = N - 1; d-- > 0; ) // d = N-2, ..., 0
{
strides[d] = strides[d + 1] * this->shape[d + 1];
new_strides[d] = new_strides[d + 1] * new_shape[d + 1];
}

std::vector<std::size_t> new_idx(N, 0);
std::vector<std::size_t> idx(N, 0);
for (std::size_t i = 0; i < t.get_shape_all(); ++i) // i: 1D index in the permuted layout
{
// decode i into the N-dimensional index of the result
std::size_t tmp = i;
for (std::size_t d = 0; d < N; ++d)
{
new_idx[d] = tmp / new_strides[d];
tmp %= new_strides[d];
}
// map to the N-dimensional index of the original
for (std::size_t d = 0; d < N; ++d)
idx[order[d]] = new_idx[d];

// encode as 1D index in the original layout
std::size_t i0 = 0;
for (std::size_t d = 0; d < N; ++d)
i0 += idx[d] * strides[d];

(*t.data)[i] = (*this->data)[i0];
}

return t;
}

}
22 changes: 22 additions & 0 deletions include/RI/global/Tensor_Multiply-32.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -80,6 +80,28 @@ namespace Tensor_Multiply
return Txy;
}

// Txy(x0, ..., x{M-1}) = sum_y Tx(x0, ..., x{M-1}, y0, ..., y{N-1}) * Vy(y0, ..., y{N-1})
// Contracts the trailing N axes of Tx (which must match Vy's shape exactly)
// by viewing Tx as a row-major (rows x ny) matrix.
// When all axes are contracted (dim == 0) the scalar result is returned as a 1-element tensor.
template<typename Tdata>
Tensor<Tdata> gemv(const Tensor<Tdata>& Tx, const Tensor<Tdata>& Vy)
{
assert(Tx.shape.size() >= Vy.shape.size());
const std::size_t ny = Vy.get_shape_all();
assert(ny > 0); // ny == 0 would divide by zero below
assert(Tx.get_shape_all() % ny == 0);
const std::size_t dim = Tx.shape.size() - Vy.shape.size();
// divisibility alone is not enough: the trailing dims of Tx must equal Vy's dims
for (std::size_t d = 0; d < Vy.shape.size(); ++d)
assert(Tx.shape[dim + d] == Vy.shape[d]);
std::vector<std::size_t> shape_vector;
if (dim == 0)
shape_vector.push_back(1); // scalar result kept as a 1-element tensor
else
{
shape_vector.reserve(dim);
for (std::size_t d = 0; d < dim; ++d)
shape_vector.push_back(Tx.shape[d]);
}
Tensor<Tdata> Txy(shape_vector);
Blas_Interface::gemv(
'N', Txy.get_shape_all(), ny,
Tdata(1.0), Tx.ptr(), ny, Vy.ptr(), 1,
Tdata(0.0), Txy.ptr(), 1);
return Txy;
}

}

}
8 changes: 8 additions & 0 deletions include/RI/physics/Exx.h
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,10 @@ class Exx
constexpr static std::size_t Npos = Ndim; // tmp
using Tatom_pos = std::array<Tpos,Npos>; // tmp

Exx(const std::string method = "loop3") { this->method = method; }
Exx(Exx&&) noexcept = default;
Exx& operator=(Exx&&) noexcept = default;

void set_parallel(
const MPI_Comm &mpi_comm,
const std::map<TA,Tatom_pos> &atoms_pos,
Expand Down Expand Up @@ -96,6 +100,7 @@ class Exx
void free_dVs(const std::string &save_name_suffix="");
void free_dCRs(const std::string &save_name_suffix="");
void free_dVRs(const std::string &save_name_suffix="");
// Release the cached C*V*C intermediates to free memory; cal_Hs with the
// "cvc" method rebuilds them on demand when the cache is empty.
void free_cvc() { this->cvc_.clear(); }

public:
LRI<TA,Tcell,Ndim,Tdata> lri;
Expand All @@ -122,6 +127,9 @@ class Exx
};
Flag_Save_Result flag_save_result;

std::string method = "loop3"; // algorithm used by cal_Hs: "loop3" or "cvc"
std::map<TA, std::map<TAC, std::map<TA, std::map<TAC, Tensor<Tdata>>>>> cvc_; // cache of C*V*C tensors for the "cvc" method
/// Read-only access to the cached C*V*C tensors (may be empty before the first
/// "cvc" cal_Hs or after free_cvc()).
/// Fix: the previous version returned a non-const reference from a const
/// member function, which does not compile; return a const reference instead.
const std::map<TA, std::map<TAC, std::map<TA, std::map<TAC, Tensor<Tdata>>>>>& get_cvc() const { return this->cvc_; }
};

}
Expand Down
21 changes: 15 additions & 6 deletions include/RI/physics/Exx.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -266,12 +266,21 @@ void Exx<TA,Tcell,Ndim,Tdata>::cal_Hs(

if(!this->flag_finish.Ds_delta)
this->Hs.clear();
this->lri.cal_loop3(
{Label::ab_ab::a0b0_a1b1,
Label::ab_ab::a0b0_a1b2,
Label::ab_ab::a0b0_a2b1,
Label::ab_ab::a0b0_a2b2},
this->Hs);
if (this->method == "loop3")
this->lri.cal_loop3(
{ Label::ab_ab::a0b0_a1b1,
Label::ab_ab::a0b0_a1b2,
Label::ab_ab::a0b0_a2b1,
Label::ab_ab::a0b0_a2b2 },
this->Hs);
else if (this->method == "cvc")
{
if (this->cvc_.empty())
this->cvc_ = this->lri.cal_cvc();
this->Hs = this->lri.constract_cvc_ds(this->cvc_);
}
else
assert(false && "Unknown method in Exx::cal_Hs");

//if()
this->energy = this->post_2D.cal_energy(
Expand Down
26 changes: 26 additions & 0 deletions include/RI/physics/LR.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
#pragma once
#include "./Exx.h"

namespace RI
{
// LR (linear response): identical to Exx except that the two density
// matrices entering the force calculation are allowed to differ.
template<typename TA, typename Tcell, std::size_t Ndim, typename Tdata>
class LR : public Exx<TA,Tcell,Ndim,Tdata>
{
public:
// Default method is "cvc" here (Exx's default method is "loop3").
LR(const std::string method = "cvc") { this->method = method; }
// Take over the full state of an existing Exx object (including its method).
LR(Exx<TA, Tcell, Ndim, Tdata>&& exx) : Exx<TA, Tcell, Ndim, Tdata>::Exx(std::move(exx)) {}
using Exx<TA,Tcell,Ndim,Tdata>::cal_force;
// New overload: cal_force with two different density matrices.
// If Ds_left is non-empty it is stored as the saved "Ds" tensors before
// delegating to Exx::cal_force(save_names_suffix).
// NOTE(review): the stored "Ds" replacement persists after this call — confirm intended.
void cal_force(const std::map<TA, std::map<std::pair<TA, std::array<Tcell, Ndim>>, Tensor<Tdata>>>& Ds_left,
const std::array<std::string, 5>& save_names_suffix = { "","","","","" }) // "Cs","Vs","Ds","dCs","dVs"
{
// The only difference from Exx::cal_force: save Ds_left if not empty
// (index 2 of save_names_suffix is the "Ds" suffix).
if (!Ds_left.empty())
this->post_2D.saves["Ds_" + save_names_suffix[2]] = this->post_2D.set_tensors_map2(Ds_left);
this->cal_force(save_names_suffix);
}
};

}
Loading