implement pre-commit fixes
antoniolrm committed Jun 14, 2024
1 parent 5181f5f commit 1bfb6fb
Showing 11 changed files with 247 additions and 239 deletions.
2 changes: 1 addition & 1 deletion .gitlab-ci.yml
@@ -8,5 +8,5 @@ tests:
  stage: tests
  script:
    - cd dacp/tests
-    - python -m unittest -v tests.py
+    - python -m unittest -v tests.py
  allow_failure: true
2 changes: 1 addition & 1 deletion AUTHORS.md
@@ -1,7 +1,7 @@
# pyDACP Authors
Below is a list of the contributors to pyDACP:

-+ [Kostas Vilkelis](https://quantumtinkerer.tudelft.nl/members/kostas-vilkelis/)
++ [Kostas Vilkelis](https://quantumtinkerer.tudelft.nl/members/kostas-vilkelis/)
+ [Antonio Manesco](https://antoniomanesco.org/)
+ [Anton Akhmerov](<https://antonakhmerov.org>)
+ [Michael Wimmer](https://michaelwimmer.org/)
8 changes: 4 additions & 4 deletions README.md
@@ -2,13 +2,13 @@

A python package to compute eigenvalues using the dual applications of Chebyshev polynomials algorithm. The algorithm is described in [SciPost Phys. 11, 103 (2021)](https://scipost.org/SciPostPhys.11.6.103).

-This package implements an algorithm that computes the eigenvalues of hermitian linear opeators within a given window. Besides the original algorithm, we also provide a way to deal with degeneracies systematically, and remove the need of prior estimations of the number of eigenvalues. The algorithm is useful for large, sufficiently sparse matrices.
+This package implements an algorithm that computes the eigenvalues of hermitian linear operators within a given window. Besides the original algorithm, we also provide a way to deal with degeneracies systematically, and remove the need of prior estimations of the number of eigenvalues. The algorithm is useful for large, sufficiently sparse matrices.

This is an experimental version, so use at your own risk.
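For context, the conventional route to such interior ("windowed") eigenvalues of a large sparse Hermitian matrix is shift-invert Lanczos, which is also what the benchmark below compares against. A minimal SciPy sketch of that baseline (random test matrix; all names here are illustrative, not part of this package):

```python
# Baseline sketch: interior eigenvalues near a target energy via
# shift-invert Lanczos. Unlike DACP, the number of eigenvalues k
# must be guessed in advance.
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as sla

rng = np.random.default_rng(0)
n = 2000
m = sp.random(n, n, density=1e-3, random_state=rng, format="csr")
h = (m + m.T) / 2  # sparse real-symmetric (Hermitian) test matrix

# 20 eigenvalues closest to the window center sigma = 0
evals = sla.eigsh(h, k=20, sigma=0, return_eigenvectors=False)
print(np.sort(evals))
```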

## Content

-* Instalation
+* Installation
* The algorithm
+ First application of Chebyshev polynomials
+ Second application of Chebyshev polynomials
@@ -52,7 +52,7 @@ T_k(\mathcal{F})|r\rangle = |r_E\rangle.
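The hunk context above shows the outcome of the first application: repeated Chebyshev filtering $`T_k(\mathcal{F})|r\rangle`$ projects a random vector $`|r\rangle`$ onto the target window. A schematic sketch of such a filter, consistent with the construction in the cited paper but with illustrative names (the README's own formula for $`\mathcal{F}`$ is collapsed in this diff):

```python
# Sketch of the first Chebyshev application (names are illustrative).
# F maps the target window [-a, a] outside [-1, 1], where |T_k| grows
# exponentially, so T_k(F)|r> converges to a vector inside the window.
import numpy as np

def filter_vector(H, r, a, e_max, k):
    """Apply T_k(F) to r, with F = (H @ H - e_c) / e_0."""
    e_0 = (e_max**2 - a**2) / 2
    e_c = (e_max**2 + a**2) / 2

    def apply_F(v):
        # F acting on a vector: two sparse matvecs, no matrix powers
        return (H @ (H @ v) - e_c * v) / e_0

    t_prev, t_curr = r, apply_F(r)
    for _ in range(k - 1):
        # Chebyshev recurrence T_{k+1} = 2 F T_k - T_{k-1}
        t_prev, t_curr = t_curr, 2 * apply_F(t_curr) - t_prev
    return t_curr / np.linalg.norm(t_curr)
```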
### Second application of Chebyshev polynomials

Now we have one single vector $`|r_E\rangle`$ within the energy window we want.
-And we use again Chebyshev polynomials to span the full basis of the subspace $`\mathcal{L}`$
+And we use again Chebyshev polynomials to span the full basis of the subspace $`\mathcal{L}`$
with $`E \in [-a, a]`$.
For that, we define a second operator, $`\mathcal{G}`$,
which is simply the rescaled Hamiltonian such that all eigenvalues are within $`[-1, 1]`$:
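As a schematic illustration of this second step (not the package's actual API), the subspace basis can be generated with the standard three-term recurrence applied to the filtered vector, where `G` below stands for the rescaled Hamiltonian:

```python
# Schematic sketch (illustrative, not the package API): span the target
# subspace by applying T_{k+1}(G)|v> = 2 G T_k(G)|v> - T_{k-1}(G)|v>
# to the filtered vector r_E, with G rescaled so that the full spectrum
# of the Hamiltonian lies in [-1, 1].
import numpy as np

def chebyshev_basis(G, r_E, num_vectors):
    """Return the (non-orthogonal) basis [T_0(G) r_E, ..., T_{n-1}(G) r_E]."""
    basis = [r_E, G @ r_E]
    for _ in range(num_vectors - 2):
        basis.append(2 * (G @ basis[-1]) - basis[-2])
    return np.array(basis[:num_vectors])
```

These basis vectors are not orthogonal, which is why the overlap matrix $`S_{ij}`$ discussed next is needed.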
@@ -96,7 +96,7 @@ This is possible because of two properties combined:
1. $`[T_i(\mathcal{H}), \mathcal{H}]=0`$
2. $`T_i(\mathcal{H})T_j(\mathcal{H}) = \frac12 \left(T_{i+j}(\mathcal{H}) + T_{|i-j|}(\mathcal{H}) \right)~.`$

-Combining those two properties, we only need to store the fitered vectors from previous runs $`|r_E^{pref}\rangle`$ and compute:
+Combining those two properties, we only need to store the filtered vectors from previous runs $`|r_E^{pref}\rangle`$ and compute:
```math
S_{ij} = \langle r_E^{prev}| \left[\frac12 \left(T_{i+j}(\mathcal{H}) + T_{|i-j|}(\mathcal{H}) \right)\right] |r_E^{current}\rangle~.
```
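A sketch of how the two properties reduce this to Chebyshev moments (again with illustrative names, assuming $`\mathcal{H}`$ has already been rescaled to $`[-1, 1]`$): one recursion yields $`\mu_m = \langle r_E^{prev}|T_m(\mathcal{H})|r_E^{current}\rangle`$, and then $`S_{ij} = \tfrac12(\mu_{i+j} + \mu_{|i-j|})`$ follows without storing any intermediate $`T_i(\mathcal{H})|r\rangle`$ vectors:

```python
# Schematic sketch (illustrative names): the product identity means the
# overlap matrix only requires the moments
#   mu_m = <r_prev| T_m(H) |r_curr>,  with  S_ij = (mu_{i+j} + mu_{|i-j|}) / 2.
import numpy as np

def chebyshev_moments(H, r_prev, r_curr, order):
    """Moments mu_m for m = 0 .. order-1; H must be rescaled to [-1, 1]."""
    t_prev, t_curr = r_curr, H @ r_curr
    # np.vdot conjugates its first argument, matching <r_prev| ... >
    mu = [np.vdot(r_prev, t_prev), np.vdot(r_prev, t_curr)]
    for _ in range(order - 2):
        t_prev, t_curr = t_curr, 2 * (H @ t_curr) - t_prev
        mu.append(np.vdot(r_prev, t_curr))
    return np.asarray(mu)

def overlap_matrix(mu, n):
    """n x n matrix S_ij = (mu_{i+j} + mu_{|i-j|}) / 2; needs len(mu) >= 2n - 1."""
    i, j = np.indices((n, n))
    return (mu[i + j] + mu[np.abs(i - j)]) / 2
```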
122 changes: 66 additions & 56 deletions dacp/benchmark/benchmark.py
@@ -11,7 +11,7 @@
import itertools as it
import xarray as xr

-calculation = 'eigvals_only_2d'
+calculation = "eigvals_only_2d"

# +
t = 1
@@ -22,6 +22,7 @@ def make_syst(N=100, dimension=2):
    if dimension == 2:
        L = np.sqrt(N)
        lat = kwant.lattice.square(a=1, norbs=1)
+
        # Define 2D shape
        def shape(pos):
            (x, y) = pos
@@ -30,6 +31,7 @@ def shape(pos):
    elif dimension == 3:
        L = np.cbrt(N)
        lat = kwant.lattice.cubic(a=1, norbs=1)
+
        # Define 3D shape
        def shape(pos):
            (x, y, z) = pos
@@ -38,6 +40,7 @@ def shape(pos):
    elif dimension == 1:
        L = N
        lat = kwant.lattice.chain(a=1, norbs=1)
+
        # Define 1D shape
        def shape(pos):
            x = pos[0]
@@ -63,15 +66,17 @@ def onsite(site, seed):

# -

+
def sparse_diag(matrix, k, sigma, **kwargs):
    """Call sla.eigsh with mumps support.
    Please see scipy.sparse.linalg.eigsh for documentation.
    """
+
    class LuInv(sla.LinearOperator):
        def __init__(self, A):
            inst = mumps.MUMPSContext()
-            inst.analyze(A, ordering='pord')
+            inst.analyze(A, ordering="pord")
            inst.factor(A)
            self.solve = inst.solve
            sla.LinearOperator.__init__(self, A.dtype, A.shape)
@@ -137,20 +142,22 @@ def sparse_benchmark():

# +
params = {
-    'N' : [10**i for i in np.linspace(3, 4.5, 5, endpoint=True)],
-    'seeds' : [1, 2, 3],
-    'dimensions' : [2]
+    "N": [10**i for i in np.linspace(3, 4.5, 5, endpoint=True)],
+    "seeds": [1, 2, 3],
+    "dimensions": [2],
}

values = list(params.values())
args = np.array(list(it.product(*values)))

args = tqdm(args)

+
def wrapped_fn(args):
    a = benchmark(*args)
    return a

+
result = list(map(wrapped_fn, args))
# -

@@ -161,26 +168,24 @@ def wrapped_fn(args):
shaped_data = np.reshape(result, shapes)
da = xr.DataArray(
    data=shaped_data,
-    dims=[*params.keys(), 'output'],
-    coords={
-        **params,
-        'output': ['sparsetime', 'sparsemem', 'dacptime', 'dacpmem']
-    }
+    dims=[*params.keys(), "output"],
+    coords={**params, "output": ["sparsetime", "sparsemem", "dacptime", "dacpmem"]},
)

-da.to_netcdf('./data/data_benchmark_' + calculation + '.nc')
+da.to_netcdf("./data/data_benchmark_" + calculation + ".nc")

-da = xr.open_dataarray('./data/data_benchmark_' + calculation + '.nc')
+da = xr.open_dataarray("./data/data_benchmark_" + calculation + ".nc")

-da_mean=da.mean(dim='seeds')
+da_mean = da.mean(dim="seeds")

import matplotlib.pyplot as plt
from matplotlib import rc
-rc('text', usetex=True)
-plt.rcParams['figure.figsize'] = (6, 6)
-plt.rcParams['lines.linewidth'] = 2
-plt.rcParams['font.size'] = 15
-plt.rcParams['legend.fontsize'] = 15
+
+rc("text", usetex=True)
+plt.rcParams["figure.figsize"] = (6, 6)
+plt.rcParams["lines.linewidth"] = 2
+plt.rcParams["font.size"] = 15
+plt.rcParams["legend.fontsize"] = 15

from scipy.stats import linregress
from scipy.optimize import curve_fit
@@ -189,86 +194,91 @@ def wrapped_fn(args):
def slope(dim, output):
    return linregress(
        np.log10(da_mean.N.values),
-        np.log10(da_mean.sel(output=output, dimensions=dim).values)
+        np.log10(da_mean.sel(output=output, dimensions=dim).values),
    )[0:2]


# +
def power_law(x, a, b):
-    return a*x**b
+    return a * x**b


def power_fit(dim, output):
    return curve_fit(
-        power_law,
-        da_mean.N.values,
-        da_mean.sel(output=output, dimensions=dim).values
+        power_law, da_mean.N.values, da_mean.sel(output=output, dimensions=dim).values
    )[0]


# -

-time_dacp_fit = slope(dim=2, output='dacptime')
-time_sparse_fit = slope(dim=2, output='sparsetime')
+time_dacp_fit = slope(dim=2, output="dacptime")
+time_sparse_fit = slope(dim=2, output="sparsetime")
print(time_dacp_fit, time_sparse_fit)

-time_dacp_fit = power_fit(dim=2, output='dacptime')
-time_sparse_fit = power_fit(dim=2, output='sparsetime')
+time_dacp_fit = power_fit(dim=2, output="dacptime")
+time_sparse_fit = power_fit(dim=2, output="sparsetime")
print(time_dacp_fit, time_sparse_fit)

-mem_dacp_fit_0 = slope(dim=2, output='dacpmem')
-mem_sparse_fit_0 = slope(dim=2, output='sparsemem')
+mem_dacp_fit_0 = slope(dim=2, output="dacpmem")
+mem_sparse_fit_0 = slope(dim=2, output="sparsemem")
print(mem_dacp_fit_0, mem_sparse_fit_0)

-mem_dacp_fit = power_fit(dim=2, output='dacpmem')
-mem_sparse_fit = power_fit(dim=2, output='sparsemem')
+mem_dacp_fit = power_fit(dim=2, output="dacpmem")
+mem_sparse_fit = power_fit(dim=2, output="sparsemem")
print(mem_dacp_fit, mem_sparse_fit)

from sys import getsizeof

getsizeof(np.random.rand(int(1e6)) + 1j * np.random.rand(int(1e6))) / 1e6 * 1000

-(da_mean.sel(dimensions=2).sel(output=['dacptime', 'sparsetime'])/3600).plot(hue='output', marker='o')
-plt.xscale('log')
-plt.yscale('log')
-plt.ylabel(r'$\mathrm{Time\ [hours]}$')
-plt.xlabel(r'$\mathrm{Number\ of\ sites}$')
+(da_mean.sel(dimensions=2).sel(output=["dacptime", "sparsetime"]) / 3600).plot(
+    hue="output", marker="o"
+)
+plt.xscale("log")
+plt.yscale("log")
+plt.ylabel(r"$\mathrm{Time\ [hours]}$")
+plt.xlabel(r"$\mathrm{Number\ of\ sites}$")
plt.yticks([1e-3, 1e-2, 1e-1, 1])
plt.tight_layout()
-plt.savefig('time_2d_' + calculation + '.png')
+plt.savefig("time_2d_" + calculation + ".png")
plt.show()


def power_from_slope(x, a, b):
-    return 10**b * x ** a
+    return 10**b * x**a


-(da_mean.sel(dimensions=2).sel(output=['dacpmem', 'sparsemem'])/1000).plot(hue='output', marker='o')
-plt.plot(np.array(params['N']), power_law(np.array(params['N']), *mem_dacp_fit)/1000)
-plt.plot(np.array(params['N']), power_law(np.array(params['N']), *mem_sparse_fit)/1000)
+(da_mean.sel(dimensions=2).sel(output=["dacpmem", "sparsemem"]) / 1000).plot(
+    hue="output", marker="o"
+)
+plt.plot(np.array(params["N"]), power_law(np.array(params["N"]), *mem_dacp_fit) / 1000)
+plt.plot(
+    np.array(params["N"]), power_law(np.array(params["N"]), *mem_sparse_fit) / 1000
+)
# plt.plot(np.array(params['N']), power_from_slope(np.array(params['N']), *mem_sparse_fit)/1000)
# plt.plot(np.array(params['N']), power_from_slope(np.array(params['N']), *mem_dacp_fit)/1000)
-plt.xscale('log')
-plt.yscale('log')
-plt.ylabel(r'$\mathrm{Memory\ [GB]}$')
-plt.xlabel(r'$\mathrm{Number\ of\ sites}$')
+plt.xscale("log")
+plt.yscale("log")
+plt.ylabel(r"$\mathrm{Memory\ [GB]}$")
+plt.xlabel(r"$\mathrm{Number\ of\ sites}$")
# plt.yticks([0.1, 1, 10])
plt.tight_layout()
-plt.savefig('mem_2d_' + calculation + '.png')
+plt.savefig("mem_2d_" + calculation + ".png")
plt.show()

-(da_mean.sel(dimensions=2).sel(output=['dacpmem'])/1000).data
+(da_mean.sel(dimensions=2).sel(output=["dacpmem"]) / 1000).data

-params['N']
+params["N"]

-mem_sparse_fit/1000
+mem_sparse_fit / 1000

# plt.plot(np.array(params['N']), power_law(np.array(params['N']), *mem_dacp_fit)/1000)
-plt.plot(np.array(params['N']), power_law(np.array(params['N']), *mem_sparse_fit)/1000)
-plt.xscale('log')
-plt.yscale('log')
+plt.plot(
+    np.array(params["N"]), power_law(np.array(params["N"]), *mem_sparse_fit) / 1000
+)
+plt.xscale("log")
+plt.yscale("log")

-np.array(params['N'])
+np.array(params["N"])

-0.7*10**1.3
+0.7 * 10**1.3
