Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
29 changes: 0 additions & 29 deletions .github/threadpool.yaml

This file was deleted.

2 changes: 1 addition & 1 deletion .github/workflows/run_pytest.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -48,4 +48,4 @@ jobs:
run: |
pip install .[dev]
pip list
pytest --skip-gpu --psiflow-config=.github/threadpool.yaml
pytest --skip-gpu
32 changes: 32 additions & 0 deletions configs/local_test.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
---
# Psiflow configuration for fast local test runs (CI / developer machines).
# NOTE(review): the scraped diff lost the nested indentation; restored here so
# the document parses as valid YAML — confirm against the merged file.
parsl_log_level: WARNING
retries: 0
make_symlinks: false

# Molecular-dynamics / model evaluation workers.
ModelEvaluation:
  gpu: false
  use_threadpool: false
  max_simulation_time: 1      # minutes

# ML-potential training workers.
ModelTraining:
  gpu: true
  use_threadpool: true
  max_training_time: 1        # minutes
  max_workers: 1 # suppress assertion for multigpu training

# QM reference engines; evaluation time limits kept tiny for test speed.
CP2K:
  cores_per_worker: 1
  max_evaluation_time: 0.1
  container_uri: 'oras://ghcr.io/molmod/cp2k:2024.1'

GPAW:
  cores_per_worker: 1
  max_evaluation_time: 0.1
  container_uri: 'oras://ghcr.io/molmod/gpaw:24.1'

ORCA:
  cores_per_worker: 1
  max_evaluation_time: 0.1


...
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
39 changes: 39 additions & 0 deletions psiflow/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,3 +33,42 @@ def resolve_and_check(path: Path) -> Path:
load = ExecutionContextLoader.load
context = ExecutionContextLoader.context
wait = ExecutionContextLoader.wait


# TODO: EXECUTION
# - max_runtime is in seconds, max_simulation_time (and others) is in minutes
# - ExecutionDefinition gpu argument?
# - ExecutionDefinition wrap_in_timeout functionality
# - ExecutionDefinition wrap_in_srun functionality? Actually for MD this is more iffy right
# - ExecutionDefinition why properties?
# - ExecutionDefinition centralize wq_resources
# - configuration file with all options
# - timeout -s 9 or -s 15?
# - executor keys are hardcoded strings..
# - define a 'format_env_variables' util
# - update GPAW + ORCA + Default containers
# include s-dftd3 in modelevaluation + install s-dftd3 with openmp
# - what with mem_per_node / mem_per_worker
# - always GPU for training?
# - currently reference mpi_args have to be a tuple according to typeguard..
# - cores_per_block has to be specified even when exclusive..?
# - can we do something with WQ priority?
# - see chatgpt convo for process memory limits and such
# - make /tmp for app workdirs an option?
# - what is scaling_cores_per_worker in WQ
# - can we clean up psiflow_internal slightly?
# -
# TODO: REFERENCE
# - reference MPI args not really checked
# - mpi flags are very finicky across implementations --> use ENV args?
# OMPI_MCA_orte_report_bindings=1 I_MPI_DEBUG=4
# - command ends with 'exit 0' - what if we do not want to exit yet?
# - some actual logging?
# - safe_compute_dataset functionality?
# - ReferenceDummy is a bit sloppy
# -
# TODO: MISC
# - think about test efficiency
# - some imports take a very long time
# - fix serialisation
# -
8 changes: 8 additions & 0 deletions psiflow/data/dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -284,6 +284,14 @@ def evaluate(
Returns:
Dataset: A new Dataset with evaluation results.
"""
# TODO: WIP
from psiflow.hamiltonians import Hamiltonian

if not isinstance(computable, Hamiltonian):
# avoid extracting and inserting the same quantities
return computable.compute_dataset(self)

# use Hamiltonian.compute method
if batch_size is not None:
outputs = computable.compute(self, batch_size=batch_size)
else:
Expand Down
Loading
Loading