diff --git a/pyproject.toml b/pyproject.toml
index 7a1261e999..ac7565e734 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -118,6 +118,8 @@ requires-python = '>=3.9'
 'core.structure' = 'aiida.orm.nodes.data.structure:StructureData'
 'core.upf' = 'aiida.orm.nodes.data.upf:UpfData'

+[project.entry-points.'aiida.exporters']
+
 [project.entry-points.'aiida.groups']
 'core' = 'aiida.orm.groups:Group'
 'core.auto' = 'aiida.orm.groups:AutoGroup'
@@ -180,10 +182,6 @@ requires-python = '>=3.9'
 'core.arithmetic.add_multiply' = 'aiida.workflows.arithmetic.add_multiply:add_multiply'
 'core.arithmetic.multiply_add' = 'aiida.workflows.arithmetic.multiply_add:MultiplyAddWorkChain'

-[project.entry-points.'aiida.exporters']
-# 'core.arithmetic.add_multiply' = 'aiida.workflows.arithmetic.add_multiply:add_multiply'
-# 'core.arithmetic.multiply_add' = 'aiida.workflows.arithmetic.multiply_add:MultiplyAddWorkChain'
-
 [project.optional-dependencies]
 atomic_tools = [
     'PyCifRW~=4.4',
diff --git a/src/aiida/cmdline/commands/cmd_data/__init__.py b/src/aiida/cmdline/commands/cmd_data/__init__.py
index 3daa895240..fed0e1b5ba 100644
--- a/src/aiida/cmdline/commands/cmd_data/__init__.py
+++ b/src/aiida/cmdline/commands/cmd_data/__init__.py
@@ -9,14 +9,15 @@
 """The `verdi data` command line interface."""

 from aiida.cmdline.commands.cmd_verdi import verdi
-from aiida.cmdline.utils.pluginable import Pluginable
 from aiida.cmdline.params import arguments, options, types
+from aiida.cmdline.utils.pluginable import Pluginable


 @verdi.group('data', entry_point_group='aiida.cmdline.data', cls=Pluginable)
 def verdi_data():
     """Inspect, create and manage data nodes."""

+
 @verdi_data.command('dump')
 @arguments.DATA()
 @options.PATH()
@@ -27,22 +28,17 @@ def data_dump(
     path,
     overwrite,
 ) -> None:
-
-    """Dump an arbitrary `Data` node entity to disk.
-
-    """
+    """Dump an arbitrary `Data` node entity to disk."""
     from aiida.tools.dumping.data import DataDumper

-    data_dumper = DataDumper(
-        overwrite=overwrite
-    )
+    data_dumper = DataDumper(overwrite=overwrite)

     print(type(data), data)
     # `data` comes as a tuple
     if len(data) > 1:
-        raise NotImplementedError("Dumping of multiple data nodes not yet supported.")
+        raise NotImplementedError('Dumping of multiple data nodes not yet supported.')

     # Probs shouldn't do that. Quick hack.
-    data=data[0]
+    data = data[0]

     data_dumper.dump(data_node=data, output_path=path)
diff --git a/src/aiida/cmdline/commands/cmd_group.py b/src/aiida/cmdline/commands/cmd_group.py
index dd45422343..73a18e3019 100644
--- a/src/aiida/cmdline/commands/cmd_group.py
+++ b/src/aiida/cmdline/commands/cmd_group.py
@@ -633,6 +633,7 @@ def group_path_ls(path, type_string, recursive, as_table, no_virtual, with_descr
             continue
         echo.echo(child.path, bold=not child.is_virtual)

+
 @verdi_group.command('dump')
 @arguments.GROUP()
 # @with_dbenv()
@@ -659,10 +660,8 @@ def group_path_ls(path, type_string, recursive, as_table, no_virtual, with_descr
 #     help='Add a filter to show only groups for which the label contains STRING.',
 # )
 def group_dump(group):
-    print(group)
     from aiida.tools.dumping.group import GroupDumper

     group_dumper = GroupDumper()
     group_dumper.dump(group)
-
diff --git a/src/aiida/cmdline/commands/cmd_process.py b/src/aiida/cmdline/commands/cmd_process.py
index 22214d484e..825e01a3b3 100644
--- a/src/aiida/cmdline/commands/cmd_process.py
+++ b/src/aiida/cmdline/commands/cmd_process.py
@@ -682,10 +682,7 @@ def process_dump(

     try:
         dump_path = process_dumper.dump(
-            process_node=process,
-            output_path=path,
-            include_attributes=include_attributes,
-            include_extras=include_extras
+            process_node=process, output_path=path, include_attributes=include_attributes, include_extras=include_extras
         )
     except FileExistsError:
         echo.echo_critical(
diff --git a/src/aiida/cmdline/commands/cmd_profile.py b/src/aiida/cmdline/commands/cmd_profile.py
index 8d6e45b0fe..80e8dcc492 100644
--- a/src/aiida/cmdline/commands/cmd_profile.py
+++ b/src/aiida/cmdline/commands/cmd_profile.py
@@ -271,6 +271,7 @@ def profile_delete(force, delete_data, profiles):
         get_config().delete_profile(profile.name, delete_storage=delete_data)
         echo.echo_success(f'Profile `{profile.name}` was deleted.')

+
 @verdi_profile.command('dump')
 @options.PATH()
 @arguments.PROFILE(default=defaults.get_default_profile)
diff --git a/src/aiida/tools/dumping/abstract.py b/src/aiida/tools/dumping/abstract.py
index 7ca2306077..8e54190c61 100644
--- a/src/aiida/tools/dumping/abstract.py
+++ b/src/aiida/tools/dumping/abstract.py
@@ -2,9 +2,8 @@
 # Could add saveguard file with filename based on class name or sthg like that
 #

-class AbstractDumper():
+
+class AbstractDumper:
     def __init__(self, overwrite, incremental):
         self.overwrite = overwrite
         self.incremental = incremental
-
-
diff --git a/src/aiida/tools/dumping/backup-code.py b/src/aiida/tools/dumping/backup-code.py
index 1c053c9925..468d2b1d80 100644
--- a/src/aiida/tools/dumping/backup-code.py
+++ b/src/aiida/tools/dumping/backup-code.py
@@ -1,12 +1,13 @@
 # Could also be a more general CollectionDumper class, actually
 import contextlib
-from aiida.common import timezone
-from aiida.orm import CalculationNode, Code, Computer, Group, QueryBuilder, StructureData, User, WorkflowNode
-from typing import List
-from aiida.tools.dumping.utils import _validate_make_dump_path, get_nodes_from_db
 from pathlib import Path
+from typing import List
+
+from aiida.common import timezone
+from aiida.orm import CalculationNode, WorkflowNode
 from aiida.tools.dumping.processes import ProcessDumper
+from aiida.tools.dumping.utils import get_nodes_from_db

 # DEFAULT_ENTITIES_TO_DUMP = [WorkflowNode, StructureData, User, Code, Computer]
 DEFAULT_ENTITIES_TO_DUMP = [CalculationNode, WorkflowNode]  # , StructureData, User, Code, Computer]
@@ -45,11 +46,11 @@ def dump(self, group):
             print(type(entity), entity, len(group_nodes))

             # print("ABC", aiida_entity==CalculationNode)
-            if entity==CalculationNode:
+            if entity == CalculationNode:
                 print('SELF._DUMP_CALCULATIONS_HIDDEN', len(group_nodes))
                 self._dump_calculations_hidden(calculations=group_nodes)

-            if entity==WorkflowNode:
+            if entity == WorkflowNode:
                 print('SELF._DUMP_WORKFLOWS_HIDDEN', len(group_nodes))
                 self._dump_workflows_hidden(workflows=group_nodes)

@@ -57,7 +58,6 @@ def _dump_calculations_hidden(self, calculations):
         # ? Dump only top-level workchains, as that includes sub-workchains already
         for calculation in calculations:
-
             # ? Hardcode overwrite=True for now
             calculation_dumper = ProcessDumper(overwrite=True)

@@ -68,7 +68,6 @@ def _dump_calculations_hidden(self, calculations):
             with contextlib.suppress(FileExistsError):
                 calculation_dumper.dump(process_node=calculation, output_path=calculation_dump_path)

-
             # # To make development quicker
             # if iworkflow_ > 1:
             #     break
diff --git a/src/aiida/tools/dumping/collection.py b/src/aiida/tools/dumping/collection.py
index be9fb96a01..912928716e 100644
--- a/src/aiida/tools/dumping/collection.py
+++ b/src/aiida/tools/dumping/collection.py
@@ -8,15 +8,16 @@
 ###########################################################################
 """Functionality for dumping of a Collections of AiiDA ORMs."""

-from aiida import orm
 import logging
 from collections import Counter

+from aiida import orm
+
 LOGGER = logging.getLogger(__name__)

 # TODO: Could also get the entities, or UUIDs directly, rather than just counting them here
-class CollectionDumper:
+class CollectionDumper:
     @staticmethod
     def create_entity_counter(orm_collection: orm.Group | orm.Collection | None = None):
         if orm_collection is None:
@@ -26,7 +27,6 @@ def create_entity_counter(orm_collection: orm.Group | orm.Collection | None = No

     @staticmethod
     def _create_entity_counter_profile():
-
         nodes = orm.QueryBuilder().append(orm.Node).all(flat=True)
         type_counter = Counter()
@@ -58,8 +58,6 @@ def _create_entity_counter_group(group: orm.Group | str):

         return type_counter

-
-
     # @staticmethod
     # def _create_entity_counter_storage():
     #     # ? If the group only has one WorkChain assigned to it, this will only return a count of 1 for the
diff --git a/src/aiida/tools/dumping/data.py b/src/aiida/tools/dumping/data.py
index d68c6f73db..4d28b6947a 100644
--- a/src/aiida/tools/dumping/data.py
+++ b/src/aiida/tools/dumping/data.py
@@ -9,14 +9,15 @@
 """Functionality for dumping of Data nodes."""

 from __future__ import annotations

-from functools import singledispatchmethod
-from aiida import orm
-from aiida.orm.nodes.data.structure import StructureData
-from pathlib import Path
 import logging
+from functools import singledispatchmethod
+from pathlib import Path
+
 import yaml

+from aiida import orm
+from aiida.orm.nodes.data.structure import StructureData

 logger = logging.getLogger(__name__)
@@ -93,7 +94,7 @@ def _dump_code(
         file_name: str | Path | None = None,
         file_format: str = 'yaml',
         *args,
-        **kwargs
+        **kwargs,
     ):
         if output_path is None:
             output_path = Path.cwd()
diff --git a/src/aiida/tools/dumping/gio-dev.py b/src/aiida/tools/dumping/gio-dev.py
index ba3829ff34..a05525146d 100644
--- a/src/aiida/tools/dumping/gio-dev.py
+++ b/src/aiida/tools/dumping/gio-dev.py
@@ -25,4 +25,4 @@ def export_structure(node, folder, **kargs):  # GP: support both functions and cl
 # GP: Not specified means use the default, for now from hardcoded list, in the future reading from some defined method of the plugin
 # This could be entry point given in plugin

-aiida.orm.data.StructureData.export_to_dir # think to a syntax for methods
\ No newline at end of file
+aiida.orm.data.StructureData.export_to_dir  # think to a syntax for methods
diff --git a/src/aiida/tools/dumping/group.py b/src/aiida/tools/dumping/group.py
index 34c3ed7e07..6625512b1b 100644
--- a/src/aiida/tools/dumping/group.py
+++ b/src/aiida/tools/dumping/group.py
@@ -1,4 +1,3 @@
-
 import contextlib
 import itertools
 import logging
@@ -8,16 +7,17 @@

 from aiida import orm
 from aiida.common import timezone
-
 from aiida.tools.dumping.collection import CollectionDumper
 from aiida.tools.dumping.process import ProcessDumper
-from aiida.tools.dumping.data import DataDumper
-
-from aiida.tools.dumping.utils import _validate_make_dump_path, get_nodes_from_db
+from aiida.tools.dumping.utils import get_nodes_from_db

 # DEFAULT_ENTITIES_TO_DUMP = [WorkflowNode, StructureData, User, Code, Computer]
 DEFAULT_PROCESSES_TO_DUMP = [orm.CalculationNode, orm.WorkflowNode]  # , StructureData, User, Code, Computer]
-DEFAULT_DATA_TO_DUMP = [orm.StructureData, orm.Code, orm.Computer, ]  # , StructureData, User, Code, Computer]
+DEFAULT_DATA_TO_DUMP = [
+    orm.StructureData,
+    orm.Code,
+    orm.Computer,
+]  # , StructureData, User, Code, Computer]
 DEFAULT_ENTITIES_TO_DUMP = DEFAULT_PROCESSES_TO_DUMP + DEFAULT_DATA_TO_DUMP

 # from aiida.common.utils import str_timedelta
@@ -77,19 +77,17 @@ def dump(self, group: orm.Group | str | None = None, output_path: Path | str | N
         # self.group_path = Path.cwd() / 'groups'
         # self.group_path = self.output_path / 'groups' / group_name

-        # logger.report(f'self.entity_counter for Group <{self.group}>: {self.entity_counter}')
-        # logger.report(f'Dumping calculations and workflows of group {group_name}...')
+        # logger.report(f'self.entity_counter for Group <{self.group}>: {self.entity_counter}')
+        # logger.report(f'Dumping calculations and workflows of group {group_name}...')

         # TODO: This shouldn't be on a per-group basis? Possibly dump all data for the whole profile.
         # TODO: Though, now that I think about it, it might actually be desirable to only limit that to the group only.
-        # logger.report(f'Dumping raw calculation data for group {group_name}...')
+        # logger.report(f'Dumping raw calculation data for group {group_name}...')

         logger.report(f'Dumping processes for group {group_name}...')
         self._dump_processes()

-
     def _dump_processes(self):
-
         if (
             sum(
                 self.entity_counter.get(orm_process_class, 0)
@@ -142,25 +140,25 @@ def _dump_processes(self):
         self._dump_calculations_hidden()
         self._link_calculations_hidden()

-        # logger.report(f'Dumping other data nodes of group {group_name}...')
+        # logger.report(f'Dumping other data nodes of group {group_name}...')

-        # TODO: Here might also be pseudo.family.sssp, not just workflows/calculations
+        # TODO: Here might also be pseudo.family.sssp, not just workflows/calculations

-        # for entity in self.entities_to_dump:
+        # for entity in self.entities_to_dump:

-        #     group_nodes = get_nodes_from_db(aiida_node_type=entity, with_group=group, flatten=True)
+        #     group_nodes = get_nodes_from_db(aiida_node_type=entity, with_group=group, flatten=True)

-        #     # print('_DUMP_TO_HIDDEN(SELF, AIIDA_ENTITY, AIIDA_NODES)')
-        #     # print(entity, len(group_nodes))
+        #     # print('_DUMP_TO_HIDDEN(SELF, AIIDA_ENTITY, AIIDA_NODES)')
+        #     # print(entity, len(group_nodes))

-        #     # print("ABC", aiida_entity==CalculationNode)
-        #     if entity==CalculationNode:
-        #         print('SELF._DUMP_CALCULATIONS_HIDDEN', len(group_nodes))
-        #         self._dump_calculations_hidden(calculations=group_nodes)
+        #     # print("ABC", aiida_entity==CalculationNode)
+        #     if entity==CalculationNode:
+        #         print('SELF._DUMP_CALCULATIONS_HIDDEN', len(group_nodes))
+        #         self._dump_calculations_hidden(calculations=group_nodes)

-        #     # if entity==WorkflowNode:
-        #     #     print('SELF._DUMP_WORKFLOWS_HIDDEN', len(group_nodes))
-        #     #     self._dump_workflows_hidden(workflows=group_nodes)
+        #     # if entity==WorkflowNode:
+        #     #     print('SELF._DUMP_WORKFLOWS_HIDDEN', len(group_nodes))
+        #     #     self._dump_workflows_hidden(workflows=group_nodes)

     def _dump_calculations_hidden(self):
         # ? Dump only top-level workchains, as that includes sub-workchains already
@@ -211,7 +209,7 @@ def _dump_link_workflows(self, link_calculations: bool = True):
             workflow_dump_path = (
                 self.output_path / 'workflows' / workflow_dumper._generate_default_dump_path(process_node=workflow_node)
             )
-            # logger.report(f'WORKFLOW_DUMP_PATH: {workflow_dump_path}')
+            # logger.report(f'WORKFLOW_DUMP_PATH: {workflow_dump_path}')

             workflow_dumper._dump_workflow(
                 workflow_node=workflow_node,
@@ -234,7 +232,7 @@ def _link_calculations_hidden(self):
             calculation_dump_path = calculation_dump_path / calculation_dumper._generate_default_dump_path(
                 process_node=calculation_node
             )
-            # logger.report(f'CALCULATION_DUMP_PATH: {calculation_dump_path}')
+            # logger.report(f'CALCULATION_DUMP_PATH: {calculation_dump_path}')

             with contextlib.suppress(FileExistsError):
                 os.symlink(link_calculations_dir / calculation_node.uuid, calculation_dump_path)
diff --git a/src/aiida/tools/dumping/process.py b/src/aiida/tools/dumping/process.py
index 45f559b6f0..af7c3ee9d3 100644
--- a/src/aiida/tools/dumping/process.py
+++ b/src/aiida/tools/dumping/process.py
@@ -26,7 +26,6 @@
 from aiida.tools.dumping.utils import _validate_make_dump_path

 logger = logging.getLogger(__name__)
-from pprint import pprint


 class ProcessDumper:
@@ -423,7 +422,7 @@ def _dump_calculation_io_rich(
             parent_path /= 'rich'

         # Set up the rich parsing functions
-        from aiida.tools.dumping.rich import RichParser #, default_core_export_mapping
+        from aiida.tools.dumping.rich import RichParser  # , default_core_export_mapping

         # Extend (at least the keys) by the dynamic entry points

@@ -446,18 +445,15 @@ def _dump_calculation_io_rich(
         else:
             logger.report('Neither `--rich-options` nor `--rich-config` set, using defaults.')

-
         for link_triple in link_triples:
             link_label = link_triple.link_label

-            linked_node_path = (
-                parent_path if self.flat else parent_path / Path(*link_label.split('__'))
-            )
+            linked_node_path = parent_path if self.flat else parent_path / Path(*link_label.split('__'))

             node_entry_point = link_triple.node.entry_point
             node_entry_point_name = node_entry_point.name

             try:
-                print(f"{node_entry_point_name}: ", f"{options_dict[node_entry_point_name][0].__name__}")
+                print(f'{node_entry_point_name}: ', f'{options_dict[node_entry_point_name][0].__name__}')
             except:
                 pass
@@ -466,7 +462,7 @@ def _dump_calculation_io_rich(

             if node_entry_point_name.startswith('core'):
                 node = link_triple.node
-            # if isinstance(node, orm.Data):
+                # if isinstance(node, orm.Data):
                 # ? Here, instead one could check for `core_data_with_exports` explicitly
                 # ? Check here if exporter or fileformat not None, then create the path
                 # All orm.Data types implement `export`, `_get_exporters`, `get_export_formats`, and `_exportcontent`
@@ -477,7 +473,6 @@ def _dump_calculation_io_rich(
                 # print('linked_node_path', linked_node_path)
                 # export_function

-
                 # Obtain settings from the export dict
                 # TODO: -> This might break when plugin is missing
                 exporter = options_dict[node_entry_point_name]['exporter']
@@ -504,13 +499,9 @@ def _dump_calculation_io_rich(
                     # TODO: Eventually, all these functions should all have the same signature...
                    elif exporter.__name__ == '_dump_code':
                        exporter(
-                            data_node=node,
-                            output_path=rich_output_file.parent,
-                            file_name=None,
-                            file_format=fileformat
+                            data_node=node, output_path=rich_output_file.parent, file_name=None, file_format=fileformat
                        )

-
     def _generate_calculation_io_mapping(self, io_dump_paths: List[str | Path] | None = None) -> SimpleNamespace:
         """Helper function to generate mapping for entities dumped for each `orm.CalculationNode`.
diff --git a/src/aiida/tools/dumping/profile.py b/src/aiida/tools/dumping/profile.py
index 579e449e42..ac3198c301 100644
--- a/src/aiida/tools/dumping/profile.py
+++ b/src/aiida/tools/dumping/profile.py
@@ -21,13 +21,10 @@
 from aiida import orm
 from aiida.manage.configuration.profile import Profile
 from aiida.orm import CalculationNode, Code, Computer, Group, QueryBuilder, StructureData, User, WorkflowNode
-from aiida.orm.groups import ImportGroup
-
 from aiida.tools.dumping.collection import CollectionDumper
+from aiida.tools.dumping.data import DataDumper
 from aiida.tools.dumping.group import GroupDumper
 from aiida.tools.dumping.process import ProcessDumper
-from aiida.tools.dumping.data import DataDumper
-
 from aiida.tools.dumping.utils import _validate_make_dump_path, get_nodes_from_db

 logger = logging.getLogger(__name__)
@@ -35,7 +32,11 @@
 DEFAULT_COLLECTIONS_TO_DUMP = [Group]  # ? Might not be needed -> Main useful collection type is just Group
 PROFILE_DUMP_JSON_FILE = 'profile-dump-info.json'
 DEFAULT_PROCESSES_TO_DUMP = [orm.CalculationNode, orm.WorkflowNode]  # , StructureData, User, Code, Computer]
-DEFAULT_DATA_TO_DUMP = [orm.StructureData, orm.Code, orm.Computer, ]  # , StructureData, User, Code, Computer]
+DEFAULT_DATA_TO_DUMP = [
+    orm.StructureData,
+    orm.Code,
+    orm.Computer,
+]  # , StructureData, User, Code, Computer]
 DEFAULT_ENTITIES_TO_DUMP = DEFAULT_PROCESSES_TO_DUMP + DEFAULT_DATA_TO_DUMP

@@ -343,5 +344,3 @@ def _dump_data(self):
             datadumper.dump(data_node, self.parent_path)

         # print(data_nodes)
-
-
diff --git a/src/aiida/tools/dumping/rich.py b/src/aiida/tools/dumping/rich.py
index 45ab4cd69c..dc4a76e675 100644
--- a/src/aiida/tools/dumping/rich.py
+++ b/src/aiida/tools/dumping/rich.py
@@ -1,7 +1,5 @@
-from importlib_metadata import EntryPoint
 from aiida.cmdline.commands.cmd_data.cmd_export import data_export
-from aiida.tools.dumping.data import DataDumper

 __all__ = ('RichParser', 'default_core_export_mapping')
@@ -81,125 +79,41 @@
 # TODO: `core.jsonable` that should be easy via dict -> .json, or `code export`

 default_core_export_mapping = {
-    'core.array': {
-        'exporter': data_export,
-        'export_format': 'json'
-    },
-    'core.array.bands': {
-        'exporter': data_export,
-        'export_format': 'mpl_pdf'
-    },
-    'core.array.kpoints': {
-        'exporter': data_export,
-        'export_format': 'json'
-    },
-    'core.array.projection': {
-        'exporter': data_export,
-        'export_format': 'json'
-    },
-    'core.array.trajectory': {
-        'exporter': data_export,
-        'export_format': 'cif'
-    },
-    'core.array.xy': {
-        'exporter': data_export,
-        'export_format': 'json'
-    },
-    'core.base': {
-        'exporter': None,
-        'export_format': None
-    },
-    'core.bool': {
-        'exporter': None,
-        'export_format': None
-    },
-    'core.cif': {
-        'exporter': data_export,
-        'export_format': 'cif'
-    },
+    'core.array': {'exporter': data_export, 'export_format': 'json'},
+    'core.array.bands': {'exporter': data_export, 'export_format': 'mpl_pdf'},
+    'core.array.kpoints': {'exporter': data_export, 'export_format': 'json'},
+    'core.array.projection': {'exporter': data_export, 'export_format': 'json'},
+    'core.array.trajectory': {'exporter': data_export, 'export_format': 'cif'},
+    'core.array.xy': {'exporter': data_export, 'export_format': 'json'},
+    'core.base': {'exporter': None, 'export_format': None},
+    'core.bool': {'exporter': None, 'export_format': None},
+    'core.cif': {'exporter': data_export, 'export_format': 'cif'},
     # TODO: These should by written via the `data_export` function instead
-    'core.code': {
-        'exporter': data_export,
-        'export_format': 'yaml'
-    },
-    'core.code.containerized': {
-        'exporter': data_export,
-        'export_format': 'yaml'
-    },
-    'core.code.installed': {
-        'exporter': data_export,
-        'export_format': 'yaml'
-    },
-    'core.code.portable': {
-        'exporter': data_export,
-        'export_format': 'yaml'
-    },
-    'core.dict': {
-        'exporter': None,
-        'export_format': None
-    },
-    'core.enum': {
-        'exporter': None,
-        'export_format': None
-    },
-    'core.float': {
-        'exporter': None,
-        'export_format': None
-    },
+    'core.code': {'exporter': data_export, 'export_format': 'yaml'},
+    'core.code.containerized': {'exporter': data_export, 'export_format': 'yaml'},
+    'core.code.installed': {'exporter': data_export, 'export_format': 'yaml'},
+    'core.code.portable': {'exporter': data_export, 'export_format': 'yaml'},
+    'core.dict': {'exporter': None, 'export_format': None},
+    'core.enum': {'exporter': None, 'export_format': None},
+    'core.float': {'exporter': None, 'export_format': None},
     # TODO: Just use copy-tree
-    'core.folder': {
-        'exporter': None,
-        'export_format': None
-    },
-    'core.int': {
-        'exporter': None,
-        'export_format': None
-    },
+    'core.folder': {'exporter': None, 'export_format': None},
+    'core.int': {'exporter': None, 'export_format': None},
     'core.jsonable': {
         'exporter': data_export,
-        'export_format': 'json' # duh
-    },
-    'core.list': {
-        'exporter': None,
-        'export_format': None
-    },
-    'core.numeric': {
-        'exporter': None,
-        'export_format': None
-    },
-    'core.orbital': {
-        'exporter': None,
-        'export_format': None
+        'export_format': 'json',  # duh
     },
+    'core.list': {'exporter': None, 'export_format': None},
+    'core.numeric': {'exporter': None, 'export_format': None},
+    'core.orbital': {'exporter': None, 'export_format': None},
     # TODO: Here, try-except existance on remote and if so, dump it here locally
-    'core.remote': {
-        'exporter': None,
-        'export_format': None
-    },
-    'core.remote.stash': {
-        'exporter': None,
-        'export_format': None
-    },
-    'core.remote.stash.folder': {
-        'exporter': None,
-        'export_format': None
-    },
-    'core.singlefile': {
-        'exporter': None,
-        'export_format': None
-    },
-    'core.str': {
-        'exporter': None,
-        'export_format': None
-    },
-    'core.structure': {
-        'exporter': data_export,
-        'export_format': 'cif'
-    },
-    'core.upf': {
-        'exporter': data_export,
-        'export_format': 'upf'
-    }
+    'core.remote': {'exporter': None, 'export_format': None},
+    'core.remote.stash': {'exporter': None, 'export_format': None},
+    'core.remote.stash.folder': {'exporter': None, 'export_format': None},
+    'core.singlefile': {'exporter': None, 'export_format': None},
+    'core.str': {'exporter': None, 'export_format': None},
+    'core.structure': {'exporter': data_export, 'export_format': 'cif'},
+    'core.upf': {'exporter': data_export, 'export_format': 'upf'},
 }
@@ -298,15 +212,14 @@ def parse_rich_options(rich_options):
                 export_value = value
             elif key == 'format':
                 format_value = value
-        else:
-            if type_value is None:
-                type_value = component
-            elif export_value is None:
-                # TODO: this is only for core data types
-                export_value = default_core_export_mapping[type_value]
-            elif format_value is None:
-                # format_value = component
-                format_value = default_core_export_mapping[type_value]
+        elif type_value is None:
+            type_value = component
+        elif export_value is None:
+            # TODO: this is only for core data types
+            export_value = default_core_export_mapping[type_value]
+        elif format_value is None:
+            # format_value = component
+            format_value = default_core_export_mapping[type_value]

     if type_value:
         options_dict[type_value] = (export_value, format_value)
diff --git a/src/aiida/tools/dumping/utils.py b/src/aiida/tools/dumping/utils.py
index 24832bb261..3cbe9cd746 100644
--- a/src/aiida/tools/dumping/utils.py
+++ b/src/aiida/tools/dumping/utils.py
@@ -1,6 +1,7 @@
 from logging import Logger
 from pathlib import Path
-from aiida.orm import QueryBuilder, Group, Computer, Code
+
+from aiida.orm import Code, Computer, Group, QueryBuilder


 def _validate_make_dump_path(
@@ -46,6 +47,7 @@ def _validate_make_dump_path(

     return validate_path.resolve()

+
 def get_nodes_from_db(aiida_node_type, with_group: str | None = None, flat=False):
     qb = QueryBuilder()
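
Note on the `default_core_export_mapping` reformatted in src/aiida/tools/dumping/rich.py above: each entry pairs a `Data` entry-point name with an exporter callable and an export format, and `'exporter': None` marks types that are currently not exported. A minimal sketch of consuming the mapping, assuming only the `{'exporter': ..., 'export_format': ...}` shape shown in the diff; the node handling and the exporter call signature are assumptions for illustration, not part of this patch:

    from aiida.tools.dumping.rich import default_core_export_mapping

    entry = default_core_export_mapping['core.structure']
    exporter, fileformat = entry['exporter'], entry['export_format']
    if exporter is not None:
        # For 'core.structure' the diff maps to `data_export` with format 'cif';
        # the actual invocation (e.g. exporter(node, output_fname, fileformat))
        # is assumed here and would mirror `_dump_calculation_io_rich` above.
        pass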