diff --git a/.github/actions/setup_lfs/action.yml b/.github/actions/setup_lfs/action.yml index 86a3b0464d4..d27374ab281 100644 --- a/.github/actions/setup_lfs/action.yml +++ b/.github/actions/setup_lfs/action.yml @@ -11,11 +11,12 @@ inputs: runs: using: "composite" steps: - - name: Clone tardis-sn/tardis-regression-data + - name: Clone atharva-2001/tardis-regression-data uses: actions/checkout@v4 with: - repository: ${{ inputs.regression-data-repo }} + repository: atharva-2001/tardis-regression-data path: tardis-regression-data + ref: numpy_v2_2 - name: Create LFS file list run: git lfs ls-files -l | cut -d' ' -f1 | sort > .lfs-assets-id diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 8938cc30124..c3ea1cd3553 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -119,7 +119,7 @@ jobs: - name: Generate and process changelog run: | - CHANGELOG=$(git cliff --config pyproject.toml --unreleased | sed -n '/^## Changelog/,$p' | grep -vE '^(ERROR|WARN)') + CHANGELOG=$(git cliff --config pyproject.toml --unreleased --tag ${{ env.NEW_TAG }}| sed -n '/^## Changelog/,$p' | grep -vE '^(ERROR|WARN)') echo "CHANGELOG<> $GITHUB_ENV echo "$CHANGELOG" >> $GITHUB_ENV echo "EOF" >> $GITHUB_ENV diff --git a/.mailmap b/.mailmap index ed00506c00f..b238be626b3 100644 --- a/.mailmap +++ b/.mailmap @@ -215,6 +215,8 @@ Stuart Sim ssim Stuart Sim Stuart Sim Stuart Sim Stuart Sim +Swayam Shah Sonu0305 + TARDIS Bot TARDIS Bot tardis-bot TARDIS Bot TARDIS Bot diff --git a/CHANGELOG.md b/CHANGELOG.md index 24dd5ca71a9..47cd288a6ae 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,20 @@ ## Changelog -### release-2024.12.08 (2024/12/07 20:08) +### release-2025.01.12 (2025/01/11 20:04) +- [2925](https://github.com/tardis-sn/tardis/pull/2925) give tag in git cliff command (2925) (@KasukabeDefenceForce) +- [2923](https://github.com/tardis-sn/tardis/pull/2923) Moves non-physical input check outsode I/O module (2923) (@Sonu0305) +- 
[2924](https://github.com/tardis-sn/tardis/pull/2924) Post-release 2025.01.05 (2924) (@tardis-bot) +### release-2025.01.05 (2024/12/30 01:37) +- [2915](https://github.com/tardis-sn/tardis/pull/2915) Post-release 2024.12.29 (2915) (@tardis-bot) +### release-2024.12.29 (2024/12/23 04:39) +- [2909](https://github.com/tardis-sn/tardis/pull/2909) Post-release 2024.12.22 (2909) (@tardis-bot) +### release-2024.12.22 (2024/12/16 10:05) +- [2901](https://github.com/tardis-sn/tardis/pull/2901) Update test docs (2901) (@KasukabeDefenceForce) +- [2905](https://github.com/tardis-sn/tardis/pull/2905) Post-release 2024.12.15 (2905) (@tardis-bot) +### release-2024.12.15 (2024/12/09 10:05) +- [2902](https://github.com/tardis-sn/tardis/pull/2902) Fix Benchmarks Workflow (2902) (@atharva-2001) +- [2900](https://github.com/tardis-sn/tardis/pull/2900) Post-release 2024.12.08 (2900) (@tardis-bot) +### release-2024.12.08 (2024/12/04 12:24) - [2898](https://github.com/tardis-sn/tardis/pull/2898) Remove "labeled" from tests workflow triggers (2898) (@KasukabeDefenceForce) - [2865](https://github.com/tardis-sn/tardis/pull/2865) Rates matrix solver (2865) (@andrewfullard) - [2874](https://github.com/tardis-sn/tardis/pull/2874) Remove tau sobolevs from plasma (2874) (@andrewfullard) diff --git a/CITATION.cff b/CITATION.cff index 60311673e68..2a2b4e1bab6 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -3,8 +3,8 @@ cff-version: 1.0.3 message: If you use this software, please cite it using these metadata. 
# FIXME title as repository name might not be the best name, please make human readable -title: 'tardis-sn/tardis: TARDIS v2024.12.08' -doi: 10.5281/zenodo.14300679 +title: 'tardis-sn/tardis: TARDIS v2025.01.12' +doi: 10.5281/zenodo.14633332 # FIXME splitting of full names is error prone, please check if given/family name are correct authors: - given-names: Wolfgang @@ -347,7 +347,7 @@ authors: - given-names: Atharwa family-names: Kharkar affiliation: -version: release-2024.12.08 -date-released: 2024-12-08 +version: release-2025.01.12 +date-released: 2025-01-12 repository-code: https://github.com/tardis-sn/tardis license: cc-by-4.0 diff --git a/README.rst b/README.rst index b7d8fcd3554..18e8c49bc27 100644 --- a/README.rst +++ b/README.rst @@ -110,14 +110,14 @@ The following BibTeX entries are needed for the references: adsnote = {Provided by the SAO/NASA Astrophysics Data System} } -.. |CITATION| replace:: kerzendorf_2024_14300679 +.. |CITATION| replace:: kerzendorf_2025_14633332 -.. |DOI_BADGE| image:: https://img.shields.io/badge/DOI-10.5281/zenodo.14300679-blue - :target: https://doi.org/10.5281/zenodo.14300679 +.. |DOI_BADGE| image:: https://img.shields.io/badge/DOI-10.5281/zenodo.14633332-blue + :target: https://doi.org/10.5281/zenodo.14633332 .. 
code-block:: bibtex - @software{kerzendorf_2024_14300679, + @software{kerzendorf_2025_14633332, author = {Kerzendorf, Wolfgang and Sim, Stuart and Vogl, Christian and @@ -225,13 +225,13 @@ The following BibTeX entries are needed for the references: Nayak U, Ashwin and Kumar, Atul and Kharkar, Atharwa}, - title = {tardis-sn/tardis: TARDIS v2024.12.08}, - month = dec, - year = 2024, + title = {tardis-sn/tardis: TARDIS v2025.01.12}, + month = jan, + year = 2025, publisher = {Zenodo}, - version = {release-2024.12.08}, - doi = {10.5281/zenodo.14300679}, - url = {https://doi.org/10.5281/zenodo.14300679} + version = {release-2025.01.12}, + doi = {10.5281/zenodo.14633332}, + url = {https://doi.org/10.5281/zenodo.14633332}, } ******* diff --git a/conda-linux-64.lock b/conda-linux-64.lock index 4a2b4a0db76..cf1aeba84eb 100644 --- a/conda-linux-64.lock +++ b/conda-linux-64.lock @@ -202,7 +202,6 @@ https://conda.anaconda.org/conda-forge/noarch/babel-2.14.0-pyhd8ed1ab_0.conda#96 https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.12.3-pyha770c72_1.conda#d48f7e9fdec44baf6d1da416fe402b04 https://conda.anaconda.org/conda-forge/noarch/bleach-6.1.0-pyhd8ed1ab_0.conda#0ed9d7c0e9afa7c025807a9a8136ea3e https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2#9b347a7ec10940d3f7941ff6c460b551 -https://conda.anaconda.org/conda-forge/linux-64/cairo-1.18.0-h3faef2a_0.conda#f907bb958910dc404647326ca80c263e https://conda.anaconda.org/conda-forge/linux-64/cffi-1.16.0-py312hf06ca03_0.conda#56b0ca764ce23cc54f3f7e2a7b970f6d https://conda.anaconda.org/conda-forge/noarch/comm-0.2.2-pyhd8ed1ab_1.conda#74673132601ec2b7fc592755605f4c1b https://conda.anaconda.org/conda-forge/noarch/commonmark-0.9.1-py_0.tar.bz2#6aa0173c14befcd577ded130cf6f22f5 diff --git a/docs/contributing/development/running_tests.rst b/docs/contributing/development/running_tests.rst index f9438d1dbca..a11ac2eca7e 100644 --- a/docs/contributing/development/running_tests.rst +++ 
b/docs/contributing/development/running_tests.rst @@ -22,39 +22,38 @@ tests, you can run this with: > pytest tardis -Running the more advanced unit tests requires TARDIS Reference data that can be +Running the more advanced unit tests requires TARDIS Regression data that can be downloaded -(`tardis-refdata `_). +(`tardis-regression-data `_). `Git LFS `_ is used -to download the large refdata files in the tardis-refdata repository. +to download the large regression data files in the tardis-regression-data repository. -However, it is not required to download the entire repository. Firstly it is -important to identify the refdata files that are needed. Sometimes, it is possible -that a preused fixture that is also being used in the current tests is using some -refdata. So, it is advised to check for such cases beforehand. - -After identifying the refdata files to be used in the unit tests, those particular -files can be downloaded using ``git lfs`` .. code-block:: shell - > git lfs pull --include=filename + > git lfs pull + -It is important to maintain the same directory structure as the tardis-refdata repo -i.e. the lfs files should be in the same directory tree exactly as in tardis-refdata -repository. +The ``tardis-regression-data`` repository should be located outside of the main ``tardis`` repository, rather than being nested within it. + +.. warning:: + We have migrated from ``tardis-refdata`` to ``tardis-regression-data``. Finally, the tests can be run using the following command .. code-block:: shell - > pytest tardis --tardis-refdata=/path/to/tardis-refdata/ + > pytest tardis --tardis-regression-data=/path/to/tardis-regression-data/ Or, to run tests for a particular file or directory .. code-block:: shell - > pytest tardis/path/to/test_file_or_directory --tardis-refdata=/path/to/tardis-refdata/ + > pytest tardis/path/to/test_file_or_directory --tardis-regression-data=/path/to/tardis-regression-data/ + + +.. 
warning:: + In some cases you might have to update the regression data. The steps to update the regression data are outlined in the :ref:`update regression-data`. .. warning:: The `tests workflow `_ runs on @@ -74,36 +73,4 @@ You can generate Plasma Reference by the following command: .. code-block:: shell > pytest -rs tardis/plasma/tests/test_complete_plasmas.py - --tardis-refdata="/path/to/tardis-refdata/" --generate-reference - -Running the Integration Tests -============================= - -These tests require reference files against which the results of the various -tardis runs are tested. So you first need to either download the current -reference files (`here `_) -or generate new ones. - -Both of these require a configuration file for the integration tests: - -.. literalinclude:: integration.yml - :language: yaml - -Inside the atomic data directory there needs to be atomic data for each of -the setups that are provided in the ``test_integration`` folder. -If no references are given, the first step is to generate them. -The ``--less-packets`` option is useful for debugging purposes and will just -use very few packets to generate the references and thus make the process much -faster --- THIS IS ONLY FOR DEBUGGING PURPOSES. The ``-s`` option ensures that -TARDIS prints out the progress: - -.. code-block:: shell - - > pytest --integration=integration.yml -m integration --generate-reference --less-packets - -To run the test after having run the ``--generate-references``, all that is -needed is: - -.. 
code-block:: shell - - > pytest --integration=integration.yml -m integration --less-packets --remote-data + --tardis-regression-data="/path/to/tardis-regression-data/" --generate-reference \ No newline at end of file diff --git a/docs/io/output/how_to_rpacket_tracking.ipynb b/docs/io/output/how_to_rpacket_tracking.ipynb index 622b9f25eae..b9a4b8cd718 100644 --- a/docs/io/output/how_to_rpacket_tracking.ipynb +++ b/docs/io/output/how_to_rpacket_tracking.ipynb @@ -72,9 +72,9 @@ "```yaml\n", "... \n", "montecarlo:\n", - "...\n", - "tracking:\n", - " track_rpacket: true\n", + " ...\n", + " tracking:\n", + " track_rpacket: true\n", "```" ] }, diff --git a/docs/physics/setup/model.ipynb b/docs/physics/setup/model.ipynb index 426d8010ebd..e9ca9fffda8 100644 --- a/docs/physics/setup/model.ipynb +++ b/docs/physics/setup/model.ipynb @@ -81,7 +81,7 @@ "id": "cee054e9", "metadata": {}, "source": [ - "In the cell below, we set up a model. We use the [specific structure](../../io/configuration/components/models/index.rst#specific-structure) where we supply $t_\\mathrm{explosion}$, the velocity of the inner and outer boundaries of the supernova (labeled `start` and `stop`), and the number of shells (labeled `num`). The shells are then evenly spaced between the inner and outer boundaries of the supernova. The time after the explosion, the inner and outer velocities, and the number of shells can be varied to get different shell structures. The `SimulationState` object stores information about the model in the following attributes: `velocity` shows the velocity of the shell boundaries, `v_inner` shows the velocities of the inner boundaries of each shell, `v_outer` shows the velocity of the outer boundaries of each shell, and `v_middle` shows the velocity of the middle of each shell. Similarly, `radius`, `r_inner`, `r_outer`, and `r_middle` show the radii of each shell boundary, the inner boundaries, the outer boundaries, and the middles of each shell, respectively. 
`v_boundary_inner` shows the velocity of the inner boundary of the computational domain, and `v_boundary_outer` shows the velocity of the outer boundary of the computational domain. Finally, `volume` shows the volume of each shell, calculated via the formula of the volume of a spherical shell: $V=\\frac{4}{3}\\pi (r_\\mathrm{outer}^3-r_\\mathrm{inner}^3)$." + "In the cell below, we set up a model. We use the [specific structure](../../io/configuration/components/models/index.rst#specific-structure) where we supply $t_\\mathrm{explosion}$, the velocity of the inner and outer boundaries of the supernova (labeled `start` and `stop`), and the number of shells (labeled `num`). The shells are then evenly spaced between the inner and outer boundaries of the supernova. The time after the explosion, the inner and outer velocities, and the number of shells can be varied to get different shell structures. The `SimulationState` object stores information about the model in the following attributes: `velocity` shows the velocity of the shell boundaries, `v_inner` shows the velocities of the inner boundaries of each shell, `v_outer` shows the velocity of the outer boundaries of each shell, and `v_middle` shows the velocity of the middle of each shell. Similarly, `radius`, `r_inner`, `r_outer`, and `r_middle` show the radii of each shell boundary, the inner boundaries, the outer boundaries, and the middles of each shell, respectively. `v_inner_boundary` shows the velocity of the inner boundary of the computational domain, and `v_outer_boundary` shows the velocity of the outer boundary of the computational domain. Finally, `volume` shows the volume of each shell, calculated via the formula of the volume of a spherical shell: $V=\\frac{4}{3}\\pi (r_\\mathrm{outer}^3-r_\\mathrm{inner}^3)$." 
] }, { @@ -111,8 +111,8 @@ "print('v_inner:\\n', shell_simulation_state.v_inner)\n", "print('v_outer:\\n', shell_simulation_state.v_outer)\n", "print('v_middle:\\n', shell_simulation_state.v_middle)\n", - "print('v_boundary_inner:\\n', shell_simulation_state.v_boundary_inner)\n", - "print('v_boundary_outer:\\n', shell_simulation_state.v_boundary_outer)\n", + "print('v_inner_boundary:\\n', shell_simulation_state.v_inner_boundary)\n", + "print('v_outer_boundary:\\n', shell_simulation_state.v_outer_boundary)\n", "print('radius:\\n', shell_simulation_state.radius)\n", "print('r_inner:\\n', shell_simulation_state.r_inner)\n", "print('r_outer:\\n', shell_simulation_state.r_outer)\n", @@ -125,7 +125,7 @@ "id": "1ee56110", "metadata": {}, "source": [ - "Notice that `radius = velocity*time_explosion`, and similarly for `r_inner`, `r_outer`, and `r_middle`. You can get the radius of the photosphere via `v_boundary_inner*time_explosion` and outer edge of the supernova via `v_boundary_outer*time_explosion`.\n", + "Notice that `radius = velocity*time_explosion`, and similarly for `r_inner`, `r_outer`, and `r_middle`. You can get the radius of the photosphere via `v_inner_boundary*time_explosion` and outer edge of the supernova via `v_outer_boundary*time_explosion`.\n", "\n", "
\n", " \n", diff --git a/docs/quickstart.ipynb b/docs/quickstart.ipynb index 4479a42c655..3e9669dbd96 100644 --- a/docs/quickstart.ipynb +++ b/docs/quickstart.ipynb @@ -17,7 +17,7 @@ "\n", "## Atomic Data\n", "\n", - "We recommend using the [kurucz_cd23_chianti_H_He.h5](https://dev.azure.com/tardis-sn/TARDIS/_apis/git/repositories/tardis-refdata/items?path=atom_data/kurucz_cd23_chianti_H_He.h5&resolveLfs=true) dataset." + "We recommend using the [kurucz_cd23_chianti_H_He.h5](https://github.com/tardis-sn/tardis-regression-data/raw/main/atom_data/kurucz_cd23_chianti_H_He.h5) dataset." ] }, { diff --git a/docs/resources/credits.rst b/docs/resources/credits.rst index 5672a9b6928..22ef542db45 100644 --- a/docs/resources/credits.rst +++ b/docs/resources/credits.rst @@ -74,14 +74,14 @@ The following BibTeX entries are needed for the references: adsnote = {Provided by the SAO/NASA Astrophysics Data System} } -.. |CITATION| replace:: kerzendorf_2024_14300679 +.. |CITATION| replace:: kerzendorf_2025_14633332 -.. |DOI_BADGE| image:: https://img.shields.io/badge/DOI-10.5281/zenodo.14300679-blue - :target: https://doi.org/10.5281/zenodo.14300679 +.. |DOI_BADGE| image:: https://img.shields.io/badge/DOI-10.5281/zenodo.14633332-blue + :target: https://doi.org/10.5281/zenodo.14633332 .. 
code-block:: bibtex - @software{kerzendorf_2024_14300679, + @software{kerzendorf_2025_14633332, author = {Kerzendorf, Wolfgang and Sim, Stuart and Vogl, Christian and @@ -189,12 +189,12 @@ The following BibTeX entries are needed for the references: Nayak U, Ashwin and Kumar, Atul and Kharkar, Atharwa}, - title = {tardis-sn/tardis: TARDIS v2024.12.08}, - month = dec, - year = 2024, + title = {tardis-sn/tardis: TARDIS v2025.01.12}, + month = jan, + year = 2025, publisher = {Zenodo}, - version = {release-2024.12.08}, - doi = {10.5281/zenodo.14300679}, - url = {https://doi.org/10.5281/zenodo.14300679} + version = {release-2025.01.12}, + doi = {10.5281/zenodo.14633332}, + url = {https://doi.org/10.5281/zenodo.14633332}, } diff --git a/docs/workflows/v_inner_solver_workflow.ipynb b/docs/workflows/v_inner_solver_workflow.ipynb new file mode 100644 index 00000000000..e1cf9507447 --- /dev/null +++ b/docs/workflows/v_inner_solver_workflow.ipynb @@ -0,0 +1,128 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from tardis.workflows.v_inner_solver import InnerVelocitySolverWorkflow\n", + "from tardis.io.configuration.config_reader import Configuration" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "config = Configuration.from_yaml('../tardis_example.yml')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This code modifies the TARDIS example configuration to include convergence information for the inner boundary velocity solver.\n", + "Note that the number of shells is increased and the starting velocity is reduced to provide more granularity and a wider search window, respectively." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "from astropy import units as u\n", + "\n", + "config.montecarlo.convergence_strategy['v_inner_boundary'] = {\n", + " 'damping_constant' : 0.5,\n", + " 'threshold' : 0.01,\n", + " 'type' : 'damped'\n", + " }\n", + "\n", + "config.montecarlo.convergence_strategy.stop_if_converged = True\n", + "config.model.structure.velocity.start = 5000 * u.km/u.s # Larger window over which to search\n", + "config.model.structure.velocity.num = 50 # Increase number of shells\n", + "\n", + "workflow = InnerVelocitySolverWorkflow(\n", + " config, tau=2.0/3,\n", + " mean_optical_depth=\"rosseland\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "workflow.run()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "spectrum = workflow.spectrum_solver.spectrum_real_packets\n", + "spectrum_virtual = workflow.spectrum_solver.spectrum_virtual_packets\n", + "spectrum_integrated = workflow.spectrum_solver.spectrum_integrated" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%matplotlib inline\n", + "plt.figure(figsize=(10, 6.5))\n", + "\n", + "spectrum.plot(label=\"Normal packets\")\n", + "spectrum_virtual.plot(label=\"Virtual packets\")\n", + "spectrum_integrated.plot(label='Formal integral')\n", + "\n", + "plt.xlim(500, 9000)\n", + "plt.title(\"TARDIS example model spectrum\")\n", + "plt.xlabel(r\"Wavelength [$\\AA$]\")\n", + "plt.ylabel(r\"Luminosity density [erg/s/$\\AA$]\")\n", + "plt.legend()\n", + "plt.show()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "tardis", + "language": 
"python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.4" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/pyproject.toml b/pyproject.toml index da196d6c396..17ad0e69cba 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -190,5 +190,5 @@ owner = "tardis-sn" repo = "tardis" [tool.codespell] -skip = "*.png,*.ggb,*.jpg,*.gif,*.ico,docs/contributing/CHANGELOG.md,docs/tardis.bib" +skip = "*.ipynb,*.png,*.ggb,*.jpg,*.gif,*.ico,docs/contributing/CHANGELOG.md,docs/tardis.bib,docs/resources/research_done_using_TARDIS/research_papers.rst" quiet-level = 3 \ No newline at end of file diff --git a/tardis/io/atom_data/base.py b/tardis/io/atom_data/base.py index 40acbe0b233..028083a9761 100644 --- a/tardis/io/atom_data/base.py +++ b/tardis/io/atom_data/base.py @@ -448,8 +448,11 @@ def prepare_lines(self): self.selected_atomic_numbers, level="atomic_number" ) ] - - self.lines = self.lines.sort_values(by="wavelength") + # see https://github.com/numpy/numpy/issues/27725#issuecomment-2465471648 + # with kind="stable" the returned array will maintain the relative order of a values which compare as equal. 
+ # this is important especially after numpy v2 release + # https://numpy.org/doc/stable/release/2.0.0-notes.html#minor-changes-in-behavior-of-sorting-functions + self.lines = self.lines.sort_values(by=["wavelength", "line_id"], kind="stable") def prepare_line_level_indexes(self): levels_index = pd.Series( diff --git a/tardis/io/configuration/config_reader.py b/tardis/io/configuration/config_reader.py index c4efb949031..bfed003e583 100644 --- a/tardis/io/configuration/config_reader.py +++ b/tardis/io/configuration/config_reader.py @@ -431,7 +431,7 @@ def parse_convergence_section(convergence_section_dict): """ convergence_parameters = ["damping_constant", "threshold", "type"] - for convergence_variable in ["t_inner", "t_rad", "w"]: + for convergence_variable in ["t_inner", "t_rad", "w", "v_inner_boundary"]: if convergence_variable not in convergence_section_dict: convergence_section_dict[convergence_variable] = {} convergence_variable_section = convergence_section_dict[ diff --git a/tardis/io/configuration/schemas/montecarlo_definitions.yml b/tardis/io/configuration/schemas/montecarlo_definitions.yml index 1adf22599ae..b79a4b7da00 100644 --- a/tardis/io/configuration/schemas/montecarlo_definitions.yml +++ b/tardis/io/configuration/schemas/montecarlo_definitions.yml @@ -5,7 +5,6 @@ definitions: title: 'Damped Convergence Strategy' type: object additionalProperties: false - properties: properties: type: enum: @@ -59,6 +58,24 @@ definitions: type: string default: 'damped' description: THIS IS A DUMMY VARIABLE DO NOT USE + v_inner_boundary: + type: object + additionalProperties: false + properties: + damping_constant: + type: number + default: 0.0 + description: damping constant + minimum: 0 + threshold: + type: number + description: specifies the threshold that is taken as convergence (i.e. 
+ 0.05 means that the value does not change more than 5%) + minimum: 0 + type: + type: string + default: 'damped' + description: THIS IS A DUMMY VARIABLE DO NOT USE t_rad: type: object additionalProperties: false diff --git a/tardis/io/model/parse_geometry_configuration.py b/tardis/io/model/parse_geometry_configuration.py index c517a4a7ea4..e2f0c8fd20f 100644 --- a/tardis/io/model/parse_geometry_configuration.py +++ b/tardis/io/model/parse_geometry_configuration.py @@ -135,17 +135,17 @@ def parse_geometry_from_csvy( """ if hasattr(config, "model"): if hasattr(config.model, "v_inner_boundary"): - v_boundary_inner = config.model.v_inner_boundary + v_inner_boundary = config.model.v_inner_boundary else: - v_boundary_inner = None + v_inner_boundary = None if hasattr(config.model, "v_outer_boundary"): - v_boundary_outer = config.model.v_outer_boundary + v_outer_boundary = config.model.v_outer_boundary else: - v_boundary_outer = None + v_outer_boundary = None else: - v_boundary_inner = None - v_boundary_outer = None + v_inner_boundary = None + v_outer_boundary = None if hasattr(csvy_model_config, "velocity"): velocity = quantity_linspace( @@ -166,8 +166,8 @@ def parse_geometry_from_csvy( geometry = HomologousRadial1DGeometry( velocity[:-1], # v_inner velocity[1:], # v_outer - v_inner_boundary=v_boundary_inner, - v_outer_boundary=v_boundary_outer, + v_inner_boundary=v_inner_boundary, + v_outer_boundary=v_outer_boundary, time_explosion=time_explosion, ) return geometry diff --git a/tardis/io/model/parse_radiation_field_configuration.py b/tardis/io/model/parse_radiation_field_configuration.py index d0ff62badc9..11b878ed2e8 100644 --- a/tardis/io/model/parse_radiation_field_configuration.py +++ b/tardis/io/model/parse_radiation_field_configuration.py @@ -8,6 +8,7 @@ parse_structure_from_config, ) from tardis.plasma.radiation_field import DilutePlanckianRadiationField +from tardis.radiation_field.validate_radiation_field import validate_radiative_temperature logger = 
logging.getLogger(__name__) @@ -113,12 +114,7 @@ def parse_radiation_field_state_from_csvy( geometry, packet_source ) - if np.any(t_radiative < 1000 * u.K): - logging.critical( - "Radiative temperature is too low in some of the shells, temperatures below 1000K " - f"(e.g., T_rad = {t_radiative[np.argmin(t_radiative)]} in shell {np.argmin(t_radiative)} in your model) " - "are not accurately handled by TARDIS.", - ) + validate_radiative_temperature(t_radiative) if hasattr(csvy_model_data, "columns") and ( "dilution_factor" in csvy_model_data.columns diff --git a/tardis/io/model/readers/generic_readers.py b/tardis/io/model/readers/generic_readers.py index 96a4bfd2077..e2ca77c1f2a 100644 --- a/tardis/io/model/readers/generic_readers.py +++ b/tardis/io/model/readers/generic_readers.py @@ -48,8 +48,8 @@ def read_simple_ascii_density( fname, skip_header=1, names=("index", "velocity", "density"), - dtype=None, - encoding=None, + dtype=[('index', 'i8'), ('velocity', 'f8'), ('density', 'f8')], + encoding='utf-8', ) velocity = (data["velocity"] * u.km / u.s).to("cm/s") mean_density = (data["density"] * u.Unit("g/cm^3"))[1:] diff --git a/tardis/model/base.py b/tardis/model/base.py index b3f3ae98851..960c47a9455 100644 --- a/tardis/model/base.py +++ b/tardis/model/base.py @@ -58,11 +58,11 @@ class SimulationState(HDFWriterMixin): dilution_factor : np.ndarray If None, the dilution_factor will be initialized with the geometric dilution factor. 
- v_boundary_inner : astropy.units.Quantity - v_boundary_outer : astropy.units.Quantity + v_inner_boundary : astropy.units.Quantity + v_outer_boundary : astropy.units.Quantity raw_velocity : np.ndarray The complete array of the velocities, without being cut by - `v_boundary_inner` and `v_boundary_outer` + `v_inner_boundary` and `v_outer_boundary` electron_densities : astropy.units.quantity.Quantity Attributes @@ -81,8 +81,8 @@ class SimulationState(HDFWriterMixin): density : astropy.units.quantity.Quantity volume : astropy.units.quantity.Quantity no_of_shells : int - The number of shells as formed by `v_boundary_inner` and - `v_boundary_outer` + The number of shells as formed by `v_inner_boundary` and + `v_outer_boundary` no_of_raw_shells : int """ @@ -206,11 +206,11 @@ def radius(self): return self.time_explosion * self.velocity @property - def v_boundary_inner(self): + def v_inner_boundary(self): return self.geometry.v_inner_boundary @property - def v_boundary_outer(self): + def v_outer_boundary(self): return self.geometry.v_outer_boundary @property diff --git a/tardis/opacities/tests/__init__.py b/tardis/opacities/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tardis/radiation_field/validate_radiation_field.py b/tardis/radiation_field/validate_radiation_field.py new file mode 100644 index 00000000000..3d0a71f9b08 --- /dev/null +++ b/tardis/radiation_field/validate_radiation_field.py @@ -0,0 +1,31 @@ +import logging +import numpy as np +from astropy import units as u + +logger = logging.getLogger(__name__) + +def validate_radiative_temperature(t_radiative): + """ + Validates the radiative temperature to ensure it is physically reasonable. + + Parameters + ---------- + t_radiative : Quantity + The radiative temperature array. + + Raises + ------ + ValueError + If any radiative temperature is below 1000 K. 
+ """ + if np.any(t_radiative < 1000 * u.K): + min_t_rad = t_radiative[np.argmin(t_radiative)] + min_shell = np.argmin(t_radiative) + logging.critical( + "Radiative temperature is too low in some of the shells, temperatures below 1000K " + f"(e.g., T_rad = {min_t_rad} in shell {min_shell} in your model) " + "are not accurately handled by TARDIS." + ) + raise ValueError( + f"Radiative temperature below 1000 K detected: T_rad = {min_t_rad} in shell {min_shell}." + ) diff --git a/tardis/spectrum/formal_integral.py b/tardis/spectrum/formal_integral.py index 18dfedb5975..12c0ca56313 100644 --- a/tardis/spectrum/formal_integral.py +++ b/tardis/spectrum/formal_integral.py @@ -434,9 +434,22 @@ def make_source_function(self): ------- Numpy array containing ( 1 - exp(-tau_ul) ) S_ul ordered by wavelength of the transition u -> l """ + simulation_state = self.simulation_state + # slice for the active shells + local_slice = slice( + simulation_state.geometry.v_inner_boundary_index, + simulation_state.geometry.v_outer_boundary_index, + ) + transport = self.transport montecarlo_transport_state = transport.transport_state + transition_probabilities = self.opacity_state.transition_probabilities[ + :, local_slice + ] + tau_sobolevs = self.opacity_state.tau_sobolev[:, local_slice] + + columns = range(simulation_state.no_of_shells) # macro_ref = self.atomic_data.macro_atom_references macro_ref = self.atomic_data.macro_atom_references @@ -449,9 +462,7 @@ def make_source_function(self): if transport.line_interaction_type == "macroatom": internal_jump_mask = (macro_data.transition_type >= 0).values ma_int_data = macro_data[internal_jump_mask] - internal = self.opacity_state.transition_probabilities[ - internal_jump_mask - ] + internal = transition_probabilities[internal_jump_mask] source_level_idx = ma_int_data.source_level_idx.values destination_level_idx = ma_int_data.destination_level_idx.values @@ -460,7 +471,7 @@ def make_source_function(self): 
montecarlo_transport_state.packet_collection.time_of_simulation * simulation_state.volume ) - exptau = 1 - np.exp(-self.opacity_state.tau_sobolev) + exptau = 1 - np.exp(-tau_sobolevs) Edotlu = ( Edotlu_norm_factor * exptau @@ -493,14 +504,14 @@ def make_source_function(self): upper_level_index = self.atomic_data.lines.index.droplevel( "level_number_lower" ) - e_dot_lu = pd.DataFrame(Edotlu.value, index=upper_level_index) + e_dot_lu = pd.DataFrame( + Edotlu.value, index=upper_level_index, columns=columns + ) e_dot_u = e_dot_lu.groupby(level=[0, 1, 2]).sum() e_dot_u_src_idx = macro_ref.loc[e_dot_u.index].references_idx.values if transport.line_interaction_type == "macroatom": - C_frame = pd.DataFrame( - columns=np.arange(no_shells), index=macro_ref.index - ) + C_frame = pd.DataFrame(columns=columns, index=macro_ref.index) q_indices = (source_level_idx, destination_level_idx) for shell in range(no_shells): Q = sp.coo_matrix( @@ -523,7 +534,7 @@ def make_source_function(self): ["atomic_number", "ion_number", "source_level_number"] ).index.copy() tmp = pd.DataFrame( - self.opacity_state.transition_probabilities[ + transition_probabilities[ (self.atomic_data.macro_atom_data.transition_type == -1).values ] ) @@ -539,13 +550,15 @@ def make_source_function(self): att_S_ul = wave * (q_ul * e_dot_u) * t / (4 * np.pi) result = pd.DataFrame( - att_S_ul.values, index=transitions.transition_line_id.values + att_S_ul.values, + index=transitions.transition_line_id.values, + columns=columns, ) att_S_ul = result.loc[lines.index.values].values # Jredlu should already by in the correct order, i.e. 
by wavelength of # the transition l->u (similar to Jbluelu) - Jredlu = Jbluelu * np.exp(-self.opacity_state.tau_sobolev) + att_S_ul + Jredlu = Jbluelu * np.exp(-tau_sobolevs) + att_S_ul if self.interpolate_shells > 0: ( att_S_ul, @@ -592,7 +605,9 @@ def interpolate_integrator_quantities( transport.electron_densities_integ = interp1d( r_middle, - plasma.electron_densities, + plasma.electron_densities.iloc[ + self.simulation_state.geometry.v_inner_boundary_index : self.simulation_state.geometry.v_outer_boundary_index + ], fill_value="extrapolate", kind="nearest", )(r_middle_integ) @@ -600,7 +615,10 @@ def interpolate_integrator_quantities( # (as in the MC simulation) transport.tau_sobolevs_integ = interp1d( r_middle, - self.opacity_state.tau_sobolev, + self.opacity_state.tau_sobolev[ + :, + self.simulation_state.geometry.v_inner_boundary_index : self.simulation_state.geometry.v_outer_boundary_index, + ], fill_value="extrapolate", kind="nearest", )(r_middle_integ) diff --git a/tardis/transport/montecarlo/tests/test_rpacket_tracker.py b/tardis/transport/montecarlo/tests/test_rpacket_tracker.py index 971183d2360..9317089a65e 100644 --- a/tardis/transport/montecarlo/tests/test_rpacket_tracker.py +++ b/tardis/transport/montecarlo/tests/test_rpacket_tracker.py @@ -135,10 +135,9 @@ def test_rpacket_tracker_properties(expected, obtained, request): def test_boundary_interactions(rpacket_tracker, regression_data): no_of_packets = len(rpacket_tracker) - # Hard coding the number of columns - # Based on the largest size of boundary_interaction array (60) + max_boundary_interaction_size = max([tracker.boundary_interaction.size for tracker in rpacket_tracker]) obtained_boundary_interaction = np.full( - (no_of_packets, 64), + (no_of_packets, max_boundary_interaction_size), [-1], dtype=rpacket_tracker[0].boundary_interaction.dtype, ) diff --git a/tardis/workflows/simple_tardis_workflow.py b/tardis/workflows/simple_tardis_workflow.py index 18e48ead456..9abb21be7fe 100644 --- 
a/tardis/workflows/simple_tardis_workflow.py +++ b/tardis/workflows/simple_tardis_workflow.py @@ -268,6 +268,8 @@ def solve_simulation_state( "t_inner" ] + + return next_values + def solve_plasma(self, estimated_radfield_properties): """Update the plasma solution with the new radiation field estimates diff --git a/tardis/workflows/util.py b/tardis/workflows/util.py new file mode 100644 index 00000000000..43aee66989d --- /dev/null +++ b/tardis/workflows/util.py @@ -0,0 +1,96 @@ +import numpy as np +from astropy import constants as const +from astropy import units as u + + +def get_tau_integ(plasma, opacity_state, simulation_state, bin_size=10): + """Estimate the integrated mean optical depth at each velocity bin + + Parameters + ---------- + plasma : tardis.plasma.BasePlasma + The tardis legacy plasma + simulation_state : tardis.model.base.SimulationState + the current simulation state + bin_size : int, optional. Default : 10 + bin size for the aggregation of line opacities + + Returns + ------- + dict + rosseland : np.ndarray + Rosseland Mean Optical Depth + planck : np.ndarray + Planck Mean Optical Depth + """ + index = plasma.atomic_data.lines.nu.index + freqs = plasma.atomic_data.lines.nu.values * u.Hz + order = np.argsort(freqs) + freqs = freqs[order] + taus = opacity_state.tau_sobolev.values[order] + + check_bin_size = True + while check_bin_size: + extra = bin_size - len(freqs) % bin_size + extra_freqs = (np.arange(extra + 1) + 1) * u.Hz + extra_taus = np.zeros((extra + 1, taus.shape[1])) + freqs = np.hstack((extra_freqs, freqs)) + taus = np.vstack((extra_taus, taus)) + + bins_low = freqs[:-bin_size:bin_size] + bins_high = freqs[bin_size::bin_size] + delta_nu = bins_high - bins_low + n_bins = len(delta_nu) + + if np.any(delta_nu == 0): + bin_size += 2 + else: + check_bin_size = False + + taus = taus[1 : n_bins * bin_size + 1] + freqs = freqs[1 : n_bins * bin_size + 1] + + ct = simulation_state.time_explosion * const.c + t_rad = 
simulation_state.radiation_field_state.temperature + + def B(nu, T): + return ( + 2 + * const.h + * nu**3 + / const.c**2 + / (np.exp(const.h * nu / (const.k_B * T)) - 1) + ) + + def U(nu, T): + return ( + B(nu, T) ** 2 * (const.c / nu) ** 2 * (2 * const.k_B * T**2) ** -1 + ) + + kappa_exp = ( + (bins_low / delta_nu).reshape(-1, 1) + / ct + * (1 - np.exp(-taus.reshape(n_bins, bin_size, -1))).sum(axis=1) + ) + kappa_thom = plasma.electron_densities.values * u.cm ** (-3) * const.sigma_T + Bdnu = B(bins_low.reshape(-1, 1), t_rad.reshape(1, -1)) * delta_nu.reshape( + -1, 1 + ) + kappa_planck = kappa_thom + (Bdnu * kappa_exp).sum(axis=0) / ( + Bdnu.sum(axis=0) + ) + + udnu = U(bins_low.reshape(-1, 1), t_rad.reshape(1, -1)) * delta_nu.reshape( + -1, 1 + ) + kappa_tot = kappa_thom + kappa_exp + kappa_rosseland = ( + (udnu * kappa_tot**-1).sum(axis=0) / (udnu.sum(axis=0)) + ) ** -1 + + dr = simulation_state.geometry.r_outer - simulation_state.geometry.r_inner + dtau = kappa_planck * dr + planck_integ_tau = np.cumsum(dtau[::-1])[::-1] + rosseland_integ_tau = np.cumsum((kappa_rosseland * dr)[::-1])[::-1] + + return {"rosseland": rosseland_integ_tau, "planck": planck_integ_tau} diff --git a/tardis/workflows/v_inner_solver.py b/tardis/workflows/v_inner_solver.py new file mode 100644 index 00000000000..20edd13c8c9 --- /dev/null +++ b/tardis/workflows/v_inner_solver.py @@ -0,0 +1,360 @@ +import logging + +import numpy as np +import pandas as pd +from astropy import units as u +from scipy.interpolate import interp1d + +from tardis.plasma.radiation_field import DilutePlanckianRadiationField +from tardis.simulation.convergence import ConvergenceSolver +from tardis.workflows.simple_tardis_workflow import SimpleTARDISWorkflow +from tardis.workflows.util import get_tau_integ + +# logging support +logger = logging.getLogger(__name__) + +# TODO: +# Option to estimate initial v_inner from electron opacity +# Add option for desired optical depth +# Think about csvy vs other formats for 
specifying grid +# Handle non-explicit formats when going out of the simulation + + +class InnerVelocitySolverWorkflow(SimpleTARDISWorkflow): + TAU_TARGET = np.log(2.0 / 3.0) + + def __init__(self, configuration, mean_optical_depth="rosseland", tau=None): + super().__init__(configuration) + self.mean_optical_depth = mean_optical_depth.lower() + + self.convergence_solvers["v_inner_boundary"] = ConvergenceSolver( + self.convergence_strategy.v_inner_boundary + ) + + # Need to compute the opacity state on init to get the optical depths + # for the first inner boundary calculation. + self.opacity_states = self.solve_opacity() + + if tau is not None: + self.TAU_TARGET = np.log(tau) + + initial_v_inner = self.estimate_v_inner() + + self.simulation_state.geometry.v_inner_boundary = initial_v_inner + self.simulation_state.blackbody_packet_source.radius = ( + self.simulation_state.r_inner[0] + ) + + def estimate_v_inner(self): + """ + Compute the Rosseland Mean Optical Depth, + Estimate location where v_inner makes t=2/3 (or target) + Extrapolate with exponential fits + + Need some way to return and inspect the optical depths for later logging + """ + tau_integ = np.log( + get_tau_integ( + self.plasma_solver, + self.opacity_states["opacity_state"], + self.simulation_state, + )[self.mean_optical_depth] + ) + + interpolator = interp1d( + tau_integ[self.simulation_state.geometry.v_inner_boundary_index:], + self.simulation_state.geometry.v_inner_active, # Only use the active values as we only need a numerical estimate, not an index + fill_value="extrapolate", + ) + + estimated_v_inner = interpolator(self.TAU_TARGET) * u.cm / u.s + + if estimated_v_inner < self.simulation_state.geometry.v_inner[0]: + estimated_v_inner = self.simulation_state.geometry.v_inner[0] + logger.warning( + "WARNING: v_inner_boundary outside of simulation, setting to first shell" + ) + elif estimated_v_inner > self.simulation_state.geometry.v_inner[-1]: + estimated_v_inner = 
self.simulation_state.geometry.v_inner[-1] + logger.warning( + "WARNING: v_inner_boundary outside of simulation, setting to last shell" + ) + + return estimated_v_inner + + @property + def property_mask(self): + mask = np.zeros( + (len(self.simulation_state.geometry.r_inner)), dtype=bool + ) + mask[ + self.simulation_state.geometry.v_inner_boundary_index : self.simulation_state.geometry.v_outer_boundary_index + ] = True + return mask + + def get_convergence_estimates(self, transport_state): + """Compute convergence estimates from the transport state + + Parameters + ---------- + transport_state : MonteCarloTransportState + Transport state object to compute estimates + + Returns + ------- + dict + Convergence estimates + EstimatedRadiationFieldProperties + Dilute radiation field and j_blues dataclass + """ + estimates = super().get_convergence_estimates(transport_state) + + estimated_v_inner = self.estimate_v_inner() + + estimates[0].update( + {"v_inner_boundary": estimated_v_inner, "mask": self.property_mask} + ) + + return estimates + + def reproject(self, array_one, mask_one, array_two, mask_two): + """Reprojects two sub_arrays defined by a set of masks onto an array where the masks of both objects are true + + let A1, A2 be arrays of size geometry.no_of_shells and + a1 = A1[mask_one], + a2 = A2[mask_two] + find a1*, a2* s.t. 
+ a1* = A1[mask_one & mask_two], + a2* = A2[mask_one & mask_two] + this is equivalent to + a1* = A1[mask_one][mask_two[mask_one]] = a1[mask_two[mask_one]], + a2* = A2[mask_two][mask_one[mask_two]] = a2[mask_one[mask_two]] + + Parameters + ---------- + array_one : np.ndarray + A sub array of an array with the shape of a geometry property + mask_one : np.ndarray(bool) + Mask such that the parent array accessed at this mask gives a1 + array_two : np.ndarray + A sub array of an array with the shape of a geometry property + mask_two : np.ndarray(bool) + Mask such that the parent array accessed at this mask gives a2 + + Returns + ------- + array_one* + reprojection of array_one onto mask_one & mask_two + array_two* + reprojection of array_two onto mask_one & mask_two + """ + return array_one[mask_two[mask_one]], array_two[mask_one[mask_two]] + + def print_mask(self, mask): + return "".join([{True: "-", False: "X"}[m] for m in mask]).join("[]") + + def check_convergence( + self, + estimated_values, + ): + """Check convergence status for a dict of estimated values + + Parameters + ---------- + estimated_values : dict + Estimates to check convergence + + Returns + ------- + bool + If convergence has occurred + """ + convergence_statuses = [] + + mask = estimated_values["mask"] + + if not np.all(mask == self.property_mask): + convergence_statuses.append(False) + # Need to set status to False if change in mask size + logger.info( + f"Resized Geometry, Convergence Suppressed\n" + f"\t Old Geometry: {self.print_mask(mask)}\n" + f"\t New Geometry: {self.print_mask(self.property_mask)}" + ) + + for key, solver in self.convergence_solvers.items(): + current_value = getattr(self.simulation_state, key) + estimated_value = estimated_values[key] + + if key in ["t_radiative", "dilution_factor"]: + current_value, estimated_value = self.reproject( + current_value, self.property_mask, estimated_value, mask + ) + no_of_shells = current_value.shape[0] + else: + no_of_shells = 1 + 
convergence_statuses.append( + solver.get_convergence_status( + current_value, estimated_value, no_of_shells + ) + ) + + if np.all(convergence_statuses): + hold_iterations = self.convergence_strategy.hold_iterations + self.consecutive_converges_count += 1 + logger.info( + f"Iteration converged {self.consecutive_converges_count:d}/{(hold_iterations + 1):d} consecutive " + f"times." + ) + + return self.consecutive_converges_count == hold_iterations + 1 + + self.consecutive_converges_count = 0 + return False + + def solve_simulation_state(self, estimated_values): + """Update the simulation state with new inputs computed from previous + iteration estimates. + + Parameters + ---------- + estimated_values : dict + Estimated from the previous iterations + + Returns + ------- + next_values : dict + The next values assigned to the simulation state + """ + next_values = super().solve_simulation_state(estimated_values) + self.simulation_state.geometry.v_inner_boundary = next_values[ + "v_inner_boundary" + ] + self.simulation_state.blackbody_packet_source.radius = ( + self.simulation_state.r_inner[0] + ) + + return next_values + + def solve_plasma( + self, + estimated_radfield_properties, + mask, + ): + """Update the plasma solution with the new radiation field estimates + + Parameters + ---------- + estimated_radfield_properties : EstimatedRadiationFieldProperties + The radiation field properties to use for updating the plasma + radiation_field: tardis.plasma.radiation_field.RadiationField + Current radiation field object from the last iteration + + Raises + ------ + ValueError + If the plasma solver radiative rates type is unknown + """ + radiation_field = DilutePlanckianRadiationField( + temperature=self.simulation_state.radiation_field_state.temperature, + dilution_factor=self.simulation_state.radiation_field_state.dilution_factor, + ) + update_properties = dict( + dilute_planckian_radiation_field=radiation_field + ) + # A check to see if the plasma is set with 
JBluesDetailed, in which + # case it needs some extra kwargs. + if ( + self.plasma_solver.plasma_solver_settings.RADIATIVE_RATES_TYPE + == "blackbody" + ): + planckian_radiation_field = ( + radiation_field.to_planckian_radiation_field() + ) + j_blues = planckian_radiation_field.calculate_mean_intensity( + self.plasma_solver.atomic_data.lines.nu.values + ) + update_properties["j_blues"] = pd.DataFrame( + j_blues, index=self.plasma_solver.atomic_data.lines.index + ) + elif ( + self.plasma_solver.plasma_solver_settings.RADIATIVE_RATES_TYPE + == "dilute-blackbody" + ): + j_blues = radiation_field.calculate_mean_intensity( + self.plasma_solver.atomic_data.lines.nu.values + ) + update_properties["j_blues"] = pd.DataFrame( + j_blues, index=self.plasma_solver.atomic_data.lines.index + ) + elif ( + self.plasma_solver.plasma_solver_settings.RADIATIVE_RATES_TYPE + == "detailed" + ): + j_blues = radiation_field.calculate_mean_intensity( + self.plasma_solver.atomic_data.lines.nu.values + ) + j_blues[:, mask] = estimated_radfield_properties.j_blues + update_properties["j_blues"] = pd.DataFrame( + j_blues, + index=self.plasma_solver.atomic_data.lines.index, + ) + else: + raise ValueError( + f"radiative_rates_type type unknown - {self.plasma.plasma_solver_settings.RADIATIVE_RATES_TYPE}" + ) + + self.plasma_solver.update(**update_properties) + + def run(self): + """Run the TARDIS simulation until convergence is reached""" + converged = False + while self.completed_iterations < self.total_iterations - 1: + logger.info( + f"\n\tStarting iteration {(self.completed_iterations + 1):d} of {self.total_iterations:d}" + ) + + # Note that we are updating the class attribute here to ensure consistency + self.opacity_states = self.solve_opacity() + + transport_state, virtual_packet_energies = self.solve_montecarlo( + self.opacity_states, self.real_packet_count + ) + + ( + estimated_values, + estimated_radfield_properties, + ) = self.get_convergence_estimates(transport_state) + + 
self.solve_simulation_state(estimated_values) + + self.solve_plasma( + estimated_radfield_properties, + estimated_values["mask"], + ) + + converged = self.check_convergence(estimated_values) + + self.completed_iterations += 1 + + if converged and self.convergence_strategy.stop_if_converged: + break + + if converged: + logger.info("\n\tStarting final iteration") + else: + logger.error( + "\n\tITERATIONS HAVE NOT CONVERGED, starting final iteration" + ) + self.opacity_states = self.solve_opacity() + transport_state, virtual_packet_energies = self.solve_montecarlo( + self.opacity_states, + self.final_iteration_packet_count, + self.virtual_packet_count, + ) + self.initialize_spectrum_solver( + transport_state, + self.opacity_states, + virtual_packet_energies, + )