diff --git a/.github/ci/min-core-deps.yml b/.github/ci/min-core-deps.yml index 25eec60d9c..d6c835ec5b 100644 --- a/.github/ci/min-core-deps.yml +++ b/.github/ci/min-core-deps.yml @@ -8,7 +8,6 @@ dependencies: # Run ci/min_deps_check.py to verify that this file respects the policy. - python=3.10 - cftime=1.6 - - cgen=2020.1 - dask=2022.8 - matplotlib-base=3.5 # netcdf follows a 1.major.minor[.patch] convention diff --git a/docs/examples/tutorial_parcels_structure.ipynb b/docs/examples/tutorial_parcels_structure.ipynb index df6427faca..79251bdfd2 100644 --- a/docs/examples/tutorial_parcels_structure.ipynb +++ b/docs/examples/tutorial_parcels_structure.ipynb @@ -25,7 +25,7 @@ "source": [ "1. [**FieldSet**](#1.-FieldSet). Load and set up the fields. These can be velocity fields that are used to advect the particles, but it can also be e.g. temperature.\n", "2. [**ParticleSet**](#2.-ParticleSet). Define the type of particles. Also additional `Variables` can be added to the particles (e.g. temperature, to keep track of the temperature that particles experience).\n", - "3. [**Kernels**](#3.-Kernels). Define and compile kernels. Kernels perform some specific operation on the particles every time step (e.g. interpolate the temperature from the temperature field to the particle location).\n", + "3. [**Kernels**](#3.-Kernels). Kernels perform some specific operation on the particles every time step (e.g. interpolate the temperature from the temperature field to the particle location).\n", "4. [**Execution and output**](#4.-Execution-and-Output). Execute the simulation and write and store the output in a Zarr file.\n", "5. [**Optimising and parallelising**](#5.-Optimising-and-parallelising). 
Optimise and parallelise the code to run faster.\n", "\n", diff --git a/environment.yml b/environment.yml index 04eaad6b99..9c4a4e8ac1 100644 --- a/environment.yml +++ b/environment.yml @@ -3,7 +3,6 @@ channels: - conda-forge dependencies: - python>=3.10 - - cgen - ffmpeg>=3.2.3 - jupyter - matplotlib-base>=2.0.2 diff --git a/parcels/_typing.py b/parcels/_typing.py index b646032099..66e123d5e6 100644 --- a/parcels/_typing.py +++ b/parcels/_typing.py @@ -6,17 +6,11 @@ """ -import ast import datetime import os from collections.abc import Callable from typing import Any, Literal, get_args - -class ParcelsAST(ast.AST): - ccode: str - - InterpMethodOption = Literal[ "linear", "nearest", diff --git a/parcels/field.py b/parcels/field.py index 43807a4518..d808550a7e 100644 --- a/parcels/field.py +++ b/parcels/field.py @@ -149,7 +149,6 @@ class Field: Maximum allowed value on the field. Data above this value are set to zero cast_data_dtype : str Cast Field data to dtype. Supported dtypes are "float32" (np.float32 (default)) and "float64 (np.float64). 
- Note that dtype can only be "float32" in JIT mode time_origin : parcels.tools.converters.TimeConverter Time origin of the time axis (only if grid is None) interp_method : str @@ -320,7 +319,6 @@ def __init__( self._scaling_factor = None - # Variable names in JIT code self._dimensions = kwargs.pop("dimensions", None) self.indices = kwargs.pop("indices", None) self._dataFiles = kwargs.pop("dataFiles", None) @@ -1084,18 +1082,6 @@ def eval(self, time, z, y, x, particle=None, applyConversion=True): else: return value - def _ccode_eval(self, var, t, z, y, x): - self._check_velocitysampling() - ccode_str = ( - f"temporal_interpolation({t}, {z}, {y}, {x}, {self.ccode_name}, " - + "&particles->ti[pnum*ngrid], &particles->zi[pnum*ngrid], &particles->yi[pnum*ngrid], &particles->xi[pnum*ngrid], " - + f"&{var}, {self.interp_method.upper()}, {self.gridindexingtype.upper()})" - ) - return ccode_str - - def _ccode_convert(self, _, z, y, x): - return self.units.ccode_to_target(z, y, x) - def _get_block_id(self, block): return np.ravel_multi_index(block, self.nchunks) @@ -1923,22 +1909,6 @@ def __getitem__(self, key): except tuple(AllParcelsErrorCodes.keys()) as error: return _deal_with_errors(error, key, vector_type=self.vector_type) - def _ccode_eval(self, varU, varV, varW, U, V, W, t, z, y, x): - ccode_str = "" - if "3D" in self.vector_type: - ccode_str = ( - f"temporal_interpolationUVW({t}, {z}, {y}, {x}, {U.ccode_name}, {V.ccode_name}, {W.ccode_name}, " - + "&particles->ti[pnum*ngrid], &particles->zi[pnum*ngrid], &particles->yi[pnum*ngrid], &particles->xi[pnum*ngrid]," - + f"&{varU}, &{varV}, &{varW}, {U.interp_method.upper()}, {U.gridindexingtype.upper()})" - ) - else: - ccode_str = ( - f"temporal_interpolationUV({t}, {z}, {y}, {x}, {U.ccode_name}, {V.ccode_name}, " - + "&particles->ti[pnum*ngrid], &particles->zi[pnum*ngrid], &particles->yi[pnum*ngrid], &particles->xi[pnum*ngrid]," - + f" &{varU}, &{varV}, {U.interp_method.upper()}, {U.gridindexingtype.upper()})" - ) - 
return ccode_str - class DeferredArray: """Class used for throwing error when Field.data is not read in deferred loading mode.""" diff --git a/parcels/fieldset.py b/parcels/fieldset.py index 6416704946..2094dcec09 100644 --- a/parcels/fieldset.py +++ b/parcels/fieldset.py @@ -309,16 +309,6 @@ def check_velocityfields(U, V, W): g._time_origin = self.time_origin self._add_UVfield() - ccode_fieldnames = [] - counter = 1 - for fld in self.get_fields(): - if fld.name not in ccode_fieldnames: - fld.ccode_name = fld.name - else: - fld.ccode_name = fld.name + str(counter) - counter += 1 - ccode_fieldnames.append(fld.ccode_name) - for f in self.get_fields(): if isinstance(f, (VectorField, NestedField)) or f._dataFiles is None: continue @@ -1447,8 +1437,7 @@ def get_fields(self) -> list[Field | VectorField]: def add_constant(self, name, value): """Add a constant to the FieldSet. Note that all constants are - stored as 32-bit floats. While constants can be updated during - execution in SciPy mode, they can not be updated in JIT mode. + stored as 32-bit floats. Parameters ---------- diff --git a/parcels/kernel.py b/parcels/kernel.py index 07e2a61609..0fbca991fd 100644 --- a/parcels/kernel.py +++ b/parcels/kernel.py @@ -55,7 +55,7 @@ def __init__( self.funcvars = funcvars self.funccode = funccode self.py_ast = py_ast # TODO v4: check if this is needed - self.scipy_positionupdate_kernels_added = False + self._positionupdate_kernels_added = False @property def ptype(self): @@ -94,7 +94,7 @@ class Kernel(BaseKernel): Notes ----- - A Kernel is either created from a compiled object + A Kernel is either created from an object or the necessary information (funcname, funccode, funcvars) is provided. The py_ast argument may be derived from the code string, but for concatenation, the merged AST plus the new header definition is required.
@@ -159,7 +159,7 @@ def __init__( user_ctx = globals() finally: del stack # Remove cyclic references - # Compile and generate Python function from AST + # Generate Python function from AST py_mod = ast.parse("") py_mod.body = [self.py_ast] exec(compile(py_mod, "", "exec"), user_ctx) @@ -181,7 +181,7 @@ def pyfunc(self): def fieldset(self): return self._fieldset - def add_scipy_positionupdate_kernels(self): + def add_positionupdate_kernels(self): # Adding kernels that set and update the coordinate changes def Setcoords(particle, fieldset, time): # pragma: no cover particle_dlon = 0 # noqa @@ -324,23 +324,6 @@ def from_list(cls, fieldset, ptype, pyfunc_list, *args, **kwargs): pyfunc_list[0] = cls(fieldset, ptype, pyfunc_list[0], *args, **kwargs) return functools.reduce(lambda x, y: x + y, pyfunc_list) - def execute_python(self, pset, endtime, dt): - """Performs the core update loop via Python.""" - if self.fieldset is not None: - for f in self.fieldset.get_fields(): - if isinstance(f, (VectorField, NestedField)): - continue - f.data = np.array(f.data) - - if not self.scipy_positionupdate_kernels_added: - self.add_scipy_positionupdate_kernels() - self.scipy_positionupdate_kernels_added = True - - for p in pset: - self.evaluate_particle(p, endtime) - if p.state == StatusCode.StopAllExecution: - return StatusCode.StopAllExecution - def execute(self, pset, endtime, dt): """Execute this Kernel over a ParticleSet for several timesteps.""" pset.particledata.state[:] = StatusCode.Evaluate @@ -359,7 +342,19 @@ def execute(self, pset, endtime, dt): g._load_chunk == g._chunk_loaded_touched, g._chunk_deprecated, g._load_chunk ) - self.execute_python(pset, endtime, dt) + for f in self.fieldset.get_fields(): + if isinstance(f, (VectorField, NestedField)): + continue + f.data = np.array(f.data) + + if not self._positionupdate_kernels_added: + self.add_positionupdate_kernels() + self._positionupdate_kernels_added = True + + for p in pset: + self.evaluate_particle(p, endtime) + if 
p.state == StatusCode.StopAllExecution: + return StatusCode.StopAllExecution # Remove all particles that signalled deletion self.remove_deleted(pset) @@ -398,7 +393,9 @@ def execute(self, pset, endtime, dt): # Remove all particles that signalled deletion self.remove_deleted(pset) # Generalizable version! - self.execute_python(pset, endtime, dt) + # Re-execute Kernels to retry particles with StatusCode.Repeat + for p in pset: + self.evaluate_particle(p, endtime) n_error = pset._num_error_particles diff --git a/parcels/particle.py b/parcels/particle.py index c6bb8ee7e0..9190f91064 100644 --- a/parcels/particle.py +++ b/parcels/particle.py @@ -231,4 +231,6 @@ def setLastID(cls, offset): # TODO v4: check if we can implement this in anothe class JITParticle(ScipyParticle): def __init__(self, *args, **kwargs): - raise NotImplementedError("JITParticle has been deprecated in Parcels v4. Use ScipyParticle instead.") + raise NotImplementedError( + "JITParticle has been deprecated in Parcels v4. Use ScipyParticle instead." + ) # TODO v4: link to migration guide diff --git a/parcels/particledata.py b/parcels/particledata.py index 3d384dbf07..ea239b327e 100644 --- a/parcels/particledata.py +++ b/parcels/particledata.py @@ -147,7 +147,7 @@ def __init__(self, pclass, lon, lat, depth, time, lonlatdepth_dtype, pid_orig, n self._data["id"][:] = pid self._data["obs_written"][:] = 0 - # special case for exceptions which can only be handled from scipy + # special case for exceptions which can only be handled from scipy # TODO v4: check if this can be removed now that JIT is dropped self._data["exception"] = np.empty(self._ncount, dtype=object) initialised |= { diff --git a/parcels/particleset.py b/parcels/particleset.py index 5585b4c9f4..75d7d22564 100644 --- a/parcels/particleset.py +++ b/parcels/particleset.py @@ -932,13 +932,13 @@ def execute( Notes ----- - ``ParticleSet.execute()`` acts as the main entrypoint for simulations, and provides the simulation time-loop. 
This method encapsulates the logic controlling the switching between kernel execution (where control in handed to C in JIT mode), output file writing, reading in fields for new timesteps, adding new particles to the simulation domain, stopping the simulation, and executing custom functions (``postIterationCallbacks`` provided by the user). + ``ParticleSet.execute()`` acts as the main entrypoint for simulations, and provides the simulation time-loop. This method encapsulates the logic controlling the switching between kernel execution, output file writing, reading in fields for new timesteps, adding new particles to the simulation domain, stopping the simulation, and executing custom functions (``postIterationCallbacks`` provided by the user). """ # check if particleset is empty. If so, return immediately if len(self) == 0: return - # check if pyfunc has changed since last compile. If so, recompile + # check if pyfunc has changed since last generation. If so, regenerate if self._kernel is None or (self._kernel.pyfunc is not pyfunc and self._kernel is not pyfunc): # Generate and store Kernel if isinstance(pyfunc, Kernel): diff --git a/parcels/tools/converters.py b/parcels/tools/converters.py index bc2dec2e82..4c6af74c49 100644 --- a/parcels/tools/converters.py +++ b/parcels/tools/converters.py @@ -169,15 +169,9 @@ class UnitConverter: def to_target(self, value, z, y, x): return value - def ccode_to_target(self, z, y, x): - return "1.0" - def to_source(self, value, z, y, x): return value - def ccode_to_source(self, z, y, x): - return "1.0" - class Geographic(UnitConverter): """Unit converter from geometric to geographic coordinates (m to degree)""" @@ -191,12 +185,6 @@ def to_target(self, value, z, y, x): def to_source(self, value, z, y, x): return value * 1000.0 * 1.852 * 60.0 - def ccode_to_target(self, z, y, x): - return "(1.0 / (1000.0 * 1.852 * 60.0))" - - def ccode_to_source(self, z, y, x): - return "(1000.0 * 1.852 * 60.0)" - class 
GeographicPolar(UnitConverter): """Unit converter from geometric to geographic coordinates (m to degree) @@ -212,12 +200,6 @@ def to_target(self, value, z, y, x): def to_source(self, value, z, y, x): return value * 1000.0 * 1.852 * 60.0 * cos(y * pi / 180) - def ccode_to_target(self, z, y, x): - return f"(1.0 / (1000. * 1.852 * 60. * cos({y} * M_PI / 180)))" - - def ccode_to_source(self, z, y, x): - return f"(1000. * 1.852 * 60. * cos({y} * M_PI / 180))" - class GeographicSquare(UnitConverter): """Square distance converter from geometric to geographic coordinates (m2 to degree2)""" @@ -231,12 +213,6 @@ def to_target(self, value, z, y, x): def to_source(self, value, z, y, x): return value * pow(1000.0 * 1.852 * 60.0, 2) - def ccode_to_target(self, z, y, x): - return "pow(1.0 / (1000.0 * 1.852 * 60.0), 2)" - - def ccode_to_source(self, z, y, x): - return "pow((1000.0 * 1.852 * 60.0), 2)" - class GeographicPolarSquare(UnitConverter): """Square distance converter from geometric to geographic coordinates (m2 to degree2) @@ -252,12 +228,6 @@ def to_target(self, value, z, y, x): def to_source(self, value, z, y, x): return value * pow(1000.0 * 1.852 * 60.0 * cos(y * pi / 180), 2) - def ccode_to_target(self, z, y, x): - return f"pow(1.0 / (1000. * 1.852 * 60. * cos({y} * M_PI / 180)), 2)" - - def ccode_to_source(self, z, y, x): - return f"pow((1000. * 1.852 * 60. 
* cos({y} * M_PI / 180)), 2)" - unitconverters_map = { "U": GeographicPolar(), diff --git a/pyproject.toml b/pyproject.toml index 991a6691cf..e24b149c2d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -21,7 +21,6 @@ classifiers = [ "Intended Audience :: Science/Research", ] dependencies = [ - "cgen", "cftime", "numpy", "dask", @@ -152,6 +151,5 @@ module = [ "cftime", "pykdtree.kdtree", "netCDF4", - "cgen" ] ignore_missing_imports = true diff --git a/tests/test_grids.py b/tests/test_grids.py index aae482cdb6..2947d251ea 100644 --- a/tests/test_grids.py +++ b/tests/test_grids.py @@ -255,7 +255,7 @@ def bath_func(lon): for i in range(depth_g0.shape[0]): for k in range(depth_g0.shape[2]): depth_g0[i, :, k] = bath[i] * k / (depth_g0.shape[2] - 1) - depth_g0 = depth_g0.transpose() # we don't change it on purpose, to check if the transpose op if fixed in jit + depth_g0 = depth_g0.transpose() grid = RectilinearSGrid(lon_g0, lat_g0, depth=depth_g0) diff --git a/tests/test_interpolation.py b/tests/test_interpolation.py index 62c00bd554..957cef977e 100644 --- a/tests/test_interpolation.py +++ b/tests/test_interpolation.py @@ -90,10 +90,7 @@ def data_2d(): ], ) def test_raw_2d_interpolation(data_2d, func, eta, xsi, expected): - """Test the 2D interpolation functions on the raw arrays. - - Interpolation via the other interpolation methods are tested in `test_scipy_vs_jit`. 
- """ + """Test the 2D interpolation functions on the raw arrays.""" ti = 0 yi, xi = 1, 1 ctx = interpolation.InterpolationContext2D(data_2d, eta, xsi, ti, yi, xi) diff --git a/tests/test_particles.py b/tests/test_particles.py index 7976337599..dba621e0f3 100644 --- a/tests/test_particles.py +++ b/tests/test_particles.py @@ -47,7 +47,7 @@ def addOne(particle, fieldset, time): # pragma: no cover @pytest.mark.parametrize("type", ["np.int8", "mp.float", "np.int16"]) def test_variable_unsupported_dtypes(fieldset, type): - """Test that checks errors thrown for unsupported dtypes in JIT mode.""" + """Test that checks errors thrown for unsupported dtypes.""" TestParticle = ScipyParticle.add_variable("p", dtype=type, initial=10.0) with pytest.raises((RuntimeError, TypeError)): ParticleSet(fieldset, pclass=TestParticle, lon=[0], lat=[0])