Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 5 additions & 6 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -20,18 +20,17 @@ jobs:
submodules: false
coverage: codecov
envs: |
- linux: py38-oldestdeps
- linux: py39-online
- linux: py39
#- linux: py310
#- linux: py311
- linux: py312-devdeps
#- windows: py38
- linux: py312
#- windows: py39
- windows: py310
#- windows: py311
#- windows: py312
#- macos: py38
#- macos: py39
#- macos: py310
- macos: py311
#- macos: py312
- macos: py311-online
- macos: py312
- windows: py313-devdeps
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
`cdflib` is a python module to read/write CDF (Common Data Format `.cdf`) files without needing to install the
[CDF NASA library](https://cdf.gsfc.nasa.gov/).

Python >= 3.8 is required.
Python >= 3.9 is required.
The core of this package uses only numpy, with no complicated compiler requirements.

## Install
Expand Down
2 changes: 1 addition & 1 deletion cdflib/cdfwrite.py
Original file line number Diff line number Diff line change
Expand Up @@ -994,7 +994,7 @@ def _write_var_data_nonsparse(
if isinstance(indata, complex):
epoch16.append(indata.real)
epoch16.append(indata.imag)
indata = epoch16
indata = np.array(epoch16)

# Convert to byte stream
recs, data = self._convert_data(dataType, numElems, numValues, indata)
Expand Down
57 changes: 30 additions & 27 deletions cdflib/epochs.py
Original file line number Diff line number Diff line change
Expand Up @@ -239,14 +239,15 @@ def timestamp_to_cdfepoch(unixtime_data: npt.ArrayLike) -> np.ndarray:
for ud in times:
if not np.isnan(ud):
dt = np.datetime64(int(ud * 1000), "ms")
dt_item: datetime.datetime = dt.item()
dt_to_convert = [
dt.item().year,
dt.item().month,
dt.item().day,
dt.item().hour,
dt.item().minute,
dt.item().second,
int(dt.item().microsecond / 1000),
dt_item.year,
dt_item.month,
dt_item.day,
dt_item.hour,
dt_item.minute,
dt_item.second,
int(dt_item.microsecond / 1000),
]
converted_data = CDFepoch.compute(dt_to_convert)
else:
Expand All @@ -267,15 +268,16 @@ def timestamp_to_cdfepoch16(unixtime_data: npt.ArrayLike) -> np.ndarray:
for ud in times:
if not np.isnan(ud):
dt = np.datetime64(int(ud * 1000000), "us")
dt_item: datetime.datetime = dt.item()
dt_to_convert = [
dt.item().year,
dt.item().month,
dt.item().day,
dt.item().hour,
dt.item().minute,
dt.item().second,
int(dt.item().microsecond / 1000),
int(dt.item().microsecond % 1000),
dt_item.year,
dt_item.month,
dt_item.day,
dt_item.hour,
dt_item.minute,
dt_item.second,
int(dt_item.microsecond / 1000),
int(dt_item.microsecond % 1000),
0,
0,
]
Expand All @@ -298,15 +300,16 @@ def timestamp_to_tt2000(unixtime_data: npt.ArrayLike) -> np.ndarray:
for ud in times:
if not np.isnan(ud):
dt = np.datetime64(int(ud * 1000000), "us")
dt_item: datetime.datetime = dt.item()
dt_to_convert = [
dt.item().year,
dt.item().month,
dt.item().day,
dt.item().hour,
dt.item().minute,
dt.item().second,
int(dt.item().microsecond / 1000),
int(dt.item().microsecond % 1000),
dt_item.year,
dt_item.month,
dt_item.day,
dt_item.hour,
dt_item.minute,
dt_item.second,
int(dt_item.microsecond / 1000),
int(dt_item.microsecond % 1000),
0,
]
converted_data = CDFepoch.compute(dt_to_convert)
Expand Down Expand Up @@ -487,7 +490,7 @@ def breakdown_tt2000(tt2000: cdf_tt2000_type) -> np.ndarray:
999 ns is returned.
"""

new_tt2000 = np.atleast_1d(tt2000).astype(np.longlong)
new_tt2000 = np.atleast_1d(tt2000).astype(np.int64)
count = len(new_tt2000)
toutcs = np.zeros((9, count), dtype=int)
datxs = CDFepoch._LeapSecondsfromJ2000(new_tt2000)
Expand All @@ -498,8 +501,8 @@ def breakdown_tt2000(tt2000: cdf_tt2000_type) -> np.ndarray:
nanoSecsSinceJ2000[~post2000] += CDFepoch.T12hinNanoSecs
nanoSecsSinceJ2000[~post2000] -= CDFepoch.dTinNanoSecs

secsSinceJ2000 = (nanoSecsSinceJ2000 / CDFepoch.SECinNanoSecsD).astype(np.longlong)
nansecs = (nanoSecsSinceJ2000 - secsSinceJ2000 * CDFepoch.SECinNanoSecs).astype(np.longlong)
secsSinceJ2000 = (nanoSecsSinceJ2000 / CDFepoch.SECinNanoSecsD).astype(np.int64)
nansecs = (nanoSecsSinceJ2000 - (secsSinceJ2000 * CDFepoch.SECinNanoSecs)).astype(np.int64) # type: ignore

posNanoSecs = new_tt2000 > 0
secsSinceJ2000[posNanoSecs] -= 32
Expand All @@ -512,7 +515,7 @@ def breakdown_tt2000(tt2000: cdf_tt2000_type) -> np.ndarray:

t2s = secsSinceJ2000 * CDFepoch.SECinNanoSecs + nansecs

post72 = datxs[:, 0] > 0
post72: np.ndarray = datxs[:, 0] > 0
secsSinceJ2000[post72] -= datxs[post72, 0].astype(int)
epochs = CDFepoch.J2000Since0AD12hSec + secsSinceJ2000

Expand Down
2 changes: 1 addition & 1 deletion cdflib/xarray/cdf_to_xarray.py
Original file line number Diff line number Diff line change
Expand Up @@ -307,7 +307,7 @@ def _convert_fillvals_to_nan(var_data: npt.NDArray, var_atts: Dict[str, Any], va
new_data[new_data == var_atts["FILLVAL"]] = np.datetime64("nat")
else:
new_data[new_data == var_atts["FILLVAL"]] = np.nan
else:
elif new_data.size == 1:
if new_data == var_atts["FILLVAL"]:
if new_data.dtype.type == np.datetime64:
new_data[new_data == var_atts["FILLVAL"]] = np.array(np.datetime64("nat"))
Expand Down
130 changes: 91 additions & 39 deletions cdflib/xarray/xarray_to_cdf.py
Original file line number Diff line number Diff line change
Expand Up @@ -194,10 +194,12 @@ def _dtype_to_cdf_type(var: xr.DataArray, terminate_on_warning: bool = False) ->
return STRINGS_TO_DATATYPES[cdf_data_type], element_size


def _dtype_to_fillval(var: xr.DataArray, terminate_on_warning: bool = False) -> Union[np.number, np.str_, np.datetime64]:
def _dtype_to_fillval(
var: xr.DataArray, terminate_on_warning: bool = False
) -> Union[np.number, np.str_, np.datetime64, np.complex128]:
datatype, _ = _dtype_to_cdf_type(var, terminate_on_warning=terminate_on_warning)
if datatype in DATATYPE_FILLVALS:
return DATATYPE_FILLVALS[datatype] # type: ignore
return DATATYPE_FILLVALS[datatype] # type: ignore[return-value]
else:
return np.str_(" ")

Expand Down Expand Up @@ -247,50 +249,98 @@ def _verify_depend_dimensions(
coordinate_variable_name: str,
terminate_on_warning: bool = False,
) -> bool:
primary_data = np.array(dataset[primary_variable_name])
coordinate_data = np.array(dataset[coordinate_variable_name])
try:
primary_data = np.array(dataset[primary_variable_name])
coordinate_data = np.array(dataset[coordinate_variable_name])

if len(primary_data.shape) != 0 and len(coordinate_data.shape) == 0:
_warn_or_except(
f"ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but the dimensions do not match.",
terminate_on_warning,
)
return False
if len(primary_data.shape) != 0 and len(coordinate_data.shape) == 0:
_warn_or_except(
f"ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but the dimensions do not match.",
terminate_on_warning,
)
return False

if len(coordinate_data.shape) != 0 and len(primary_data.shape) == 0:
_warn_or_except(
f"ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but the dimensions do not match.",
terminate_on_warning,
)
return False
if len(coordinate_data.shape) != 0 and len(primary_data.shape) == 0:
_warn_or_except(
f"ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but the dimensions do not match.",
terminate_on_warning,
)
return False

if len(coordinate_data.shape) > 2:
_warn_or_except(
f"ISTP Compliance Warning: {coordinate_variable_name} has too many dimensions to be the DEPEND_{dimension_number} for variable {primary_variable_name}",
terminate_on_warning,
)
return False
if len(coordinate_data.shape) == 2:
if primary_data.shape[0] != coordinate_data.shape[0]:
if len(coordinate_data.shape) > 2:
_warn_or_except(
f"ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but the Epoch dimensions do not match.",
f"ISTP Compliance Warning: {coordinate_variable_name} has too many dimensions to be the DEPEND_{dimension_number} for variable {primary_variable_name}",
terminate_on_warning,
)
return False

if len(primary_data.shape) <= dimension_number:
_warn_or_except(
f"ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but {primary_variable_name} does not have that many dimensions",
terminate_on_warning,
)
return False
if len(coordinate_data.shape) == 2:
if primary_data.shape[0] != coordinate_data.shape[0]:
_warn_or_except(
f"ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but the Epoch dimensions do not match.",
terminate_on_warning,
)
return False

if primary_data.shape[dimension_number] != coordinate_data.shape[-1]:
_warn_or_except(
f"ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but the dimensions do not match.",
terminate_on_warning,
)
return False
# All variables should have at the very least a size of dimension_number
# (i.e. a variable with a DEPEND_2 should have 2 dimensions)
if len(primary_data.shape) < dimension_number:
_warn_or_except(
f"ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but {primary_variable_name} does not have that many dimensions",
terminate_on_warning,
)
return False

# All variables with a DEPEND_0 should always have a shape size of at least dimension_number + 1
# (i.e. a variable with a DEPEND_2 should have 2 dimensions, 2 for DEPEND_1 and DEPEND_2, and 1 for DEPEND_0)
if len(primary_data.shape) < dimension_number + 1:
if "VAR_TYPE" in dataset[primary_variable_name].attrs:
if dataset[primary_variable_name].attrs["VAR_TYPE"] == "data":
# Data variables should always have as many dimensions as there are DEPENDs
_warn_or_except(
f"ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but {primary_variable_name} does not have that many dimensions",
terminate_on_warning,
)
return False
else:
for key in dataset[primary_variable_name].attrs:
if key.lower() == "depend_0":
# support_data variables with a DEPEND_0 should always match the dimension number
_warn_or_except(
f"ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but {primary_variable_name} does not have that many dimensions",
terminate_on_warning,
)
return False

# Check that the size of the dimension that DEPEND_{i} is referring to is
# also the same size as DEPEND_{i}'s last dimension
for key in dataset[primary_variable_name].attrs:
if key.lower() == "depend_0":
if primary_data.shape[dimension_number] != coordinate_data.shape[-1]:
_warn_or_except(
f"ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but the dimensions do not match.",
terminate_on_warning,
)
return False
else:
if primary_data.shape[dimension_number - 1] != coordinate_data.shape[-1]:
_warn_or_except(
f"ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but the dimensions do not match.",
terminate_on_warning,
)
return False
except ISTPError as istp_e:
raise istp_e
except Exception as e:
if terminate_on_warning:
raise Exception(
f"Unknown error occured verifying {primary_variable_name}'s DEPEND_{dimension_number}, which is pointed to {coordinate_variable_name}. Error message: {e}"
)
else:
print(
f"Unknown error occured verifying {primary_variable_name}'s DEPEND_{dimension_number}, which is pointed to {coordinate_variable_name}"
)
return False

return True

Expand Down Expand Up @@ -333,7 +383,9 @@ def _dimension_checker(dataset: xr.Dataset, terminate_on_warning: bool = False)
if depend_regex.match(att.lower()) and att != "DEPEND_0":
if (dataset[var].attrs[att] in dataset) or (dataset[var].attrs[att] in dataset.coords):
depend_i = dataset[var].attrs[att]
if _verify_depend_dimensions(dataset, int(att[-1]), var, depend_i):
if _verify_depend_dimensions(
dataset, int(att[-1]), var, depend_i, terminate_on_warning=terminate_on_warning
):
istp_depend_dimension_list.append(dataset[var].attrs[att])
else:
_warn_or_except(
Expand Down Expand Up @@ -424,7 +476,7 @@ def _epoch_checker(dataset: xr.Dataset, dim_vars: List[str], terminate_on_warnin

# Ensure that the dimension is listed somewhere else in the dataset
if potential_depend_0 in dataset or potential_depend_0 in dataset.coords:
if _verify_depend_dimensions(dataset, 0, var, potential_depend_0):
if _verify_depend_dimensions(dataset, 0, var, potential_depend_0, terminate_on_warning=terminate_on_warning):
depend_0_list.append(potential_depend_0)
time_varying_dimensions.append(var)
else:
Expand Down
2 changes: 1 addition & 1 deletion mypy.ini
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ show_column_numbers = True

warn_unused_configs = True
warn_redundant_casts = True
warn_unused_ignores = True
warn_unused_ignores = False
strict_equality = True
strict_concatenate = True
check_untyped_defs = True
Expand Down
3 changes: 1 addition & 2 deletions setup.cfg
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,6 @@ classifiers =
Environment :: Console
Intended Audience :: Science/Research
Operating System :: OS Independent
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3.10
Programming Language :: Python :: 3.11
Expand All @@ -27,7 +26,7 @@ long_description = file: README.md
long_description_content_type = text/markdown

[options]
python_requires = >= 3.8
python_requires = >= 3.9
include_package_data = True
packages = cdflib
install_requires =
Expand Down
Loading
Loading