Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
38 changes: 36 additions & 2 deletions imap_processing/cdf/config/imap_idex_l2b_variable_attrs.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -50,8 +50,8 @@ spin_phase:
VAR_TYPE: support_data
SCALETYP: linear
FILLVAL: *int_fillval
FORMAT: I4
VALIDMIN: 0.0
FORMAT: I8
VALIDMIN: 0
VALIDMAX: 360
LABL_PTR_1: spin_phase_labels
UNITS: deg
Expand Down Expand Up @@ -162,3 +162,37 @@ rate_calculation_quality_flags:
VAR_TYPE: data
UNITS: " "
DICT_KEY: SPASE>Support>SupportQuantity:QualityFlag

# Science acquisition on/off transition timestamps (TT2000 epoch values).
on_off_times:
  CATDESC: Science acquisition on/off event times.
  FIELDNAM: On/Off Times
  LABLAXIS: On/Off Times
  # Standard ISTP fill value for CDF_TIME_TT2000 (the most negative int64).
  FILLVAL: -9223372036854775808
  FORMAT: " "
  # VALIDMIN/VALIDMAX bound the supported time range in TT2000 nanoseconds.
  # NOTE(review): presumably these match the project's shared epoch
  # attributes (~2010 through ~2100) — confirm against the common config.
  VALIDMIN: 315576066184000000
  VALIDMAX: 3155716869184000000
  UNITS: ns
  VAR_TYPE: support_data
  SCALETYP: linear
  # Event timestamps are expected to be strictly increasing.
  MONOTON: INCREASE
  TIME_BASE: J2000
  TIME_SCALE: Terrestrial Time
  REFERENCE_POSITION: Rotating Earth Geoid
  RESOLUTION: ' '
  CDF_DATA_TYPE: "CDF_TIME_TT2000"
  DICT_KEY: SPASE>Support>SupportQuantity:Temporal

# Science acquisition on/off state flags, paired one-to-one with
# on_off_times via DEPEND_0.
on_off_events:
  CATDESC: Science acquisition on/off event values (1 = on, 0 = off).
  FIELDNAM: On/Off Events
  LABLAXIS: On/Off Events
  DEPEND_0: on_off_times
  VAR_TYPE: support_data
  SCALETYP: linear
  # 255 is the uint8 maximum, used as the fill value for this flag.
  FILLVAL: 255
  # I3 (not I1) so the Fortran display format can also represent the
  # three-digit FILLVAL of 255; valid data values remain 0 or 1.
  FORMAT: I3
  VALIDMIN: 0
  VALIDMAX: 1
  UNITS: " "
  DICT_KEY: SPASE>Support>SupportQuantity:Other

36 changes: 35 additions & 1 deletion imap_processing/cdf/config/imap_idex_l2c_variable_attrs.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -117,4 +117,38 @@ rate_by_mass_map:
FIELDNAM: Rate by Mass Map
UNITS: day^-1
FILLVAL: *double_fillval
DICT_KEY: SPASE>SupportQuantity:CountRate,Qualifier:Array
DICT_KEY: SPASE>SupportQuantity:CountRate,Qualifier:Array


# Science acquisition on/off transition timestamps (TT2000 epoch values).
on_off_times:
  CATDESC: Science acquisition on/off event times.
  FIELDNAM: On/Off Times
  LABLAXIS: On/Off Times
  # Standard ISTP fill value for CDF_TIME_TT2000 (the most negative int64).
  FILLVAL: -9223372036854775808
  FORMAT: " "
  # VALIDMIN/VALIDMAX bound the supported time range in TT2000 nanoseconds.
  # NOTE(review): presumably these match the project's shared epoch
  # attributes (~2010 through ~2100) — confirm against the common config.
  VALIDMIN: 315576066184000000
  VALIDMAX: 3155716869184000000
  UNITS: ns
  VAR_TYPE: support_data
  SCALETYP: linear
  # Event timestamps are expected to be strictly increasing.
  MONOTON: INCREASE
  TIME_BASE: J2000
  TIME_SCALE: Terrestrial Time
  REFERENCE_POSITION: Rotating Earth Geoid
  RESOLUTION: ' '
  CDF_DATA_TYPE: "CDF_TIME_TT2000"
  DICT_KEY: SPASE>Support>SupportQuantity:Temporal

# Science acquisition on/off state flags, paired one-to-one with
# on_off_times via DEPEND_0.
on_off_events:
  CATDESC: Science acquisition on/off event values (1 = on, 0 = off).
  FIELDNAM: On/Off Events
  LABLAXIS: On/Off Events
  DEPEND_0: on_off_times
  VAR_TYPE: support_data
  SCALETYP: linear
  # 255 is the uint8 maximum, used as the fill value for this flag.
  FILLVAL: 255
  # I3 (not I1) so the Fortran display format can also represent the
  # three-digit FILLVAL of 255; valid data values remain 0 or 1.
  FORMAT: I3
  VALIDMIN: 0
  VALIDMAX: 1
  UNITS: " "
  DICT_KEY: SPASE>Support>SupportQuantity:Other
4 changes: 4 additions & 0 deletions imap_processing/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -1004,9 +1004,13 @@ def do_processing(
source="idex", descriptor="sci-1week"
)
sci_dependencies = [load_cdf(f) for f in sci_files]
# sort science files by the first epoch value
sci_dependencies.sort(key=lambda ds: ds["epoch"].values[0])
hk_files = dependencies.get_file_paths(source="idex", descriptor="evt")
# Remove duplicate housekeeping files
hk_dependencies = [load_cdf(dep) for dep in list(set(hk_files))]
# sort housekeeping files by the first epoch value
hk_dependencies.sort(key=lambda ds: ds["epoch"].values[0])
datasets = idex_l2b(sci_dependencies, hk_dependencies)
return datasets

Expand Down
46 changes: 35 additions & 11 deletions imap_processing/idex/idex_l2b.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@

import numpy as np
import xarray as xr
from numpy._typing import NDArray

from imap_processing.ena_maps.ena_maps import SkyTilingType
from imap_processing.ena_maps.utils.spatial_utils import AzElSkyGrid
Expand Down Expand Up @@ -116,16 +117,23 @@ def idex_l2b(

# Concat all the l2a datasets together
l2a_dataset = xr.concat(l2a_datasets, dim="epoch")
epoch_doy_unique = np.unique(epoch_to_doy(l2a_dataset["epoch"].data))
epoch_doy = epoch_to_doy(l2a_dataset["epoch"].data)
# Use dict.fromkeys to preserve order while getting unique DOYs. We want to make
# sure the order of DOYs stays the same in case we are dealing with data that
# spans over the new year. E.g., we want 365 to come before 1 if we have data from
# Dec and Jan.
epoch_doy_unique = np.array(list(dict.fromkeys(epoch_doy)))
(
counts_by_charge,
counts_by_mass,
counts_by_charge_map,
counts_by_mass_map,
daily_epoch,
) = compute_counts_by_charge_and_mass(l2a_dataset, epoch_doy_unique)
# Get science acquisition start and stop times
_, evt_time, evt_values = get_science_acquisition_timestamps(evt_dataset)
# Get science acquisition percentage for each day
daily_on_percentage = get_science_acquisition_on_percentage(evt_dataset)
daily_on_percentage = get_science_acquisition_on_percentage(evt_time, evt_values)
(
rate_by_charge,
rate_by_mass,
Expand All @@ -144,16 +152,32 @@ def idex_l2b(
charge_bin_means = np.sqrt(CHARGE_BIN_EDGES[:-1] * CHARGE_BIN_EDGES[1:])
mass_bin_means = np.sqrt(MASS_BIN_EDGES[:-1] * MASS_BIN_EDGES[1:])
spin_phase_means = (SPIN_PHASE_BIN_EDGES[:-1] + SPIN_PHASE_BIN_EDGES[1:]) / 2

# convert to integers
spin_phase_means = spin_phase_means.astype(np.uint16)
# Define xarrays that are shared between l2b and l2c
epoch = xr.DataArray(
name="epoch",
data=daily_epoch,
dims="epoch",
attrs=idex_l2b_attrs.get_variable_attributes("epoch", check_schema=False),
)

common_vars = {
"on_off_times": xr.DataArray(
name="on_off_times",
data=evt_time,
dims="on_off_times",
attrs=idex_l2b_attrs.get_variable_attributes(
"on_off_times", check_schema=False
),
),
"on_off_events": xr.DataArray(
name="on_off_events",
data=np.asarray(evt_values, dtype=np.uint8),
dims="on_off_times",
attrs=idex_l2b_attrs.get_variable_attributes(
"on_off_events", check_schema=False
),
),
"impact_day_of_year": xr.DataArray(
name="impact_day_of_year",
data=epoch_doy_unique,
Expand Down Expand Up @@ -319,7 +343,6 @@ def idex_l2b(
attrs=idex_l2c_attrs.get_variable_attributes("rate_by_mass_map"),
),
}

l2b_dataset = xr.Dataset(
coords={"epoch": epoch},
data_vars=l2b_vars,
Expand All @@ -339,7 +362,6 @@ def idex_l2b(
l2c_dataset.attrs.update(map_attrs)

logger.info("IDEX L2B and L2C science data processing completed.")

return [l2b_dataset, l2c_dataset]


Expand Down Expand Up @@ -629,23 +651,25 @@ def get_science_acquisition_timestamps(
)


def get_science_acquisition_on_percentage(evt_dataset: xr.Dataset) -> dict:
def get_science_acquisition_on_percentage(
evt_time: NDArray, evt_values: NDArray
) -> dict:
"""
Calculate the percentage of time science acquisition was occurring for each day.

Parameters
----------
evt_dataset : xarray.Dataset
Contains IDEX event message data.
evt_time : np.ndarray
Array of timestamps for science acquisition start and stop events.
evt_values : np.ndarray
Array of values indicating if the event is a start (1) or stop (0).

Returns
-------
dict
Percentages of time the instrument was in science acquisition mode for each day
of year.
"""
# Get science acquisition start and stop times
_evt_logs, evt_time, evt_values = get_science_acquisition_timestamps(evt_dataset)
if len(evt_time) == 0:
logger.warning(
"No science acquisition events found in event dataset. Returning empty "
Expand Down
11 changes: 8 additions & 3 deletions imap_processing/tests/idex/test_idex_l2b.py
Original file line number Diff line number Diff line change
Expand Up @@ -97,6 +97,7 @@ def test_l2c_attrs_and_vars(
l2c_dataset["counts_by_mass_map"].sum(), len(l2a_dataset.epoch) * 2
)
assert l2c_dataset.sizes == {
"on_off_times": 4,
"epoch": 2,
"impact_charge": 10,
"mass": 10,
Expand Down Expand Up @@ -200,7 +201,8 @@ def test_science_acquisition_times(decom_test_data_evt: list[xr.Dataset]):

def test_get_science_acquisition_on_percentage(decom_test_data_evt: list[xr.Dataset]):
"""Test the function that calculates the percentage of uptime."""
on_percentages = get_science_acquisition_on_percentage(decom_test_data_evt[1])
_, evt_time, evt_event = get_science_acquisition_timestamps(decom_test_data_evt[1])
on_percentages = get_science_acquisition_on_percentage(evt_time, evt_event)
# We expect 1 DOY and ~87% uptime for the science acquisition.
assert len(on_percentages) == 1
# The DOY should be 8 for this test dataset.
Expand All @@ -211,7 +213,8 @@ def test_get_science_acquisition_on_percentage(decom_test_data_evt: list[xr.Data
evt_ds_shifted["epoch"] = evt_ds["epoch"] + NANOSECONDS_IN_DAY
combined_ds = xr.concat([evt_ds, evt_ds_shifted], dim="epoch")
# expect a second DOY.
on_percentages = get_science_acquisition_on_percentage(combined_ds)
_, evt_time, evt_event = get_science_acquisition_timestamps(combined_ds)
on_percentages = get_science_acquisition_on_percentage(evt_time, evt_event)
# We expect 2 DOYs
assert len(on_percentages) == 2
# The uptime should be less than 1% for both
Expand All @@ -225,7 +228,9 @@ def test_get_science_acquisition_on_percentage_no_acquisition(caplog):
"imap_processing.idex.idex_l2b.get_science_acquisition_timestamps",
return_value=([], [], []),
):
on_percentages = get_science_acquisition_on_percentage(xr.Dataset())
on_percentages = get_science_acquisition_on_percentage(
np.array([]), np.array([])
)
assert not on_percentages
assert "No science acquisition events found" in caplog.text

Expand Down