Speed improvement #218

Open · wants to merge 2 commits into base: master
20 changes: 12 additions & 8 deletions scarches/dataset/scpoli/anndata.py
@@ -76,27 +76,31 @@ def __init__(self,
         self.cell_types = np.stack(self.cell_types).T
         self.cell_types = torch.tensor(self.cell_types, dtype=torch.long)

-    def __getitem__(self, index):
+    def __getitems__(self, indices):


Why introduce a new name here? __getitem__ in this implementation simply calls this method, so maybe it is better not to duplicate it in such a case. indices could simply be a Union[int, Collection[int]].

@moinfar (Contributor, Author) replied on May 29, 2024:


The core implementation is not duplicated: __getitem__ now calls __getitems__, which accepts both list indices and single indices.
The reason we have to maintain both interfaces is that the PyTorch data loader originally uses __getitem__; but if you also provide __getitems__, it will use that instead, and we benefit from fetching the samples of a batch together. I am not sure about newer versions of PyTorch, but the one scArches is using only fetches multiple samples at once when the __getitems__ interface is available.
You can also check this issue in the PyTorch repo.


I see, thanks! Didn't know that about PyTorch.

+        # Make sure this function supports both single-element and list input
         outputs = dict()

         if self._is_sparse:
-            x = torch.tensor(np.squeeze(self.data[index].toarray()), dtype=torch.float32)
+            x = torch.tensor(np.squeeze(self.data[indices].toarray()), dtype=torch.float32)
         else:
-            x = self.data[index]
+            x = self.data[indices]
         outputs["x"] = x

-        outputs["labeled"] = self.labeled_vector[index]
-        outputs["sizefactor"] = self.size_factors[index]
+        outputs["labeled"] = self.labeled_vector[indices]
+        outputs["sizefactor"] = self.size_factors[indices]

         if self.condition_keys:
-            outputs["batch"] = self.conditions[index, :]
-            outputs["combined_batch"] = self.conditions_combined[index]
+            outputs["batch"] = self.conditions[indices, :]
+            outputs["combined_batch"] = self.conditions_combined[indices]

         if self.cell_type_keys:
-            outputs["celltypes"] = self.cell_types[index, :]
+            outputs["celltypes"] = self.cell_types[indices, :]

         return outputs

+    def __getitem__(self, index):
+        return self.__getitems__(index)
+
     def __len__(self):
         return self.data.shape[0]
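For readers skimming the thread above, here is a minimal, hedged sketch of the pattern this file now follows (the class name, sizes, and toy sparse data are made up for illustration and are not the scArches dataset): __getitems__ materializes a whole batch with a single sparse slice, and __getitem__ simply delegates to it for loaders that only know the single-index interface.

```python
import numpy as np
import torch
from scipy import sparse


class BatchedDataset(torch.utils.data.Dataset):
    """Toy map-style dataset with the dual __getitem__/__getitems__ interface."""

    def __init__(self, n_obs=1000, n_vars=50):
        # Stand-in for the sparse expression matrix held by the real dataset.
        self.data = sparse.random(n_obs, n_vars, density=0.1, format="csr", dtype=np.float32)

    def __getitems__(self, indices):
        # One sparse row slice and one densification for the whole batch,
        # instead of one per sample.
        x = torch.tensor(np.squeeze(self.data[indices].toarray()), dtype=torch.float32)
        return {"x": x}

    def __getitem__(self, index):
        # Kept for DataLoader versions that only call the single-index interface.
        return self.__getitems__(index)

    def __len__(self):
        return self.data.shape[0]


batch = BatchedDataset().__getitems__([3, 7, 42])
print(batch["x"].shape)  # torch.Size([3, 50])
```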

35 changes: 18 additions & 17 deletions scarches/trainers/scpoli/_utils.py
@@ -67,6 +67,9 @@ def _print_progress_bar(iteration, total, prefix='', suffix='', decimals=1, leng
     sys.stdout.flush()

 def custom_collate(batch):
+    if isinstance(batch, dict):
+        return batch
+
     r"""Puts each data field into a tensor with outer dimension batch size"""
     np_str_obj_array_pattern = re.compile(r'[SaUO]')
     default_collate_err_msg_format = (
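A hedged usage sketch of how this early return interacts with the dataset change in the other file: on PyTorch versions whose map-style fetcher recognizes __getitems__ (roughly 2.0 and later), the loader hands the whole list of sampled indices to the dataset, so the collate function receives one already-batched dict and should just pass it through; older versions fall back to per-sample __getitem__ calls and the original field-wise collation. BatchedDataset refers to the toy sketch above, and the inline collate mirrors only the new early return, not the full custom_collate.

```python
from torch.utils.data import DataLoader, default_collate


def passthrough_collate(batch):
    # Mirrors the early return added above: a dict coming from __getitems__
    # is already batched, so there is nothing left to stack.
    if isinstance(batch, dict):
        return batch
    return default_collate(batch)


loader = DataLoader(BatchedDataset(), batch_size=128, shuffle=True,
                    collate_fn=passthrough_collate)
x = next(iter(loader))["x"]
print(x.shape)  # torch.Size([128, 50]) on either code path
```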
@@ -127,28 +130,26 @@ def train_test_split(adata, train_frac=0.85, condition_keys=None, cell_type_key=
         labeled_idx = indices[labeled_array == 1]
         unlabeled_idx = indices[labeled_array == 0]

-        train_labeled_idx = []
-        val_labeled_idx = []
-        train_unlabeled_idx = []
-        val_unlabeled_idx = []
+        train_labeled_idx = np.array([], dtype=int)
+        val_labeled_idx = np.array([], dtype=int)
+        train_unlabeled_idx = np.array([], dtype=int)
+        val_unlabeled_idx = np.array([], dtype=int)

         if len(labeled_idx) > 0:
-            cell_types = adata[labeled_idx].obs[cell_type_key].unique().tolist()
-            for cell_type in cell_types:
-                ct_idx = labeled_idx[adata[labeled_idx].obs[cell_type_key] == cell_type]
-                n_train_samples = int(np.ceil(train_frac * len(ct_idx)))
-                np.random.shuffle(ct_idx)
-                train_labeled_idx.append(ct_idx[:n_train_samples])
-                val_labeled_idx.append(ct_idx[n_train_samples:])
+            cell_type_info = adata[labeled_idx].obs[[cell_type_key]].copy()
+            cell_type_info['random'] = np.random.rand(len(cell_type_info.index))
+            cell_type_info['count_in_ct'] = cell_type_info.groupby(cell_type_key, observed=True)['random'].transform('count')
+            cell_type_info['rank_in_ct'] = cell_type_info.groupby(cell_type_key, observed=True)['random'].rank(method="first") - 1
+            cell_type_info['train'] = cell_type_info['count_in_ct'] * train_frac > cell_type_info['rank_in_ct']
+            train_labeled_idx = labeled_idx[cell_type_info['train']]
+            val_labeled_idx = labeled_idx[~cell_type_info['train']]
         if len(unlabeled_idx) > 0:
             n_train_samples = int(np.ceil(train_frac * len(unlabeled_idx)))
-            train_unlabeled_idx.append(unlabeled_idx[:n_train_samples])
-            val_unlabeled_idx.append(unlabeled_idx[n_train_samples:])
-        train_idx = train_labeled_idx + train_unlabeled_idx
-        val_idx = val_labeled_idx + val_unlabeled_idx
+            train_unlabeled_idx = unlabeled_idx[:n_train_samples]
+            val_unlabeled_idx = unlabeled_idx[n_train_samples:]

-        train_idx = np.concatenate(train_idx)
-        val_idx = np.concatenate(val_idx)
+        train_idx = np.concatenate([train_labeled_idx, train_unlabeled_idx])
+        val_idx = np.concatenate([val_labeled_idx, val_unlabeled_idx])

     elif condition_keys is not None:
         train_idx = []
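For clarity, the vectorized stratified split above can be read in isolation roughly as follows (toy labels and a fixed seed, purely illustrative): each row gets a random key, the key is ranked within its cell-type group, and rows whose rank falls below train_frac times the group size go to the training split, which reproduces the effect of the old per-type shuffle-and-slice loop without any Python-level iteration over cell types.

```python
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
labels = pd.DataFrame({"cell_type": rng.choice(["B", "T", "NK"], size=20)})
train_frac = 0.85

labels["random"] = rng.random(len(labels))
grouped = labels.groupby("cell_type", observed=True)["random"]
labels["count_in_ct"] = grouped.transform("count")        # group sizes
labels["rank_in_ct"] = grouped.rank(method="first") - 1    # 0-based rank of the random key
labels["train"] = labels["count_in_ct"] * train_frac > labels["rank_in_ct"]

train_idx = np.flatnonzero(labels["train"].to_numpy())
val_idx = np.flatnonzero(~labels["train"].to_numpy())
# Every cell type contributes roughly train_frac of its cells to the training split.
print(labels.groupby("cell_type", observed=True)["train"].mean())
```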