From 771159d4b14797733d5aa0c85b653331dc65c872 Mon Sep 17 00:00:00 2001
From: Jack Raymond <10591246+jackraymond@users.noreply.github.com>
Date: Thu, 19 Dec 2024 09:25:32 -0800
Subject: [PATCH] Change defaults for loop experiments back to
 max_num_embs=float('inf')

---
 tutorial_code/example1_1_fm_loop_balancing.py      |  9 ++++-----
 tutorial_code/example1_2_fm_loop_correlations.py   | 11 +++++------
 tutorial_code/example2_2_frustrated_loop_anneal.py |  9 ++++-----
 3 files changed, 13 insertions(+), 16 deletions(-)

diff --git a/tutorial_code/example1_1_fm_loop_balancing.py b/tutorial_code/example1_1_fm_loop_balancing.py
index 335efcb..a705716 100644
--- a/tutorial_code/example1_1_fm_loop_balancing.py
+++ b/tutorial_code/example1_1_fm_loop_balancing.py
@@ -244,7 +244,7 @@ def main(
     coupling: float = -0.2,
     num_iters: int = 100,
     num_iters_unshimmed_flux: int = 10,
-    max_num_emb: int = 1,
+    max_num_emb: float = float('Inf'),
     use_cache: bool = True,
 ) -> None:
     """Main function to run example.
@@ -264,10 +264,9 @@ def main(
             of flux_biases. Defaults to 100.
         num_iters_unshimmed_J (int): Number of iterations without shimming
             of couplings. Defaults to 200.
-        max_num_emb (int): Maximum number of embeddings to use per programming.
-            Published tutorial data uses several parallel embeddings, but this
-            tutorial uses 1 (max_num_emb=1) by default to bypass the otherwise
-            slow search process.
+        max_num_emb (float): Maximum number of embeddings to use per programming.
+            Published tutorial data uses the maximum number the process can
+            accommodate.
         use_cache (bool): When True embeddings and data are read from
             (and saved to) local directories, repeated executions can reuse
             collected data. When False embeddings and data are recalculated on
diff --git a/tutorial_code/example1_2_fm_loop_correlations.py b/tutorial_code/example1_2_fm_loop_correlations.py
index ea07978..c84f8fe 100644
--- a/tutorial_code/example1_2_fm_loop_correlations.py
+++ b/tutorial_code/example1_2_fm_loop_correlations.py
@@ -243,7 +243,7 @@ def main(
     num_iters: int = 300,
     num_iters_unshimmed_flux: int = 100,
     num_iters_unshimmed_J: int = 200,
-    max_num_emb: int = 1,
+    max_num_emb: float = float('Inf'),
     use_cache: bool = True,
 ) -> None:
     """Main function to run example.
@@ -259,10 +259,9 @@ def main(
         num_iters (int): Number of sequential programmings.
         num_iters_unshimmed_flux (int): Number of sequential programmings without flux shimming.
         num_iters_unshimmed_J (int): Number of sequential programmings without J shimming.
-        max_num_emb (int): Maximum number of embeddings to use per programming.
-            Published tutorial data uses several parallel embeddings, but this
-            tutorial uses 1 (max_num_emb=1) by default to bypass the otherwise
-            slow search process.
+        max_num_emb (float): Maximum number of embeddings to use per programming.
+            Published tutorial data uses the maximum number the process can
+            accommodate.
         use_cache (bool): When True embeddings and data are read from
             (and saved to) local directories, repeated executions can reuse
             collected data. When False embeddings and data are recalculated on
@@ -309,4 +308,4 @@ def main(
 
 
 if __name__ == "__main__":
-    main(use_cache=False)
+    main()
diff --git a/tutorial_code/example2_2_frustrated_loop_anneal.py b/tutorial_code/example2_2_frustrated_loop_anneal.py
index a2fa037..33e09d7 100644
--- a/tutorial_code/example2_2_frustrated_loop_anneal.py
+++ b/tutorial_code/example2_2_frustrated_loop_anneal.py
@@ -275,7 +275,7 @@ def main(
     num_iters: int = 300,
     num_iters_unshimmed_flux: int = 100,
     num_iters_unshimmed_J: int = 200,
-    max_num_emb: int = 1,
+    max_num_emb: float = float('Inf'),
     use_cache: bool = True,
 ) -> None:
     """Main function to run example
@@ -296,10 +296,9 @@ def main(
             iteratrions that doesn't shim flux_biases. Defaults to 100.
         num_iters_unshimmed_J (int): option to specify number of iterations
             that doesn't shim alpha_J. Defaults to 200.
-        max_num_emb (int): Maximum number of embeddings to use per programming.
-            Published tutorial data uses several parallel embeddings, but this
-            tutorial uses 1 (max_num_emb=1) by default to bypass the otherwise
-            slow search process.
+        max_num_emb (float): Maximum number of embeddings to use per programming.
+            Published tutorial data uses the maximum number the process can
+            accommodate.
         use_cache (bool): When True embeddings and data are read from
             (and saved to) local directories, repeated executions can reuse
             collected data. When False embeddings and data are recalculated on
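
Usage note: with this default restored, each programming searches for as many parallel embeddings as the processor can accommodate, which makes the first (uncached) run slower. Callers can still opt back into the quick single-embedding behaviour by overriding the default. A minimal sketch, assuming the tutorial's dependencies and solver access are configured and the interpreter is started from the tutorial_code directory:

    # Sketch only: cap the parallel-embedding search at one embedding,
    # reproducing the previous default. Any integer caps the search;
    # float('Inf') leaves it unbounded, since every finite embedding
    # count compares smaller than infinity.
    from example1_1_fm_loop_balancing import main

    main(max_num_emb=1)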