diff --git a/.github/workflows/run-e2e-nua-prod.yml b/.github/workflows/run-e2e-nua-prod.yml
index 3bafb1d..09ea068 100644
--- a/.github/workflows/run-e2e-nua-prod.yml
+++ b/.github/workflows/run-e2e-nua-prod.yml
@@ -6,7 +6,7 @@ on:
       - main
   schedule:
-    # run every at every saturdat at 12
-    - cron: '25 4 * * 6'
+    # run every day at 04:25 UTC
+    - cron: '25 4 * * *'
 jobs:
   test:
     runs-on: ubuntu-22.04
diff --git a/nua/e2e/regional/test_llm_generate.py b/nua/e2e/regional/test_llm_generate.py
index 5bc2cc1..2583b62 100644
--- a/nua/e2e/regional/test_llm_generate.py
+++ b/nua/e2e/regional/test_llm_generate.py
@@ -37,3 +37,23 @@ def test_llm_generate_nuclia_everest_v1(nua_config):
         "Which is the capital of Catalonia?", model="nuclia-everest-v1"
     )
     assert "Barcelona" in generated.answer
+
+
+def test_llm_generate_nuclia_mistral_small(nua_config):
+    if "stashify" not in nua_config:
+        # Let's only test on stashify as this model is not on prod
+        return
+
+    np = NucliaPredict()
+    generated = np.generate("Which is the capital of Catalonia?", model="mistral")
+    assert "Barcelona" in generated.answer
+
+
+def test_llm_generate_nuclia_mistral_large(nua_config):
+    if "stashify" not in nua_config:
+        # Let's only test on stashify as this model is not on prod
+        return
+
+    np = NucliaPredict()
+    generated = np.generate("Which is the capital of Catalonia?", model="azure-mistral")
+    assert "Barcelona" in generated.answer
diff --git a/nua/e2e/regional/test_llm_rag.py b/nua/e2e/regional/test_llm_rag.py
index 94e9131..fae7fdb 100644
--- a/nua/e2e/regional/test_llm_rag.py
+++ b/nua/e2e/regional/test_llm_rag.py
@@ -70,3 +70,37 @@ def test_llm_rag_nuclia_everest_v1(nua_config):
         model="nuclia-everest-v1",
     )
     assert "Eudald" in generated.answer
+
+
+def test_llm_rag_nuclia_mistral_small(nua_config):
+    if "stashify" not in nua_config:
+        # Let's only test on stashify as this model is not on prod
+        return
+
+    np = NucliaPredict()
+    generated = np.rag(
+        question="Which is the CEO of Nuclia?",
+        context=[
+            "Nuclia CTO is Ramon Navarro",
+            "Eudald Camprubí is CEO at the same company as Ramon Navarro",
+        ],
+        model="mistral",
+    )
+    assert "Eudald" in generated.answer
+
+
+def test_llm_rag_nuclia_mistral_large(nua_config):
+    if "stashify" not in nua_config:
+        # Let's only test on stashify as this model is not on prod
+        return
+
+    np = NucliaPredict()
+    generated = np.rag(
+        question="Which is the CEO of Nuclia?",
+        context=[
+            "Nuclia CTO is Ramon Navarro",
+            "Eudald Camprubí is CEO at the same company as Ramon Navarro",
+        ],
+        model="azure-mistral",
+    )
+    assert "Eudald" in generated.answer
diff --git a/nua/e2e/regional/test_llm_summarize.py b/nua/e2e/regional/test_llm_summarize.py
index 97aa0ef..7ad942a 100644
--- a/nua/e2e/regional/test_llm_summarize.py
+++ b/nua/e2e/regional/test_llm_summarize.py
@@ -56,3 +56,23 @@ def test_summarize_nuclia_everest_v1(nua_config):
     embed = np.summarize(DATA, model="nuclia-everest-v1")
     assert "Manresa" in embed.summary
     assert "Barcelona" in embed.summary
+
+
+def test_summarize_nuclia_mistral_small(nua_config):
+    if "stashify" not in nua_config:
+        # Let's only test on stashify as this model is not on prod
+        return
+    np = NucliaPredict()
+    embed = np.summarize(DATA, model="mistral")
+    assert "Manresa" in embed.summary
+    assert "Barcelona" in embed.summary
+
+
+def test_summarize_nuclia_mistral_large(nua_config):
+    if "stashify" not in nua_config:
+        # Let's only test on stashify as this model is not on prod
+        return
+    np = NucliaPredict()
+    embed = np.summarize(DATA, model="azure-mistral")
+    assert "Manresa" in embed.summary
+    assert "Barcelona" in embed.summary