Skip to content
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
70 changes: 56 additions & 14 deletions tests/integration/icontracts/conftest.py
Original file line number Diff line number Diff line change
@@ -1,48 +1,92 @@
import pytest
import os
from dotenv import load_dotenv
from typing import Any

from tests.common.request import payload, post_request_localhost
from tests.common.response import has_success_status


def get_provider_config() -> dict[str, str]:
    """
    Build the provider configuration used by non-mock integration tests.

    Environment overrides:
        TEST_PROVIDER - provider name (default: openai)
        TEST_PROVIDER_MODEL - model name (default: gpt-4o)

    Example (local Ollama):
        TEST_PROVIDER=ollama TEST_PROVIDER_MODEL=llama3 pytest ...
    """
    provider = os.getenv("TEST_PROVIDER", "openai")
    model = os.getenv("TEST_PROVIDER_MODEL", "gpt-4o")
    return {"provider": provider, "model": model}


def get_mock_provider_config() -> dict[str, str]:
    """
    Build the provider configuration used by mock (TEST_WITH_MOCK_LLMS=true) tests.

    Environment overrides:
        TEST_MOCK_PROVIDER - provider name (default: openrouter)
        TEST_MOCK_MODEL - model name (default: @preset/rally-testnet-gpt-5-1)
        TEST_MOCK_API_KEY_ENV_VAR - env var holding the API key (default: OPENROUTERAPIKEY)
        TEST_MOCK_API_URL - base API URL (default: https://openrouter.ai/api)
    """
    # (config key) -> (env var to read, fallback value)
    spec = {
        "provider": ("TEST_MOCK_PROVIDER", "openrouter"),
        "model": ("TEST_MOCK_MODEL", "@preset/rally-testnet-gpt-5-1"),
        "api_key_env_var": ("TEST_MOCK_API_KEY_ENV_VAR", "OPENROUTERAPIKEY"),
        "api_url": ("TEST_MOCK_API_URL", "https://openrouter.ai/api"),
    }
    return {key: os.getenv(var, fallback) for key, (var, fallback) in spec.items()}


@pytest.fixture
def setup_validators():
created_validator_addresses = []

def _setup(mock_response=None):
def _setup(mock_response: Any = None) -> None:
nonlocal created_validator_addresses
if mock_llms():
mock_cfg = get_mock_provider_config()
# Mock mode: create validators with specific mock_response for this test
for _ in range(5):
result = post_request_localhost(
payload(
"sim_createValidator",
8,
"openrouter",
"@preset/rally-testnet-gpt-5-1",
mock_cfg["provider"],
mock_cfg["model"],
{"temperature": 0.75, "max_tokens": 500},
"openai-compatible",
{
"api_key_env_var": "OPENROUTERAPIKEY",
"api_url": "https://openrouter.ai/api",
"mock_response": mock_response if mock_response else {},
"api_key_env_var": mock_cfg["api_key_env_var"],
"api_url": mock_cfg["api_url"],
"mock_response": mock_response if mock_response is not None else {},
},
)
).json()
assert has_success_status(result)
created_validator_addresses.append(result["result"]["address"])
else:
# Non-mock mode: only create validators if not enough exist
cfg = get_provider_config()
# Non-mock mode: only create the validators that are still missing
validators_result = post_request_localhost(
payload("sim_getAllValidators")
).json()
assert has_success_status(validators_result)
existing_validators = validators_result.get("result", [])
if len(existing_validators) < 5:
validators_to_create = 5 - len(existing_validators)
if validators_to_create > 0:
result = post_request_localhost(
payload(
"sim_createRandomValidators", 5, 8, 12, ["openai"], ["gpt-4o"]
"sim_createRandomValidators",
validators_to_create,
8,
12,
[cfg["provider"]],
[cfg["model"]],
)
).json()
assert has_success_status(result)
Expand All @@ -61,12 +105,10 @@ def _setup(mock_response=None):
has_success_status(delete_result)


def mock_llms() -> bool:
    """
    Return True when tests should run against mocked LLM providers.

    Controlled by the TEST_WITH_MOCK_LLMS environment variable: any case
    variant of "true" enables mocking; anything else (including an unset
    variable) disables it.
    """
    env_var = os.getenv("TEST_WITH_MOCK_LLMS", "false")  # default: no mocking
    return env_var.lower() == "true"


def pytest_configure(config: Any) -> None:
    """
    Pytest hook: load environment variables from a .env file before test
    collection so provider names, models, and API keys are available to
    the fixtures above. override=True makes .env values take precedence
    over variables already present in the environment.
    """
    load_dotenv(override=True)