
Commit 3f4def7

fix formating
1 parent: c65ef27

File tree

8 files changed (+35, -24 lines)


src/ragas/executor.py

Lines changed: 0 additions & 1 deletion

@@ -3,7 +3,6 @@
 from concurrent.futures import ThreadPoolExecutor, as_completed
 from dataclasses import dataclass, field
 
-import numpy as np
 from tqdm.auto import tqdm
 
 

src/ragas/llms/base.py

Lines changed: 1 addition & 2 deletions

@@ -36,11 +36,10 @@ def is_multiple_completion_supported(llm: BaseLanguageModel) -> bool:
 
 @dataclass
 class BaseRagasLLM(ABC):
-
     def get_temperature(self, n: int) -> float:
         """Return the temperature to use for completion based on n."""
         return 0.3 if n > 1 else 1e-8
-
+
     @abstractmethod
     def generate_text(
         self,
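
For reference, a minimal sketch of what the get_temperature heuristic kept above means in practice, assuming LangchainLLMWrapper (imported in generator.py below) subclasses BaseRagasLLM and inherits the method unchanged:

# Sketch only, not part of this commit. Constructing ChatOpenAI needs
# OPENAI_API_KEY, although get_temperature itself makes no API call.
from langchain.chat_models import ChatOpenAI
from ragas.llms import LangchainLLMWrapper

llm = LangchainLLMWrapper(ChatOpenAI(model="gpt-3.5-turbo"))
print(llm.get_temperature(n=1))  # 1e-8: a single completion stays near-deterministic
print(llm.get_temperature(n=4))  # 0.3: multiple completions get some variation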

src/ragas/testset/docstore.py

Lines changed: 2 additions & 2 deletions

@@ -1,6 +1,6 @@
 import heapq
-import typing as t
 import logging
+import typing as t
 import uuid
 from abc import ABC, abstractmethod
 from dataclasses import dataclass, field
@@ -11,8 +11,8 @@
 import numpy.typing as npt
 from langchain.text_splitter import TextSplitter
 from langchain_core.documents import Document as LCDocument
-from llama_index.readers.schema import Document as LlamaindexDocument
 from langchain_core.pydantic_v1 import Field
+from llama_index.readers.schema import Document as LlamaindexDocument
 
 from ragas.async_utils import run_async_tasks
 from ragas.embeddings.base import BaseRagasEmbeddings, embedding_factory

src/ragas/testset/evolutions.py

Lines changed: 5 additions & 6 deletions

@@ -1,17 +1,17 @@
-from abc import ABC, abstractmethod
-from dataclasses import dataclass, field
 import logging
 import typing as t
-from fsspec.exceptions import asyncio
-from random import choice, choices
+from abc import ABC, abstractmethod
+from dataclasses import dataclass, field
+from random import choice
 
+from fsspec.exceptions import asyncio
 from langchain.prompts import ChatPromptTemplate
 from numpy.random import default_rng
 
 from ragas.llms import BaseRagasLLM
 from ragas.llms.json_load import load_as_json
 from ragas.llms.prompt import PromptValue
-from ragas.testset.docstore import Document, DocumentStore, Direction
+from ragas.testset.docstore import Direction, Document, DocumentStore, Node
 from ragas.testset.prompts import (
     FILTER_QUESTION,
     MULTICONTEXT_QUESTION,
@@ -20,7 +20,6 @@
     TABLE_QA,
     demonstrations,
 )
-from ragas.testset.docstore import Node
 
 rng = default_rng()
 logger = logging.getLogger(__name__)

src/ragas/testset/generator.py

Lines changed: 25 additions & 10 deletions

@@ -3,13 +3,12 @@
 
 from langchain.chat_models import ChatOpenAI
 from langchain.embeddings import OpenAIEmbeddings
-from ragas.llms import BaseRagasLLM, LangchainLLMWrapper
-from ragas.embeddings import BaseRagasEmbeddings
-from ragas.testset.docstore import DocumentStore, Document
-from ragas.testset.evolutions import SimpleEvolution
-
 from llama_index.readers.schema import Document as LlamaindexDocument
 
+from ragas.embeddings import BaseRagasEmbeddings
+from ragas.llms import BaseRagasLLM, LangchainLLMWrapper
+from ragas.testset.docstore import Document, DocumentStore, InMemoryDocumentStore
+
 
 @dataclass
 class TestsetGenerator:
@@ -24,15 +23,31 @@ def with_openai(
         generator_llm: str = "gpt-3.5-turbo",
         critic_llm: str = "gpt-4",
         embeddings: str = "text-embedding-ada-002",
+        docstore: t.Optional[DocumentStore] = None,
+        chunk_size: int = 512,
     ) -> "TestsetGenerator":
         generator_llm_model = LangchainLLMWrapper(ChatOpenAI(model=generator_llm))
         critic_llm_model = LangchainLLMWrapper(ChatOpenAI(model=critic_llm))
         embeddings_model = OpenAIEmbeddings(model=embeddings)
-        return cls(
-            generator_llm=generator_llm_model,
-            critic_llm=critic_llm_model,
-            embeddings=embeddings_model,
-        )
+        if docstore is None:
+            from langchain.text_splitter import TokenTextSplitter
+
+            splitter = TokenTextSplitter(chunk_size=chunk_size, chunk_overlap=0)
+            docstore = InMemoryDocumentStore(splitter)
+            return cls(
+                generator_llm=generator_llm_model,
+                critic_llm=critic_llm_model,
+                # TODO: remove type ignore after fixing embeddigns
+                embeddings=embeddings_model,  # type: ignore
+                docstore=docstore,
+            )
+        else:
+            return cls(
+                generator_llm=generator_llm_model,
+                critic_llm=critic_llm_model,
+                embeddings=embeddings_model,  # type: ignore
+                docstore=docstore,
+            )
 
     def generate_with_llamaindex_docs(self, documents: t.Sequence[LlamaindexDocument]):
         # chunk documents and add to docstore
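
A hedged usage sketch of the updated with_openai signature: when docstore is None, the factory now builds an InMemoryDocumentStore around a TokenTextSplitter(chunk_size, chunk_overlap=0) instead of requiring the caller to wire one up. SimpleDirectoryReader and the "./docs" path below are illustrative assumptions, not part of this commit, and generate_with_llamaindex_docs still has only a placeholder body here:

# Sketch only; assumes OPENAI_API_KEY is set and that the pre-0.10 llama_index
# layout used elsewhere in this commit provides SimpleDirectoryReader.
from llama_index import SimpleDirectoryReader

from ragas.testset.generator import TestsetGenerator

# No docstore argument, so with_openai creates an InMemoryDocumentStore with a
# TokenTextSplitter(chunk_size=512, chunk_overlap=0) internally.
generator = TestsetGenerator.with_openai(
    generator_llm="gpt-3.5-turbo",
    critic_llm="gpt-4",
    embeddings="text-embedding-ada-002",
    chunk_size=512,
)

docs = SimpleDirectoryReader("./docs").load_data()  # hypothetical document source
# Entry point being wired up in this commit; its body is still a stub above.
generator.generate_with_llamaindex_docs(docs)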

src/ragas/testset/testset_generator.py

Lines changed: 1 addition & 1 deletion

@@ -544,4 +544,4 @@ def generate(
             count += 1
             pbar.update(count)
 
-        return TestDataset(test_data=samples)
+        return TestDataset(test_data=samples)

(The deleted and re-added line are identical in text; this is how adding a missing newline at the end of the file typically renders.)

tests/e2e/test_adaptation.py

Lines changed: 0 additions & 1 deletion

@@ -1,4 +1,3 @@
-
 from ragas import adapt
 from ragas.metrics import context_recall
 

tests/unit/testset_generator/test_docstore.py

Lines changed: 1 addition & 1 deletion

@@ -5,7 +5,7 @@
 import pytest
 from langchain_core.embeddings import Embeddings
 
-from ragas.testset.docstore import InMemoryDocumentStore, Node, Direction
+from ragas.testset.docstore import Direction, InMemoryDocumentStore, Node
 
 
 def test_adjacent_nodes():
