Merge pull request #185 from stanford-oval/costorm-integration
Costorm integration
Showing 45 changed files with 5,191 additions and 270 deletions.
@@ -0,0 +1,241 @@
"""
Co-STORM pipeline powered by GPT-4o/GPT-4o-mini and the Bing search engine (or another supported search API).
You need to set up the following environment variables to run this script:
    - OPENAI_API_KEY: OpenAI API key
    - OPENAI_API_TYPE: OpenAI API type (e.g., 'openai' or 'azure')
    - AZURE_API_KEY: Azure API key if using the Azure API
    - AZURE_API_BASE: Azure API base URL if using the Azure API
    - AZURE_API_VERSION: Azure API version if using the Azure API
    - BING_SEARCH_API_KEY: Bing Search API key (or YDC_API_KEY, SERPER_API_KEY, BRAVE_API_KEY, TAVILY_API_KEY, or SEARXNG_API_KEY, depending on the retriever you choose)
Output will be structured as below:
args.output_dir/
    log.json    # Log of the information-seeking conversation
    report.md   # Final article generated
"""

import os
import json
from argparse import ArgumentParser
from knowledge_storm.collaborative_storm.engine import CollaborativeStormLMConfigs, RunnerArgument, CoStormRunner
from knowledge_storm.collaborative_storm.modules.callback import LocalConsolePrintCallBackHandler
from knowledge_storm.lm import OpenAIModel, AzureOpenAIModel
from knowledge_storm.logging_wrapper import LoggingWrapper
from knowledge_storm.rm import YouRM, BingSearch, BraveRM, SerperRM, DuckDuckGoSearchRM, TavilySearchRM, SearXNG
from knowledge_storm.utils import load_api_key


def main(args):
    load_api_key(toml_file_path='secrets.toml')
    lm_config: CollaborativeStormLMConfigs = CollaborativeStormLMConfigs()
    openai_kwargs = {
        "api_key": os.getenv("OPENAI_API_KEY"),
        "api_provider": "openai",
        "temperature": 1.0,
        "top_p": 0.9,
        "api_base": None,
    } if os.getenv('OPENAI_API_TYPE') == 'openai' else {
        "api_key": os.getenv("AZURE_API_KEY"),
        "temperature": 1.0,
        "top_p": 0.9,
        "api_base": os.getenv("AZURE_API_BASE"),
        "api_version": os.getenv("AZURE_API_VERSION"),
    }

    ModelClass = OpenAIModel if os.getenv('OPENAI_API_TYPE') == 'openai' else AzureOpenAIModel
    # If you are using the Azure service, make sure the model name matches your own deployed model name.
    # The default names here are only used for demonstration and may not match your case.
    gpt_4o_mini_model_name = 'gpt-4o-mini'
    gpt_4o_model_name = 'gpt-4o'
    if os.getenv('OPENAI_API_TYPE') == 'azure':
        openai_kwargs['api_base'] = os.getenv('AZURE_API_BASE')
        openai_kwargs['api_version'] = os.getenv('AZURE_API_VERSION')

    # Co-STORM is an LM system, so different components can be powered by different models to balance cost
    # and quality. This example uses gpt-4o everywhere, but a cheaper/faster model (e.g., the gpt-4o-mini
    # name defined above) can be substituted for lighter-weight components such as question asking or
    # discourse management; stronger models are recommended for components that synthesize the collected
    # information, such as question answering and knowledge base organization.
    question_answering_lm = ModelClass(model=gpt_4o_model_name, max_tokens=1000, **openai_kwargs)
    discourse_manage_lm = ModelClass(model=gpt_4o_model_name, max_tokens=500, **openai_kwargs)
    utterance_polishing_lm = ModelClass(model=gpt_4o_model_name, max_tokens=2000, **openai_kwargs)
    warmstart_outline_gen_lm = ModelClass(model=gpt_4o_model_name, max_tokens=500, **openai_kwargs)
    question_asking_lm = ModelClass(model=gpt_4o_model_name, max_tokens=300, **openai_kwargs)
    knowledge_base_lm = ModelClass(model=gpt_4o_model_name, max_tokens=1000, **openai_kwargs)

    lm_config.set_question_answering_lm(question_answering_lm)
    lm_config.set_discourse_manage_lm(discourse_manage_lm)
    lm_config.set_utterance_polishing_lm(utterance_polishing_lm)
    lm_config.set_warmstart_outline_gen_lm(warmstart_outline_gen_lm)
    lm_config.set_question_asking_lm(question_asking_lm)
    lm_config.set_knowledge_base_lm(knowledge_base_lm)

    topic = input('Topic: ')
    runner_argument = RunnerArgument(
        topic=topic,
        retrieve_top_k=args.retrieve_top_k,
        max_search_queries=args.max_search_queries,
        total_conv_turn=args.total_conv_turn,
        max_search_thread=args.max_search_thread,
        max_search_queries_per_turn=args.max_search_queries_per_turn,
        warmstart_max_num_experts=args.warmstart_max_num_experts,
        warmstart_max_turn_per_experts=args.warmstart_max_turn_per_experts,
        warmstart_max_thread=args.warmstart_max_thread,
        max_thread_num=args.max_thread_num,
        max_num_round_table_experts=args.max_num_round_table_experts,
        moderator_override_N_consecutive_answering_turn=args.moderator_override_N_consecutive_answering_turn,
        node_expansion_trigger_count=args.node_expansion_trigger_count)
    logging_wrapper = LoggingWrapper(lm_config)
    callback_handler = LocalConsolePrintCallBackHandler() if args.enable_log_print else None

    # Co-STORM is a knowledge curation system which consumes information from the retrieval module.
    # Currently, the information source is the Internet, and we use a search engine API as the retrieval module.
    match args.retriever:
        case 'bing':
            rm = BingSearch(bing_search_api=os.getenv('BING_SEARCH_API_KEY'), k=runner_argument.retrieve_top_k)
        case 'you':
            rm = YouRM(ydc_api_key=os.getenv('YDC_API_KEY'), k=runner_argument.retrieve_top_k)
        case 'brave':
            rm = BraveRM(brave_search_api_key=os.getenv('BRAVE_API_KEY'), k=runner_argument.retrieve_top_k)
        case 'duckduckgo':
            rm = DuckDuckGoSearchRM(k=runner_argument.retrieve_top_k, safe_search='On', region='us-en')
        case 'serper':
            rm = SerperRM(serper_search_api_key=os.getenv('SERPER_API_KEY'), query_params={'autocorrect': True, 'num': 10, 'page': 1})
        case 'tavily':
            rm = TavilySearchRM(tavily_search_api_key=os.getenv('TAVILY_API_KEY'), k=runner_argument.retrieve_top_k, include_raw_content=True)
        case 'searxng':
            rm = SearXNG(searxng_api_key=os.getenv('SEARXNG_API_KEY'), k=runner_argument.retrieve_top_k)
        case _:
            raise ValueError(f'Invalid retriever: {args.retriever}. Choose one of "bing", "you", "brave", "duckduckgo", "serper", "tavily", or "searxng".')

    costorm_runner = CoStormRunner(lm_config=lm_config,
                                   runner_argument=runner_argument,
                                   logging_wrapper=logging_wrapper,
                                   rm=rm,
                                   callback_handler=callback_handler)

    # Warm start the system.
    costorm_runner.warm_start()

    # Below is an example of how users may interact with Co-STORM to seek information together.
    # In an actual deployment, we suggest letting the user decide whether to observe the agents' utterances or inject a turn.

    # Observe the Co-STORM LLM agents' utterances for one turn.
    for _ in range(1):
        conv_turn = costorm_runner.step()
        print(f"**{conv_turn.role}**: {conv_turn.utterance}\n")

    # Actively engage by injecting your own utterance.
    your_utterance = input('Your utterance: ')
    costorm_runner.step(user_utterance=your_utterance)

    # Continue observing.
    conv_turn = costorm_runner.step()
    print(f"**{conv_turn.role}**: {conv_turn.utterance}\n")

    # Generate the report.
    costorm_runner.knowledge_base.reogranize()
    article = costorm_runner.generate_report()

    # Save results.
    os.makedirs(args.output_dir, exist_ok=True)

    # Save the article.
    with open(os.path.join(args.output_dir, "report.md"), "w") as f:
        f.write(article)

    # Save logging.
    log_dump = costorm_runner.dump_logging_and_reset()
    with open(os.path.join(args.output_dir, "log.json"), "w") as f:
        json.dump(log_dump, f, indent=2)


if __name__ == '__main__':
    parser = ArgumentParser()
    # global arguments
    parser.add_argument('--output-dir', type=str, default='./results/co-storm',
                        help='Directory to store the outputs.')
    parser.add_argument('--retriever', type=str, choices=['bing', 'you', 'brave', 'serper', 'duckduckgo', 'tavily', 'searxng'],
                        help='The search engine API to use for retrieving information.')
    # hyperparameters for co-storm
    parser.add_argument(
        '--retrieve_top_k',
        type=int,
        default=10,
        help='Retrieve top k results for each query in retriever.'
    )
    parser.add_argument(
        '--max_search_queries',
        type=int,
        default=2,
        help='Maximum number of search queries to consider for each question.'
    )
    parser.add_argument(
        '--total_conv_turn',
        type=int,
        default=20,
        help='Maximum number of turns in conversation.'
    )
    parser.add_argument(
        '--max_search_thread',
        type=int,
        default=5,
        help='Maximum number of parallel threads for retriever.'
    )
    parser.add_argument(
        '--max_search_queries_per_turn',
        type=int,
        default=3,
        help='Maximum number of search queries to consider in each turn.'
    )
    parser.add_argument(
        '--warmstart_max_num_experts',
        type=int,
        default=3,
        help='Max number of experts in perspective-guided QA during warm start.'
    )
    parser.add_argument(
        '--warmstart_max_turn_per_experts',
        type=int,
        default=2,
        help='Max number of turns per perspective during warm start.'
    )
    parser.add_argument(
        '--warmstart_max_thread',
        type=int,
        default=3,
        help='Max number of threads for parallel perspective-guided QA during warm start.'
    )
    parser.add_argument(
        '--max_thread_num',
        type=int,
        default=10,
        help=("Maximum number of threads to use. "
              "Consider reducing it if you keep getting 'Exceed rate limit' errors when calling the LM API.")
    )
    parser.add_argument(
        '--max_num_round_table_experts',
        type=int,
        default=2,
        help='Max number of active experts in round table discussion.'
    )
    parser.add_argument(
        '--moderator_override_N_consecutive_answering_turn',
        type=int,
        default=3,
        help='Number of consecutive expert answering turns before the moderator overrides the conversation.'
    )
    parser.add_argument(
        '--node_expansion_trigger_count',
        type=int,
        default=10,
        help='Trigger node expansion for nodes that contain more than N snippets.'
    )

    # Boolean flags
    parser.add_argument(
        '--enable_log_print',
        action='store_true',
        help='If set, enable console log print.'
    )

    main(parser.parse_args())
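For reference, `load_api_key(toml_file_path='secrets.toml')` reads a TOML file and exports each key as an environment variable (see `load_api_key` further down in this commit). A minimal sketch of such a file, using placeholder values and only keys this script actually reads, might look like:

```toml
# Illustrative placeholders only; keep real keys out of version control.
OPENAI_API_TYPE = "openai"      # or "azure" (then also set AZURE_API_KEY, AZURE_API_BASE, AZURE_API_VERSION)
OPENAI_API_KEY = "sk-..."
BING_SEARCH_API_KEY = "..."     # or YDC_API_KEY / SERPER_API_KEY / BRAVE_API_KEY / TAVILY_API_KEY / SEARXNG_API_KEY, matching --retriever
```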
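An example invocation using the flags defined in the argument parser above (the script path is hypothetical, since the diff header does not show where this example lives in the repository):

```bash
# Hypothetical path; substitute the actual location of this example script.
python run_costorm_gpt.py --retriever bing --output-dir ./results/co-storm --enable_log_print
```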
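The comments in `main()` suggest that, in a real deployment, the user should decide at each turn whether to observe the agents or inject an utterance. A minimal sketch of that interaction pattern, assuming a `costorm_runner` that has already been warm-started as above (this loop is illustrative and not part of the commit):

```python
# Illustrative interaction loop built on the step() API used in main() above.
while True:
    user_input = input("Press Enter to observe, type an utterance to inject, or 'q' to quit: ").strip()
    if user_input.lower() == 'q':
        break
    if user_input:
        # Inject the user's own turn into the discourse.
        costorm_runner.step(user_utterance=user_input)
    else:
        # Let the Co-STORM agents take the next turn and print it.
        conv_turn = costorm_runner.step()
        print(f"**{conv_turn.role}**: {conv_turn.utterance}\n")
```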
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
@@ -1,7 +1,10 @@
-from .storm_wiki.engine import (
-    STORMWikiLMConfigs,
-    STORMWikiRunnerArguments,
-    STORMWikiRunner,
-)
+from .storm_wiki import *
+from .collaborative_storm import *
+from .encoder import *
+from .interface import *
+from .lm import *
+from .rm import *
+from .utils import *
+from .dataclass import *
 
-__version__ = "0.2.8"
+__version__ = "1.0.0"
@@ -0,0 +1,2 @@
from .modules import *
from .engine import *
Large diffs are not rendered by default.
@@ -0,0 +1,8 @@
from .article_generation import *
from .grounded_question_answering import *
from .grounded_question_generation import *
from .information_insertion_module import *
from .simulate_user import *
from .warmstart_hierarchical_chat import *
from .knowledge_base_summary import *
from .costorm_expert_utterance_generator import *
123 changes: 123 additions & 0 deletions
knowledge_storm/collaborative_storm/modules/article_generation.py
@@ -0,0 +1,123 @@ | ||
import dspy | ||
from concurrent.futures import ThreadPoolExecutor, as_completed | ||
from typing import Set, Union | ||
|
||
from .collaborative_storm_utils import clean_up_section | ||
from ...dataclass import KnowledgeBase, KnowledgeNode | ||
|
||
|
||
class ArticleGenerationModule(dspy.Module): | ||
"""Use the information collected from the information-seeking conversation to write a section.""" | ||
|
||
def __init__( | ||
self, | ||
engine: Union[dspy.dsp.LM, dspy.dsp.HFModel], | ||
): | ||
super().__init__() | ||
self.write_section = dspy.Predict(WriteSection) | ||
self.engine = engine | ||
|
||
def _get_cited_information_string( | ||
self, | ||
all_citation_index: Set[int], | ||
knowledge_base: KnowledgeBase, | ||
max_words: int = 1500, | ||
): | ||
information = [] | ||
cur_word_count = 0 | ||
for index in sorted(list(all_citation_index)): | ||
info = knowledge_base.info_uuid_to_info_dict[index] | ||
snippet = info.snippets[0] | ||
info_text = f"[{index}]: {snippet} (Question: {info.meta['question']}. Query: {info.meta['query']})" | ||
cur_snippet_length = len(info_text.split()) | ||
if cur_snippet_length + cur_word_count > max_words: | ||
break | ||
cur_word_count += cur_snippet_length | ||
information.append(info_text) | ||
return "\n".join(information) | ||
|
||
def gen_section( | ||
self, topic: str, node: KnowledgeNode, knowledge_base: KnowledgeBase | ||
): | ||
if node is None or len(node.content) == 0: | ||
return "" | ||
if ( | ||
node.synthesize_output is not None | ||
and node.synthesize_output | ||
and not node.need_regenerate_synthesize_output | ||
): | ||
return node.synthesize_output | ||
all_citation_index = node.collect_all_content() | ||
information = self._get_cited_information_string( | ||
all_citation_index=all_citation_index, knowledge_base=knowledge_base | ||
) | ||
with dspy.settings.context(lm=self.engine): | ||
synthesize_output = clean_up_section( | ||
self.write_section( | ||
topic=topic, info=information, section=node.name | ||
).output | ||
) | ||
node.synthesize_output = synthesize_output | ||
node.need_regenerate_synthesize_output = False | ||
return node.synthesize_output | ||
|
||
def forward(self, knowledge_base: KnowledgeBase): | ||
all_nodes = knowledge_base.collect_all_nodes() | ||
node_to_paragraph = {} | ||
|
||
# Define a function to generate paragraphs for nodes | ||
def _node_generate_paragraph(node): | ||
node_gen_paragraph = self.gen_section( | ||
topic=knowledge_base.topic, node=node, knowledge_base=knowledge_base | ||
) | ||
lines = node_gen_paragraph.split("\n") | ||
if lines[0].strip().replace("*", "").replace("#", "") == node.name: | ||
lines = lines[1:] | ||
node_gen_paragraph = "\n".join(lines) | ||
path = " -> ".join(node.get_path_from_root()) | ||
return path, node_gen_paragraph | ||
|
||
with ThreadPoolExecutor(max_workers=5) as executor: | ||
# Submit all tasks | ||
future_to_node = { | ||
executor.submit(_node_generate_paragraph, node): node | ||
for node in all_nodes | ||
} | ||
|
||
# Collect the results as they complete | ||
for future in as_completed(future_to_node): | ||
path, node_gen_paragraph = future.result() | ||
node_to_paragraph[path] = node_gen_paragraph | ||
|
||
def helper(cur_root, level): | ||
to_return = [] | ||
if cur_root is not None: | ||
hash_tag = "#" * level + " " | ||
cur_path = " -> ".join(cur_root.get_path_from_root()) | ||
node_gen_paragraph = node_to_paragraph[cur_path] | ||
to_return.append(f"{hash_tag}{cur_root.name}\n{node_gen_paragraph}") | ||
for child in cur_root.children: | ||
to_return.extend(helper(child, level + 1)) | ||
return to_return | ||
|
||
to_return = [] | ||
for child in knowledge_base.root.children: | ||
to_return.extend(helper(child, level=1)) | ||
|
||
return "\n".join(to_return) | ||
|
||
|
||
class WriteSection(dspy.Signature): | ||
"""Write a Wikipedia section based on the collected information. You will be given the topic, the section you are writing and relevant information. | ||
Each piece of information will be provided with its raw content along with the question and query that led to it.
Here is the format of your writing: | ||
Use [1], [2], ..., [n] in line (for example, "The capital of the United States is Washington, D.C.[1][3]."). You DO NOT need to include a References or Sources section to list the sources at the end. | ||
""" | ||
|
||
info = dspy.InputField(prefix="The collected information:\n", format=str) | ||
topic = dspy.InputField(prefix="The topic of the page: ", format=str) | ||
section = dspy.InputField(prefix="The section you need to write: ", format=str) | ||
output = dspy.OutputField( | ||
prefix="Write the section with proper inline citations (Start your writing. Don't include the page title, section name, or try to write other sections. Do not start the section with topic name.):\n", | ||
format=str, | ||
) |
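As an illustrative usage sketch (assuming a populated `KnowledgeBase` named `kb` and a dspy-compatible engine `article_lm`, both hypothetical here), the module renders the mind map as a Markdown-style article whose heading depth mirrors node depth:

```python
# `kb` and `article_lm` are assumed to be configured elsewhere.
article_gen = ArticleGenerationModule(engine=article_lm)
article_text = article_gen(knowledge_base=kb)  # dspy.Module call dispatches to forward()
print(article_text[:500])  # e.g. "# Background\n...\n## Early history\n..."
```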
110 changes: 110 additions & 0 deletions
knowledge_storm/collaborative_storm/modules/callback.py
@@ -0,0 +1,110 @@ | ||
from typing import List | ||
from ...interface import Information | ||
|
||
|
||
class BaseCallbackHandler: | ||
"""Base callback handler to manage callbacks from the Co-STORM pipeline.""" | ||
|
||
def on_turn_policy_planning_start(self, **kwargs): | ||
"""Run when the turn policy planning begins, before deciding the direction or goal for the next conversation turn.""" | ||
pass | ||
|
||
def on_expert_action_planning_start(self, **kwargs): | ||
"""Run when the expert action planning begins, preparing to determine the actions that each expert should take.""" | ||
pass | ||
|
||
def on_expert_action_planning_end(self, **kwargs): | ||
"""Run when the expert action planning ends, after deciding the actions that each expert should take.""" | ||
pass | ||
|
||
def on_expert_information_collection_start(self, **kwargs): | ||
"""Run when the expert information collection starts, start gathering all necessary data from selected sources.""" | ||
pass | ||
|
||
def on_expert_information_collection_end(self, info: List[Information], **kwargs): | ||
"""Run when the expert information collection ends, after gathering all necessary data from selected sources.""" | ||
pass | ||
|
||
def on_expert_utterance_generation_end(self, **kwargs): | ||
"""Run when the expert utterance generation ends, before creating responses or statements from each expert.""" | ||
pass | ||
|
||
def on_expert_utterance_polishing_start(self, **kwargs): | ||
"""Run when the expert utterance polishing begins, to refine and improve the clarity and coherence of generated content.""" | ||
pass | ||
|
||
def on_mindmap_insert_start(self, **kwargs): | ||
"""Run when the process of inserting new information into the mindmap starts.""" | ||
pass | ||
|
||
def on_mindmap_insert_end(self, **kwargs): | ||
"""Run when the process of inserting new information into the mindmap ends.""" | ||
pass | ||
|
||
def on_mindmap_reorg_start(self, **kwargs): | ||
"""Run when the reorganization of the mindmap begins, to restructure and optimize the flow of information.""" | ||
pass | ||
|
||
def on_expert_list_update_start(self, **kwargs): | ||
"""Run when the expert list update starts, to modify or refresh the list of active experts.""" | ||
pass | ||
|
||
def on_article_generation_start(self, **kwargs): | ||
"""Run when the article generation process begins, to compile and format the final article content.""" | ||
pass | ||
|
||
def on_warmstart_update(self, message, **kwargs): | ||
"""Run when the warm start process has update.""" | ||
pass | ||
|
||
|
||
class LocalConsolePrintCallBackHandler(BaseCallbackHandler): | ||
def __init__(self): | ||
pass | ||
|
||
def on_turn_policy_planning_start(self, **kwargs): | ||
"""Run when the turn policy planning begins, before deciding the direction or goal for the next conversation turn.""" | ||
print("Start planning next expert; inspect mind map; inspect system state.") | ||
|
||
def on_expert_action_planning_start(self, **kwargs): | ||
"""Run when the expert action planning begins, preparing to determine the actions that each expert should take.""" | ||
print("Reviewing discourse history; Deciding utterance intent.") | ||
|
||
def on_expert_information_collection_start(self, **kwargs): | ||
"""Run when the expert information collection ends, after gathering all necessary data from selected sources.""" | ||
print("Start searching with the search engine; browsing collected information.") | ||
|
||
def on_expert_information_collection_end(self, info: List[Information], **kwargs): | ||
"""Run when the expert information collection ends, after gathering all necessary data from selected sources.""" | ||
if info: | ||
urls = [i.url for i in info] | ||
information_string = "\n".join([f"Finish browsing {url}" for url in urls]) | ||
print(information_string) | ||
|
||
def on_expert_utterance_generation_end(self, **kwargs): | ||
"""Run when the expert utterance generation ends, before creating responses or statements from each expert.""" | ||
print("Finish generating utterance from collected information.") | ||
|
||
def on_expert_utterance_polishing_start(self, **kwargs): | ||
"""Run when the expert utterance polishing begins, to refine and improve the clarity and coherence of generated content.""" | ||
print("Start polishing utterance.") | ||
|
||
def on_mindmap_insert_start(self, **kwargs): | ||
"""Run when the process of inserting new information into the mindmap starts.""" | ||
print("Start inserting information into mind map.") | ||
|
||
def on_mindmap_insert_end(self, **kwargs): | ||
"""Run when the process of inserting new information into the mindmap ends.""" | ||
print("Finish inserting information into mind map.") | ||
|
||
def on_mindmap_reorg_start(self, **kwargs): | ||
"""Run when the reorganization of the mindmap begins, to restructure and optimize the flow of information.""" | ||
print("Start re-organizing mind map.") | ||
|
||
def on_expert_list_update_start(self, **kwargs): | ||
"""Run when the expert list update starts, to modify or refresh the list of active experts.""" | ||
print("Start updating expert candidates.") | ||
|
||
def on_warmstart_update(self, message, **kwargs): | ||
"""Run when the warm start process has update.""" | ||
print(f"Warm start update: {message}") |
381 changes: 381 additions & 0 deletions
knowledge_storm/collaborative_storm/modules/co_storm_agents.py
Large diffs are not rendered by default.
261 changes: 261 additions & 0 deletions
knowledge_storm/collaborative_storm/modules/collaborative_storm_utils.py
@@ -0,0 +1,261 @@ | ||
import dspy | ||
import os | ||
import re | ||
import sys | ||
import toml | ||
from typing import List, Tuple, Dict, Optional, TYPE_CHECKING | ||
|
||
if TYPE_CHECKING: | ||
from ..engine import RunnerArgument | ||
from ...interface import Information, Retriever, LMConfigs | ||
from ...logging_wrapper import LoggingWrapper | ||
from ...rm import BingSearch | ||
|
||
|
||
def extract_storm_info_snippet(info: Information, snippet_index: int) -> Information: | ||
""" | ||
Constructs a new Information instance with only the specified snippet index. | ||
Args: | ||
storm_info (Information): The original Information instance. | ||
snippet_index (int): The index of the snippet to retain. | ||
Returns: | ||
Information: A new Information instance with only the specified snippet. | ||
""" | ||
if snippet_index < 0 or snippet_index >= len(info.snippets): | ||
raise ValueError("Snippet index out of range") | ||
|
||
new_snippets = [info.snippets[snippet_index]] | ||
new_storm_info = Information( | ||
info.url, info.description, new_snippets, info.title, info.meta | ||
) | ||
return new_storm_info | ||
|
||
|
||
def format_search_results( | ||
searched_results: List[Information], | ||
info_max_num_words: int = 1000, | ||
mode: str = "brief", | ||
) -> Tuple[str, Dict[int, Information]]: | ||
""" | ||
Constructs a string from a list of search results with a specified word limit and returns a mapping of indices to Information. | ||
Args: | ||
searched_results (List[Information]): List of Information objects to process. | ||
info_max_num_words (int, optional): Maximum number of words allowed in the output string. Defaults to 1000. | ||
mode (str, optional): Mode of summarization. 'brief' takes only the first snippet of each Information. | ||
'extensive' adds snippets iteratively until the word limit is reached. Defaults to 'brief'. | ||
Returns: | ||
Tuple[str, Dict[int, Information]]: | ||
- Formatted string with search results, constrained by the word limit. | ||
- Dictionary mapping indices to the corresponding Information objects. | ||
""" | ||
total_length = 0 | ||
|
||
extracted_snippet_queue = [] | ||
max_snippets = ( | ||
max(len(info.snippets) for info in searched_results) if searched_results else 0 | ||
) | ||
max_snippets = 1 if mode == "brief" else max_snippets | ||
abort = False | ||
included_snippets = set() | ||
for i in range(max_snippets): | ||
for info in searched_results: | ||
if i < len(info.snippets) and not abort: | ||
cur_snippet = info.snippets[i] | ||
cur_snippet_len = len(info.snippets[i].split()) | ||
if total_length + cur_snippet_len > info_max_num_words: | ||
abort = True | ||
break | ||
if cur_snippet not in included_snippets: | ||
included_snippets.add(cur_snippet) | ||
info = extract_storm_info_snippet(info, snippet_index=i) | ||
extracted_snippet_queue.append(info) | ||
total_length += cur_snippet_len | ||
output = [] | ||
index_mapping = {} | ||
for idx, info in enumerate(extracted_snippet_queue): | ||
output.append(f"[{idx + 1}]: {info.snippets[0]}") | ||
index_mapping[idx + 1] = info | ||
assert -1 not in index_mapping | ||
return "\n".join(output), index_mapping | ||
|
||
|
||
def extract_cited_storm_info( | ||
response: str, index_to_storm_info: Dict[int, Information] | ||
) -> Dict[int, Information]: | ||
""" | ||
Extracts a sub-dictionary of Information instances that are cited in the response. | ||
Args: | ||
response (str): The response string containing inline citations like [1], [2], etc. | ||
index_to_storm_info (Dict[int, Information]): A dictionary mapping indices to Information instances. | ||
Returns: | ||
Dict[int, Information]: A sub-dictionary with only the indices that appear in the response. | ||
""" | ||
cited_indices = set(map(int, re.findall(r"\[(\d+)\]", response))) | ||
cited_storm_info = { | ||
index: info | ||
for index, info in index_to_storm_info.items() | ||
if index in cited_indices | ||
} | ||
return cited_storm_info | ||
|
||
|
||
def trim_output_after_hint(response: str, hint: str) -> str: | ||
""" | ||
Trims the output string to only keep the substring after the given hint (not including the hint). | ||
Args: | ||
response (str): The original output string. | ||
hint (str): The hint string after which the substring should be kept. | ||
Returns: | ||
str: The trimmed output string, or the original string if the hint is not found. | ||
""" | ||
if hint in response: | ||
start_index = response.find(hint) + len(hint) | ||
return response[start_index:].strip() | ||
return response.strip("\n") | ||
|
||
|
||
def separate_citations(text: str) -> str: | ||
""" | ||
Separates multiple citations within square brackets into individual citations. | ||
Args: | ||
text (str): The input string containing citations. | ||
Returns: | ||
str: The string with separated citations. | ||
""" | ||
|
||
# Define a function to process each match | ||
def replace_citations(match): | ||
citations = match.group(1).split(",") | ||
return "".join(f"[{citation.strip()}]" for citation in citations) | ||
|
||
# Use regular expressions to find and replace citations | ||
pattern = re.compile(r"\[(\d+(?:,\s*\d+)*)\]") | ||
return pattern.sub(replace_citations, text) | ||
|
||
|
||
def extract_and_remove_citations(text: str) -> Tuple[str, List[int]]: | ||
""" | ||
Removes single inline citations from the input string and returns the modified string and a list of citation integers. | ||
Args: | ||
text (str): The input string containing citations. | ||
Returns: | ||
Tuple[str, List[int]]: The string after removal of citations and a list of citation integers. | ||
""" | ||
citations = [] | ||
|
||
# Define a function to process each match | ||
def extract_citation(match): | ||
citation = int(match.group(1)) | ||
citations.append(citation) | ||
return "" | ||
|
||
# Use regular expressions to find and replace citations | ||
pattern = re.compile(r"\[(\d+)\]") | ||
modified_text = pattern.sub(extract_citation, text) | ||
|
||
return modified_text, citations | ||
|
||
|
||
def keep_first_and_last_paragraph(text: str) -> str: | ||
""" | ||
Processes the input text to keep the first and last paragraphs and replace | ||
the middle paragraphs with '[content omitted due to space limit]'. | ||
Args: | ||
text (str): The input text containing paragraphs separated by '\n\n'. | ||
Returns: | ||
str: The processed text. | ||
""" | ||
paragraphs = text.split("\n\n") | ||
|
||
if len(paragraphs) <= 3: | ||
return text | ||
|
||
first_paragraph = paragraphs[0] | ||
last_paragraph = "\n\n".join(paragraphs[-2:]) | ||
return ( | ||
f"{first_paragraph}\n\n[content omitted due to space limit]\n\n{last_paragraph}" | ||
) | ||
|
||
|
||
def clean_up_section(text): | ||
"""Clean up a section: | ||
1. Remove uncompleted sentences (usually due to output token limitation). | ||
2. Deduplicate individual groups of citations. | ||
3. Remove unnecessary summary.""" | ||
|
||
paragraphs = text.split("\n") | ||
output_paragraphs = [] | ||
summary_sec_flag = False | ||
for p in paragraphs: | ||
p = p.strip() | ||
if len(p) == 0: | ||
continue | ||
if not p.startswith("#"): | ||
p = separate_citations(p) | ||
if summary_sec_flag: | ||
if p.startswith("#"): | ||
summary_sec_flag = False | ||
else: | ||
continue | ||
if ( | ||
p.startswith("Overall") | ||
or p.startswith("In summary") | ||
or p.startswith("In conclusion") | ||
): | ||
continue | ||
if "# Summary" in p or "# Conclusion" in p: | ||
summary_sec_flag = True | ||
continue | ||
output_paragraphs.append(p) | ||
|
||
return "\n\n".join(output_paragraphs) # Join with '\n\n' for markdown format. | ||
|
||
|
||
def load_api_key(toml_file_path): | ||
try: | ||
with open(toml_file_path, "r") as file: | ||
data = toml.load(file) | ||
except FileNotFoundError: | ||
print(f"File not found: {toml_file_path}", file=sys.stderr) | ||
return | ||
except toml.TomlDecodeError: | ||
print(f"Error decoding TOML file: {toml_file_path}", file=sys.stderr) | ||
return | ||
# Set environment variables | ||
for key, value in data.items(): | ||
os.environ[key] = str(value) | ||
|
||
|
||
def _get_answer_question_module_instance( | ||
lm_config: LMConfigs, | ||
runner_argument: "RunnerArgument", | ||
logging_wrapper: LoggingWrapper, | ||
rm: Optional[dspy.Retrieve] = None, | ||
): | ||
from .grounded_question_answering import AnswerQuestionModule | ||
|
||
# configure retriever | ||
if rm is None: | ||
rm = BingSearch(k=runner_argument.retrieve_top_k) | ||
retriever = Retriever(rm=rm, max_thread=runner_argument.max_search_thread) | ||
# return AnswerQuestionModule instance | ||
return AnswerQuestionModule( | ||
retriever=retriever, | ||
max_search_queries=runner_argument.max_search_queries, | ||
question_answering_lm=lm_config.question_answering_lm, | ||
logging_wrapper=logging_wrapper, | ||
) |
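To make the behavior of the string utilities above concrete, here are a few illustrative calls (the expected values follow from the regex logic shown; they are not recorded test outputs):

```python
# Citation groups are split into individual brackets.
separate_citations("Transformers dominate NLP[2, 5].")
# -> "Transformers dominate NLP[2][5]."

# Inline citations are stripped out and returned separately.
extract_and_remove_citations("Transformers dominate NLP[2][5].")
# -> ("Transformers dominate NLP.", [2, 5])

# Everything up to and including the hint is discarded.
trim_output_after_hint("Queries:\n- query one\n- query two", hint="Queries:")
# -> "- query one\n- query two"

# Note: despite the name, the first paragraph and the last *two* paragraphs are kept.
keep_first_and_last_paragraph("A\n\nB\n\nC\n\nD")
# -> "A\n\n[content omitted due to space limit]\n\nC\n\nD"
```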
160 changes: 160 additions & 0 deletions
knowledge_storm/collaborative_storm/modules/costorm_expert_utterance_generator.py
@@ -0,0 +1,160 @@ | ||
import dspy | ||
from typing import Union | ||
|
||
from .callback import BaseCallbackHandler | ||
from .collaborative_storm_utils import ( | ||
trim_output_after_hint, | ||
extract_and_remove_citations, | ||
keep_first_and_last_paragraph, | ||
) | ||
|
||
from .grounded_question_answering import AnswerQuestionModule | ||
from .grounded_question_generation import ConvertUtteranceStyle | ||
from ...dataclass import ConversationTurn | ||
from ...logging_wrapper import LoggingWrapper | ||
|
||
|
||
class GenExpertActionPlanning(dspy.Signature): | ||
""" | ||
You are an invited speaker in the round table conversation. Your task is to make a very short note to your assistant to help you prepare for your turn in the conversation. | ||
You will be given the topic we are discussing, your expertise, and the conversation history. | ||
Take a look at conversation history, especially last few turns, then let your assistant prepare the material for you with one of following ways. | ||
1. Original Question: Initiates a new question to other speakers. | ||
2. Further Details: Provides additional information. | ||
3. Information Request: Requests information from other speakers. | ||
4. Potential Answer: Offers a possible solution or answer. | ||
Strictly follow this format: [type of contribution]: [one sentence description]. For example, Original Question: [description] | ||
""" | ||
|
||
topic = dspy.InputField(prefix="topic of discussion: ", format=str) | ||
expert = dspy.InputField(prefix="You are inivited as: ", format=str) | ||
summary = dspy.InputField(prefix="Discussion history: \n", format=str) | ||
last_utterance = dspy.InputField( | ||
prefix="Last utterance in the conversation: \n", format=str | ||
) | ||
resposne = dspy.OutputField( | ||
prefix="Now give your note. Start with one of [Original Question, Further Details, Information Request, Potential Answer] with one sentence description\n", | ||
format=str, | ||
) | ||
|
||
|
||
class CoStormExpertUtteranceGenerationModule(dspy.Module): | ||
def __init__( | ||
self, | ||
action_planning_lm: Union[dspy.dsp.LM, dspy.dsp.HFModel], | ||
utterance_polishing_lm: Union[dspy.dsp.LM, dspy.dsp.HFModel], | ||
answer_question_module: AnswerQuestionModule, | ||
logging_wrapper: LoggingWrapper, | ||
callback_handler: BaseCallbackHandler = None, | ||
): | ||
self.action_planning_lm = action_planning_lm | ||
self.utterance_polishing_lm = utterance_polishing_lm | ||
self.expert_action = dspy.Predict(GenExpertActionPlanning) | ||
self.change_style = dspy.Predict(ConvertUtteranceStyle) | ||
self.answer_question_module = answer_question_module | ||
self.logging_wrapper = logging_wrapper | ||
self.callback_handler = callback_handler | ||
|
||
def parse_action(self, action): | ||
action_types = [ | ||
"Original Question", | ||
"Further Details", | ||
"Information Request", | ||
"Potential Answer", | ||
] | ||
for action_type in action_types: | ||
if f"{action_type}:" in action: | ||
return action_type, trim_output_after_hint(action, f"{action_type}:") | ||
elif f"[{action_type}]:" in action: | ||
return action_type, trim_output_after_hint(action, f"[{action_type}]:") | ||
return "Undefined", "" | ||
|
||
def polish_utterance( | ||
self, conversation_turn: ConversationTurn, last_conv_turn: ConversationTurn | ||
): | ||
# change utterance style | ||
action_type = conversation_turn.utterance_type | ||
with self.logging_wrapper.log_event( | ||
"RoundTableConversationModule.ConvertUtteranceStyle" | ||
): | ||
with dspy.settings.context( | ||
lm=self.utterance_polishing_lm, show_guidelines=False | ||
): | ||
action_string = ( | ||
f"{action_type} about: {conversation_turn.claim_to_make}" | ||
) | ||
if action_type in ["Original Question", "Information Request"]: | ||
action_string = f"{action_type}" | ||
last_expert_utterance_wo_citation, _ = extract_and_remove_citations( | ||
last_conv_turn.utterance | ||
) | ||
trimmed_last_expert_utterance = keep_first_and_last_paragraph( | ||
last_expert_utterance_wo_citation | ||
) | ||
utterance = self.change_style( | ||
expert=conversation_turn.role, | ||
action=action_string, | ||
prev=trimmed_last_expert_utterance, | ||
content=conversation_turn.raw_utterance, | ||
).utterance | ||
conversation_turn.utterance = utterance | ||
|
||
def forward( | ||
self, | ||
topic: str, | ||
current_expert: str, | ||
conversation_summary: str, | ||
last_conv_turn: ConversationTurn, | ||
): | ||
last_utterance, _ = extract_and_remove_citations(last_conv_turn.utterance) | ||
if last_conv_turn.utterance_type in [ | ||
"Original Question", | ||
"Information Request", | ||
]: | ||
action_type = "Potential Answer" | ||
action_content = last_utterance | ||
else: | ||
with self.logging_wrapper.log_event( | ||
"CoStormExpertUtteranceGenerationModule: GenExpertActionPlanning" | ||
): | ||
with dspy.settings.context( | ||
lm=self.action_planning_lm, show_guidelines=False | ||
): | ||
action = self.expert_action( | ||
topic=topic, | ||
expert=current_expert, | ||
summary=conversation_summary, | ||
last_utterance=last_utterance, | ||
).resposne | ||
action_type, action_content = self.parse_action(action) | ||
|
||
if self.callback_handler is not None: | ||
self.callback_handler.on_expert_action_planning_end() | ||
# get response | ||
conversation_turn = ConversationTurn( | ||
role=current_expert, raw_utterance="", utterance_type=action_type | ||
) | ||
|
||
if action_type == "Undefined": | ||
raise Exception(f"unexpected output: {action}") | ||
elif action_type in ["Further Details", "Potential Answer"]: | ||
with self.logging_wrapper.log_event( | ||
"RoundTableConversationModule: QuestionAnswering" | ||
): | ||
grounded_answer = self.answer_question_module( | ||
topic=topic, | ||
question=action_content, | ||
mode="brief", | ||
style="conversational and concise", | ||
callback_handler=self.callback_handler, | ||
) | ||
conversation_turn.claim_to_make = action_content | ||
conversation_turn.raw_utterance = grounded_answer.response | ||
conversation_turn.queries = grounded_answer.queries | ||
conversation_turn.raw_retrieved_info = grounded_answer.raw_retrieved_info | ||
conversation_turn.cited_info = grounded_answer.cited_info | ||
elif action_type in ["Original Question", "Information Request"]: | ||
conversation_turn.raw_utterance = action_content | ||
|
||
return dspy.Prediction(conversation_turn=conversation_turn) |
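For reference, `parse_action` above maps a planner note back to a structured intent. Illustrative calls, given a constructed `CoStormExpertUtteranceGenerationModule` instance `gen_module` (construction omitted; the input strings are made up):

```python
gen_module.parse_action("Original Question: What drives urban heat islands?")
# -> ("Original Question", "What drives urban heat islands?")

gen_module.parse_action("free-form text with no recognized prefix")
# -> ("Undefined", "")
```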
83 changes: 83 additions & 0 deletions
knowledge_storm/collaborative_storm/modules/expert_generation.py
@@ -0,0 +1,83 @@ | ||
import dspy | ||
import re | ||
from typing import Union | ||
|
||
|
||
class GenerateExpertGeneral(dspy.Signature): | ||
"""You need to select a group of diverse experts who will be suitable to be invited to a roundtable discussion on the given topic. | ||
Each expert should represent a different perspective, role, or affiliation related to this topic. | ||
You can use the background information provided about the topic for inspiration. For each expert, add a description of their expertise and what they will focus on during the discussion. | ||
No need to include speakers name in the output. | ||
Strictly follow format below: | ||
1. [speaker 1 role]: [speaker 1 short description] | ||
2. [speaker 2 role]: [speaker 2 short description] | ||
""" | ||
|
||
topic = dspy.InputField(prefix="Topic of interest:", format=str) | ||
background_info = dspy.InputField( | ||
prefix="Background information about the topic:\n", format=str | ||
) | ||
topN = dspy.InputField(prefix="Number of speakers needed: ", format=str) | ||
experts = dspy.OutputField(format=str) | ||
|
||
|
||
class GenerateExpertWithFocus(dspy.Signature): | ||
""" | ||
You need to select a group of speakers who will be suitable to have roundtable discussion on the [topic] of specific [focus]. | ||
You may consider inviting speakers having opposite stands on the topic; speakers representing different interest parties; Ensure that the selected speakers are directly connected to the specific context and scenario provided. | ||
For example, if the discussion focus is about a recent event at a specific university, consider inviting students, faculty members, journalists covering the event, university officials, and local community members. | ||
Use the background information provided about the topic for inspiration. For each speaker, add a description of their interests and what they will focus on during the discussion. | ||
No need to include speakers name in the output. | ||
Strictly follow format below: | ||
1. [speaker 1 role]: [speaker 1 short description] | ||
2. [speaker 2 role]: [speaker 2 short description] | ||
""" | ||
|
||
topic = dspy.InputField(prefix="Topic of interest:", format=str) | ||
background_info = dspy.InputField(prefix="Background information:\n", format=str) | ||
focus = dspy.InputField(prefix="Discussion focus: ", format=str) | ||
topN = dspy.InputField(prefix="Number of speakers needed: ", format=str) | ||
experts = dspy.OutputField(format=str) | ||
|
||
|
||
class GenerateExpertModule(dspy.Module): | ||
def __init__(self, engine: Union[dspy.dsp.LM, dspy.dsp.HFModel]): | ||
self.engine = engine | ||
self.generate_expert_general = dspy.Predict(GenerateExpertGeneral) | ||
self.generate_expert_w_focus = dspy.ChainOfThought(GenerateExpertWithFocus) | ||
|
||
def trim_background(self, background: str, max_words: int = 100): | ||
words = background.split() | ||
cur_len = len(words) | ||
if cur_len <= max_words: | ||
return background | ||
trimmed_words = words[: min(cur_len, max_words)] | ||
trimmed_background = " ".join(trimmed_words) | ||
return f"{trimmed_background} [rest content omitted]." | ||
|
||
def forward( | ||
self, topic: str, num_experts: int, background_info: str = "", focus: str = "" | ||
): | ||
with dspy.settings.context(lm=self.engine, show_guidelines=False): | ||
if not focus: | ||
output = self.generate_expert_general( | ||
topic=topic, background_info=background_info, topN=num_experts | ||
).experts | ||
else: | ||
background_info = self.trim_background( | ||
background=background_info, max_words=100 | ||
) | ||
output = self.generate_expert_w_focus( | ||
topic=topic, | ||
background_info=background_info, | ||
focus=focus, | ||
topN=num_experts, | ||
).experts | ||
output = output.replace("*", "").replace("[", "").replace("]", "") | ||
expert_list = [] | ||
for s in output.split("\n"): | ||
match = re.search(r"\d+\.\s*(.*)", s) | ||
if match: | ||
expert_list.append(match.group(1)) | ||
expert_list = [expert.strip() for expert in expert_list if expert.strip()] | ||
return dspy.Prediction(experts=expert_list, raw_output=output) |
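A hedged usage sketch for the module above (the engine `lm` and the topic are placeholders; the output shape follows from the numbered-list parsing in `forward`):

```python
# `lm` is any dspy-compatible language model configured elsewhere.
expert_gen = GenerateExpertModule(engine=lm)
prediction = expert_gen.forward(topic="Urban heat islands", num_experts=3, background_info="", focus="")
# prediction.experts is a list of strings shaped like "role: short description", e.g.
# ["Climatologist: focuses on measurement and attribution", "City planner: focuses on mitigation policy", ...]
print(prediction.experts)
```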
163 changes: 163 additions & 0 deletions
knowledge_storm/collaborative_storm/modules/grounded_question_answering.py
@@ -0,0 +1,163 @@ | ||
import dspy | ||
from typing import Union, List | ||
|
||
from .callback import BaseCallbackHandler | ||
from .collaborative_storm_utils import ( | ||
trim_output_after_hint, | ||
format_search_results, | ||
extract_cited_storm_info, | ||
separate_citations, | ||
) | ||
from ...logging_wrapper import LoggingWrapper | ||
from ...utils import ArticleTextProcessing | ||
from ...interface import Information | ||
|
||
|
||
class QuestionToQuery(dspy.Signature): | ||
"""You want to answer the question or support a claim using Google search. What do you type in the search box? | ||
The question is raised in a round table discussion on a topic. The question may or may not focus on the topic itself. | ||
Write the queries you will use in the following format: | ||
- query 1 | ||
- query 2 | ||
... | ||
- query n""" | ||
|
||
topic = dspy.InputField(prefix="Topic context:", format=str) | ||
question = dspy.InputField( | ||
prefix="I want to collect information about: ", format=str | ||
) | ||
queries = dspy.OutputField(prefix="Queries: \n", format=str) | ||
|
||
|
||
class AnswerQuestion(dspy.Signature): | ||
"""You are an expert who can use information effectively. You have gathered the related information and will now use the information to form a response. | ||
Make your response as informative as possible and make sure every sentence is supported by the gathered information. | ||
If [Gathered information] is not directly related to the [Topic] and [Question], provide the most relevant answer you can based on the available information, and explain any limitations or gaps. | ||
Use [1], [2], ..., [n] in line (for example, "The capital of the United States is Washington, D.C.[1][3]."). | ||
You DO NOT need to include a References or Sources section to list the sources at the end. The style of writing should be formal. | ||
""" | ||
|
||
topic = dspy.InputField(prefix="Topic you are discussing about:", format=str) | ||
question = dspy.InputField(prefix="You want to provide insight on: ", format=str) | ||
info = dspy.InputField(prefix="Gathered information:\n", format=str) | ||
style = dspy.InputField(prefix="Style of your response should be:", format=str) | ||
answer = dspy.OutputField( | ||
prefix="Now give your response. (Try to use as many different sources as possible and do not hallucinate.)", | ||
format=str, | ||
) | ||
|
||
|
||
class AnswerQuestionModule(dspy.Module): | ||
def __init__( | ||
self, | ||
retriever: dspy.Retrieve, | ||
max_search_queries: int, | ||
question_answering_lm: Union[dspy.dsp.LM, dspy.dsp.HFModel], | ||
logging_wrapper: LoggingWrapper, | ||
): | ||
super().__init__() | ||
self.question_answering_lm = question_answering_lm | ||
self.question_to_query = dspy.Predict(QuestionToQuery) | ||
self.answer_question = dspy.Predict(AnswerQuestion) | ||
self.retriever = retriever | ||
self.max_search_queries = max_search_queries | ||
self.logging_wrapper = logging_wrapper | ||
|
||
def retrieve_information(self, topic, question): | ||
# decompose question to queries | ||
with self.logging_wrapper.log_event( | ||
f"AnswerQuestionModule.question_to_query ({hash(question)})" | ||
): | ||
with dspy.settings.context(lm=self.question_answering_lm): | ||
queries = self.question_to_query(topic=topic, question=question).queries | ||
queries = trim_output_after_hint(queries, hint="Queries:") | ||
queries = [ | ||
q.replace("-", "").strip().strip('"').strip('"').strip() | ||
for q in queries.split("\n") | ||
] | ||
queries = queries[: self.max_search_queries] | ||
self.logging_wrapper.add_query_count(count=len(queries)) | ||
with self.logging_wrapper.log_event( | ||
f"AnswerQuestionModule.retriever.retrieve ({hash(question)})" | ||
): | ||
# retrieve information using retriever | ||
searched_results: List[Information] = self.retriever.retrieve( | ||
list(set(queries)), exclude_urls=[] | ||
) | ||
# update storm information meta to include the question | ||
for storm_info in searched_results: | ||
storm_info.meta["question"] = question | ||
return queries, searched_results | ||
|
||
def forward( | ||
self, | ||
topic: str, | ||
question: str, | ||
mode: str = "brief", | ||
style: str = "conversational", | ||
callback_handler: BaseCallbackHandler = None, | ||
): | ||
""" | ||
Processes a topic and question to generate a response with relevant information and citations. | ||
Args: | ||
topic (str): The topic of interest. | ||
question (str): The specific question related to the topic. | ||
mode (str, optional): Mode of summarization. 'brief' takes only the first snippet of each Information. | ||
'extensive' adds snippets iteratively until the word limit is reached. Defaults to 'brief'. | ||
Returns: | ||
dspy.Prediction: An object containing the following: | ||
- question (str): the question to answer | ||
- queries (List[str]): List of query strings used for information retrieval. | ||
- raw_retrieved_info (List[Information]): List of Information instances retrieved. | ||
- cited_info (Dict[int, Information]): Dictionary of cited Information instances, indexed by their citation number. | ||
- response (str): The generated response string with inline citations. | ||
""" | ||
# retrieve information | ||
if callback_handler is not None: | ||
callback_handler.on_expert_information_collection_start() | ||
queries, searched_results = self.retrieve_information( | ||
topic=topic, question=question | ||
) | ||
if callback_handler is not None: | ||
callback_handler.on_expert_information_collection_end(searched_results) | ||
# format information string for answer generation | ||
info_text, index_to_information_mapping = format_search_results( | ||
searched_results, mode=mode | ||
) | ||
answer = "Sorry, there is insufficient information to answer the question." | ||
# generate answer to the question | ||
if info_text: | ||
with self.logging_wrapper.log_event( | ||
f"AnswerQuestionModule.answer_question ({hash(question)})" | ||
): | ||
with dspy.settings.context( | ||
lm=self.question_answering_lm, show_guidelines=False | ||
): | ||
answer = self.answer_question( | ||
topic=topic, question=question, info=info_text, style=style | ||
).answer | ||
answer = ArticleTextProcessing.remove_uncompleted_sentences_with_citations( | ||
answer | ||
) | ||
answer = trim_output_after_hint( | ||
answer, | ||
hint="Now give your response. (Try to use as many different sources as possible and do not hallucinate.)", | ||
) | ||
# enforce single citation index bracket. [1, 2] -> [1][2] | ||
answer = separate_citations(answer) | ||
if callback_handler is not None: | ||
callback_handler.on_expert_utterance_generation_end() | ||
# construct cited search result | ||
cited_searched_results = extract_cited_storm_info( | ||
response=answer, index_to_storm_info=index_to_information_mapping | ||
) | ||
|
||
return dspy.Prediction( | ||
question=question, | ||
queries=queries, | ||
raw_retrieved_info=searched_results, | ||
cited_info=cited_searched_results, | ||
response=answer, | ||
) |
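A usage sketch mirroring `_get_answer_question_module_instance` in `collaborative_storm_utils.py` (the retrieval model `rm`, the `lm_config`, and the `logging_wrapper` are assumed to be set up as in the example script; the topic and question are made up):

```python
from knowledge_storm.interface import Retriever

# Wrap the retrieval model exactly as the helper in collaborative_storm_utils.py does.
retriever = Retriever(rm=rm, max_thread=5)
answer_module = AnswerQuestionModule(
    retriever=retriever,
    max_search_queries=2,
    question_answering_lm=lm_config.question_answering_lm,
    logging_wrapper=logging_wrapper,
)
result = answer_module(
    topic="Urban heat islands",
    question="How do green roofs affect urban temperatures?",
    mode="brief",
    style="conversational and concise",
)
print(result.response)  # grounded answer with inline [n] citations
print(result.queries)   # the search queries that were issued
```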
113 changes: 113 additions & 0 deletions
knowledge_storm/collaborative_storm/modules/grounded_question_generation.py
@@ -0,0 +1,113 @@ | ||
""" | ||
This module handles question generation within the Co-STORM framework, specifically designed to support the Moderator role. | ||
The Moderator generates insightful, thought-provoking questions that introduce new directions into the conversation. | ||
By leveraging uncited or unused snippets of information retrieved during the discussion, the Moderator ensures the conversation remains dynamic and avoids repetitive or overly niche topics. | ||
For more detailed information, refer to Section 3.5 of the Co-STORM paper: https://www.arxiv.org/pdf/2408.15232. | ||
""" | ||
|
||
import dspy | ||
from typing import List, Union | ||
|
||
from .collaborative_storm_utils import ( | ||
format_search_results, | ||
extract_and_remove_citations, | ||
keep_first_and_last_paragraph, | ||
extract_cited_storm_info, | ||
) | ||
from ...dataclass import ConversationTurn, KnowledgeBase | ||
from ...interface import Information | ||
|
||
|
||
class KnowledgeBaseSummmary(dspy.Signature): | ||
"""Your job is to give brief summary of what's been discussed in a roundtable conversation. Contents are themantically organized into hierarchical sections. | ||
You will be presented with these sections where "#" denotes level of section. | ||
""" | ||
|
||
topic = dspy.InputField(prefix="topic: ", format=str) | ||
structure = dspy.InputField(prefix="Tree structure: \n", format=str) | ||
output = dspy.OutputField(prefix="Now give brief summary:\n", format=str) | ||
|
||
|
||
class ConvertUtteranceStyle(dspy.Signature): | ||
""" | ||
You are an invited speaker in the round table conversation. | ||
Your task is to make the question or the response more conversational and engaging to facilitate the flow of conversation.
Note that this is an ongoing conversation, so there is no need for welcoming or concluding words. The previous speaker's utterance is provided only to make the conversation more natural.
Do not hallucinate, and keep citation indices like [1] exactly as they are.
""" | ||
|
||
expert = dspy.InputField(prefix="You are inivited as: ", format=str) | ||
action = dspy.InputField( | ||
prefix="You want to contribute to conversation by: ", format=str | ||
) | ||
prev = dspy.InputField(prefix="Previous speaker said: ", format=str) | ||
content = dspy.InputField( | ||
prefix="Question or response you want to say: ", format=str | ||
) | ||
utterance = dspy.OutputField( | ||
prefix="Your utterance (keep the information as much as you can with citations, prefer shorter answers without loss of information): ", | ||
format=str, | ||
) | ||
|
||
|
||
class GroundedQuestionGeneration(dspy.Signature): | ||
"""Your job is to find next discussion focus in a roundtable conversation. You will be given previous conversation summary and some information that might assist you discover new discussion focus. | ||
Note that the new discussion focus should bring new angle and perspective to the discussion and avoid repetition. The new discussion focus should be grounded on the available information and push the boundaries of the current discussion for broader exploration. | ||
The new discussion focus should have natural flow from last utterance in the conversation. | ||
Use [1][2] in line to ground your question. | ||
""" | ||
|
||
topic = dspy.InputField(prefix="topic: ", format=str) | ||
summary = dspy.InputField(prefix="Discussion history: \n", format=str) | ||
information = dspy.InputField(prefix="Available information: \n", format=str) | ||
last_utterance = dspy.InputField( | ||
prefix="Last utterance in the conversation: \n", format=str | ||
) | ||
output = dspy.OutputField( | ||
prefix="Now give next discussion focus in the format of one sentence question:\n", | ||
format=str, | ||
) | ||
|
||
|
||
class GroundedQuestionGenerationModule(dspy.Module): | ||
def __init__(self, engine: Union[dspy.dsp.LM, dspy.dsp.HFModel]): | ||
self.engine = engine | ||
self.gen_focus = dspy.Predict(GroundedQuestionGeneration) | ||
self.polish_style = dspy.Predict(ConvertUtteranceStyle) | ||
self.gen_summary = dspy.Predict(KnowledgeBaseSummmary) | ||
|
||
def forward( | ||
self, | ||
topic: str, | ||
knowledge_base: KnowledgeBase, | ||
last_conv_turn: ConversationTurn, | ||
unused_snippets: List[Information], | ||
): | ||
information, index_to_information_mapping = format_search_results( | ||
unused_snippets, info_max_num_words=1000 | ||
) | ||
summary = knowledge_base.get_knowledge_base_summary() | ||
last_utterance, _ = extract_and_remove_citations(last_conv_turn.utterance) | ||
with dspy.settings.context(lm=self.engine, show_guidelines=False): | ||
raw_utterance = self.gen_focus( | ||
topic=topic, | ||
summary=summary, | ||
information=information, | ||
last_utterance=last_utterance, | ||
).output | ||
utterance = self.polish_style( | ||
expert="Roundtable conversation moderator", | ||
action="Raising a new question by natural transit from previous utterance.", | ||
prev=keep_first_and_last_paragraph(last_utterance), | ||
content=raw_utterance, | ||
).utterance | ||
cited_searched_results = extract_cited_storm_info( | ||
response=utterance, index_to_storm_info=index_to_information_mapping | ||
) | ||
return dspy.Prediction( | ||
raw_utterance=raw_utterance, | ||
utterance=utterance, | ||
cited_info=cited_searched_results, | ||
) |
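As a quick orientation for this new module, the sketch below shows how GroundedQuestionGenerationModule might be driven. It is a hypothetical usage example, not part of this commit: the variables kb, last_turn, and unused_snippets stand in for a populated KnowledgeBase, the latest ConversationTurn, and the unused Information snippets produced earlier in a Co-STORM run, and gpt-4o-mini is only one possible engine.

import os
from knowledge_storm.lm import OpenAIModel
from knowledge_storm.collaborative_storm.modules.grounded_question_generation import (
    GroundedQuestionGenerationModule,
)

# Hypothetical engine choice; any dspy-compatible LM should work here.
engine = OpenAIModel(model="gpt-4o-mini", api_key=os.getenv("OPENAI_API_KEY"), max_tokens=500)
moderator = GroundedQuestionGenerationModule(engine=engine)

# kb, last_turn, and unused_snippets come from an ongoing Co-STORM session (placeholders).
prediction = moderator(
    topic="AI in healthcare",
    knowledge_base=kb,
    last_conv_turn=last_turn,
    unused_snippets=unused_snippets,
)
print(prediction.utterance)   # polished moderator question with citation indices like [1] kept
print(prediction.cited_info)  # the retrieved snippets actually cited in that question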
422 changes: 422 additions & 0 deletions
knowledge_storm/collaborative_storm/modules/information_insertion_module.py
Large diffs are not rendered by default.
32 changes: 32 additions & 0 deletions
knowledge_storm/collaborative_storm/modules/knowledge_base_summary.py
@@ -0,0 +1,32 @@ | ||
import dspy | ||
from typing import Union | ||
from ...dataclass import KnowledgeBase | ||
|
||
|
||
class KnowledgeBaseSummmary(dspy.Signature): | ||
"""Your job is to give brief summary of what's been discussed in a roundtable conversation. Contents are themantically organized into hierarchical sections. | ||
You will be presented with these sections where "#" denotes level of section. | ||
""" | ||
|
||
topic = dspy.InputField(prefix="topic: ", format=str) | ||
structure = dspy.InputField(prefix="Tree structure: \n", format=str) | ||
output = dspy.OutputField(prefix="Now give brief summary:\n", format=str) | ||
|
||
|
||
class KnowledgeBaseSummaryModule(dspy.Module): | ||
def __init__(self, engine: Union[dspy.dsp.LM, dspy.dsp.HFModel]): | ||
self.engine = engine | ||
self.gen_summary = dspy.Predict(KnowledgeBaseSummmary) | ||
|
||
def forward(self, knowledge_base: KnowledgeBase): | ||
structure = knowledge_base.get_node_hierarchy_string( | ||
include_indent=False, | ||
include_full_path=False, | ||
include_hash_tag=True, | ||
include_node_content_count=False, | ||
) | ||
with dspy.settings.context(lm=self.engine, show_guidelines=False): | ||
summary = self.gen_summary( | ||
topic=knowledge_base.topic, structure=structure | ||
).output | ||
return summary |
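For context, a minimal, hypothetical call to the summary module above might look like the following; kb is a placeholder for a KnowledgeBase built earlier in the session, and the model choice is only an example.

import os
from knowledge_storm.lm import OpenAIModel
from knowledge_storm.collaborative_storm.modules.knowledge_base_summary import (
    KnowledgeBaseSummaryModule,
)

summarizer = KnowledgeBaseSummaryModule(
    engine=OpenAIModel(model="gpt-4o-mini", api_key=os.getenv("OPENAI_API_KEY"), max_tokens=300)
)
summary = summarizer(knowledge_base=kb)  # kb: existing KnowledgeBase (placeholder)
print(summary)  # brief recap of the hierarchical discussion tree ("#" marks section depth)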
37 changes: 37 additions & 0 deletions
knowledge_storm/collaborative_storm/modules/simulate_user.py
@@ -0,0 +1,37 @@ | ||
import dspy | ||
from typing import List, Union | ||
|
||
from .collaborative_storm_utils import extract_and_remove_citations | ||
from ...dataclass import ConversationTurn | ||
from ...storm_wiki.modules.knowledge_curation import AskQuestionWithPersona | ||
|
||
|
||
class GenSimulatedUserUtterance(dspy.Module): | ||
def __init__(self, engine: Union[dspy.dsp.LM, dspy.dsp.HFModel]): | ||
self.engine = engine | ||
self.ask_question = dspy.Predict(AskQuestionWithPersona) | ||
|
||
def gen_conv_history_string(self, conversation_turns: List[ConversationTurn]): | ||
conv_history = [] | ||
total_turns = len(conversation_turns) | ||
|
||
for i, turn in enumerate(conversation_turns): | ||
utterance, _ = extract_and_remove_citations(turn.utterance) | ||
if i >= total_turns - 4: | ||
conv_history.append(f"{turn.role}: {utterance}") | ||
else: | ||
if turn.claim_to_make: | ||
conv_history.append(f"{turn.role}: {turn.claim_to_make}") | ||
else: | ||
conv_history.append(f"{turn.role}: {utterance}") | ||
|
||
return "\n".join(conv_history) | ||
|
||
def forward(self, topic: str, intent: str, conv_history: List[ConversationTurn]): | ||
conv_history_string = self.gen_conv_history_string(conv_history) | ||
with dspy.settings.context(lm=self.engine, show_guidelines=False): | ||
return self.ask_question( | ||
topic=topic, | ||
persona=f"researcher with interest in {intent}", | ||
conv=conv_history_string, | ||
).question |
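A hedged usage sketch for the simulated-user module above; turns is a placeholder for the session's list of ConversationTurn objects, and the topic and intent strings are purely illustrative.

import os
from knowledge_storm.lm import OpenAIModel
from knowledge_storm.collaborative_storm.modules.simulate_user import GenSimulatedUserUtterance

simulated_user = GenSimulatedUserUtterance(
    engine=OpenAIModel(model="gpt-4o-mini", api_key=os.getenv("OPENAI_API_KEY"), max_tokens=200)
)
question = simulated_user(
    topic="AI in healthcare",
    intent="applications of large language models in clinical workflows",
    conv_history=turns,  # list of ConversationTurn objects from the run (placeholder)
)
print(question)  # the next question the simulated user would ask, conditioned on recent turns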
408 changes: 408 additions & 0 deletions
knowledge_storm/collaborative_storm/modules/warmstart_hierarchical_chat.py
Large diffs are not rendered by default.
Large diffs are not rendered by default.
@@ -0,0 +1,169 @@ | ||
import requests | ||
import os | ||
from typing import List, Tuple, Union, Optional, Dict, Literal | ||
import numpy as np | ||
|
||
from concurrent.futures import ThreadPoolExecutor, as_completed | ||
|
||
|
||
class EmbeddingModel: | ||
def __init__(self): | ||
pass | ||
|
||
def get_embedding(self, text: str) -> Tuple[np.ndarray, int]: | ||
raise Exception("Not implemented") | ||
|
||
|
||
class OpenAIEmbeddingModel(EmbeddingModel): | ||
def __init__(self, model: str = "text-embedding-3-small", api_key: str = None): | ||
if not api_key: | ||
api_key = os.getenv("OPENAI_API_KEY") | ||
|
||
self.url = "https://api.openai.com/v1/embeddings" | ||
self.headers = { | ||
"Content-Type": "application/json", | ||
"Authorization": f"Bearer {api_key}", | ||
} | ||
self.model = model | ||
|
||
def get_embedding(self, text: str) -> Tuple[np.ndarray, int]: | ||
data = {"input": text, "model": self.model} | ||
|
||
response = requests.post(self.url, headers=self.headers, json=data) | ||
if response.status_code == 200: | ||
data = response.json() | ||
embedding = np.array(data["data"][0]["embedding"]) | ||
token = data["usage"]["prompt_tokens"] | ||
return embedding, token | ||
else: | ||
response.raise_for_status() | ||
|
||
|
||
class TogetherEmbeddingModel: | ||
def __init__(self, model: str = "BAAI/bge-large-en-v1.5", api_key: str = None): | ||
import together | ||
|
||
self.model = model | ||
if not api_key: | ||
api_key = os.getenv("TOGETHER_API_KEY") | ||
self.together_client = together.Together(api_key=api_key) | ||
|
||
def get_embedding(self, text: str) -> Tuple[np.ndarray, int]: | ||
response = self.together_client.embeddings.create(input=text, model=self.model) | ||
return response.data[0].embedding, -1 | ||
|
||
|
||
class AzureOpenAIEmbeddingModel: | ||
def __init__(self, model: str = "text-embedding-3-small", api_key: str = None): | ||
from openai import AzureOpenAI | ||
|
||
self.model = model | ||
if not api_key: | ||
api_key = os.getenv("AZURE_API_KEY") | ||
|
||
self.client = AzureOpenAI( | ||
api_key=api_key, | ||
api_version=os.getenv("AZURE_API_VERSION"), | ||
azure_endpoint=os.getenv("AZURE_API_BASE"), | ||
) | ||
|
||
def get_embedding(self, text: str) -> Tuple[np.ndarray, int]: | ||
response = self.client.embeddings.create(input=text, model=self.model) | ||
|
||
embedding = np.array(response.data[0].embedding) | ||
token = response.usage.prompt_tokens | ||
return embedding, token | ||
|
||
|
||
def get_text_embeddings( | ||
texts: Union[str, List[str]], | ||
max_workers: int = 5, | ||
embedding_cache: Optional[Dict[str, np.ndarray]] = None, | ||
) -> Tuple[np.ndarray, int]: | ||
""" | ||
Get text embeddings using the encoder selected by the ENCODER_API_TYPE environment variable (OpenAI, Azure OpenAI, or Together). | ||
Args: | ||
texts (Union[str, List[str]]): A single text string or a list of text strings to embed. | ||
max_workers (int): The maximum number of workers for parallel processing. | ||
embedding_cache (Optional[Dict[str, np.ndarray]]): A cache to store previously computed embeddings. | ||
Returns: | ||
Tuple[np.ndarray, int]: The 2D array of embeddings and the total token usage. | ||
""" | ||
embedding_model = None | ||
encoder_type = os.getenv("ENCODER_API_TYPE") | ||
if encoder_type and encoder_type == "openai": | ||
embedding_model = OpenAIEmbeddingModel() | ||
elif encoder_type and encoder_type == "azure": | ||
embedding_model = AzureOpenAIEmbeddingModel() | ||
elif encoder_type and encoder_type == "together": | ||
embedding_model = TogetherEmbeddingModel() | ||
else: | ||
raise Exception( | ||
"No valid encoder type is provided. Check <repo root>/secrets.toml for the field ENCODER_API_TYPE" | ||
) | ||
|
||
def fetch_embedding(text: str) -> Tuple[str, np.ndarray, int]: | ||
if embedding_cache is not None and text in embedding_cache: | ||
return ( | ||
text, | ||
embedding_cache[text], | ||
0, | ||
) # Returning 0 tokens since no API call is made | ||
embedding, token_usage = embedding_model.get_embedding(text) | ||
return text, embedding, token_usage | ||
|
||
if isinstance(texts, str): | ||
_, embedding, tokens = fetch_embedding(texts) | ||
return np.array(embedding), tokens | ||
|
||
embeddings = [] | ||
total_tokens = 0 | ||
|
||
with ThreadPoolExecutor(max_workers=max_workers) as executor: | ||
futures = {executor.submit(fetch_embedding, text): text for text in texts} | ||
|
||
for future in as_completed(futures): | ||
try: | ||
text, embedding, tokens = future.result() | ||
embeddings.append((text, embedding, tokens)) | ||
total_tokens += tokens | ||
except Exception as e: | ||
print(f"An error occurred for text: {futures[future]}") | ||
print(e) | ||
|
||
# Sort results to match the order of the input texts | ||
embeddings.sort(key=lambda x: texts.index(x[0])) | ||
if embedding_cache is not None: | ||
for text, embedding, _ in embeddings: | ||
embedding_cache[text] = embedding | ||
embeddings = [result[1] for result in embeddings] | ||
|
||
return np.array(embeddings), total_tokens |
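Because get_text_embeddings dispatches on the ENCODER_API_TYPE environment variable, a small hedged example of a call follows. The import path knowledge_storm.encoder is an assumption (this file's name is not rendered in the view above), and the embedding dimensionality shown is only what text-embedding-3-small typically returns.

import os
from knowledge_storm.encoder import get_text_embeddings  # assumed module path

os.environ["ENCODER_API_TYPE"] = "openai"  # or "azure" / "together", with the matching key set

cache = {}
vectors, tokens = get_text_embeddings(
    ["collaborative discourse", "knowledge curation"],
    max_workers=2,
    embedding_cache=cache,
)
print(vectors.shape, tokens)  # e.g. (2, 1536) plus the total prompt tokens used

# A repeated request for a cached text reports 0 tokens because no API call is made.
_, tokens_again = get_text_embeddings("collaborative discourse", embedding_cache=cache)
print(tokens_again)  # 0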
@@ -0,0 +1,212 @@ | ||
from contextlib import contextmanager | ||
import time | ||
import pytz | ||
from datetime import datetime | ||
|
||
# Define California timezone | ||
CALIFORNIA_TZ = pytz.timezone("America/Los_Angeles") | ||
|
||
|
||
class EventLog: | ||
def __init__(self, event_name): | ||
self.event_name = event_name | ||
self.start_time = None | ||
self.end_time = None | ||
self.child_events = {} | ||
|
||
def record_start_time(self): | ||
self.start_time = datetime.now( | ||
pytz.utc | ||
) # Store in UTC for consistent timezone conversion | ||
|
||
def record_end_time(self): | ||
self.end_time = datetime.now( | ||
pytz.utc | ||
) # Store in UTC for consistent timezone conversion | ||
|
||
def get_total_time(self): | ||
if self.start_time and self.end_time: | ||
return (self.end_time - self.start_time).total_seconds() | ||
return 0 | ||
|
||
def get_start_time(self): | ||
if self.start_time: | ||
# Format to milliseconds | ||
return self.start_time.astimezone(CALIFORNIA_TZ).strftime( | ||
"%Y-%m-%d %H:%M:%S.%f" | ||
)[:-3] | ||
return None | ||
|
||
def get_end_time(self): | ||
if self.end_time: | ||
# Format to milliseconds | ||
return self.end_time.astimezone(CALIFORNIA_TZ).strftime( | ||
"%Y-%m-%d %H:%M:%S.%f" | ||
)[:-3] | ||
return None | ||
|
||
def add_child_event(self, child_event): | ||
self.child_events[child_event.event_name] = child_event | ||
|
||
def get_child_events(self): | ||
return self.child_events | ||
|
||
|
||
class LoggingWrapper: | ||
def __init__(self, lm_config): | ||
self.logging_dict = {} | ||
self.lm_config = lm_config | ||
self.current_pipeline_stage = None | ||
self.event_stack = [] | ||
self.pipeline_stage_active = False | ||
|
||
def _pipeline_stage_start(self, pipeline_stage: str): | ||
if self.pipeline_stage_active: | ||
raise RuntimeError( | ||
"A pipeline stage is already active. End the current stage before starting a new one." | ||
) | ||
|
||
self.current_pipeline_stage = pipeline_stage | ||
self.logging_dict[pipeline_stage] = { | ||
"time_usage": {}, | ||
"lm_usage": {}, | ||
"lm_history": [], | ||
"query_count": 0, | ||
} | ||
self.pipeline_stage_active = True | ||
|
||
def _event_start(self, event_name: str): | ||
if not self.pipeline_stage_active: | ||
raise RuntimeError("No pipeline stage is currently active.") | ||
|
||
if not self.event_stack and self.current_pipeline_stage: | ||
# Top-level event (directly under the pipeline stage) | ||
if ( | ||
event_name | ||
not in self.logging_dict[self.current_pipeline_stage]["time_usage"] | ||
): | ||
event = EventLog(event_name=event_name) | ||
event.record_start_time() | ||
self.logging_dict[self.current_pipeline_stage]["time_usage"][ | ||
event_name | ||
] = event | ||
self.event_stack.append(event) | ||
else: | ||
self.logging_dict[self.current_pipeline_stage]["time_usage"][ | ||
event_name | ||
].record_start_time() | ||
elif self.event_stack: | ||
# Nested event (under another event) | ||
parent_event = self.event_stack[-1] | ||
if event_name not in parent_event.get_child_events(): | ||
event = EventLog(event_name=event_name) | ||
event.record_start_time() | ||
parent_event.add_child_event(event) | ||
self.logging_dict[self.current_pipeline_stage]["time_usage"][ | ||
event_name | ||
] = event | ||
self.event_stack.append(event) | ||
else: | ||
parent_event.get_child_events()[event_name].record_start_time() | ||
else: | ||
raise RuntimeError( | ||
"Cannot start an event without an active pipeline stage or parent event." | ||
) | ||
|
||
def _event_end(self, event_name: str): | ||
if not self.pipeline_stage_active: | ||
raise RuntimeError("No pipeline stage is currently active.") | ||
|
||
if not self.event_stack: | ||
raise RuntimeError("No parent event is currently active.") | ||
|
||
if self.event_stack: | ||
current_event_log = self.event_stack[-1] | ||
if event_name in current_event_log.get_child_events(): | ||
current_event_log.get_child_events()[event_name].record_end_time() | ||
elif ( | ||
event_name | ||
in self.logging_dict[self.current_pipeline_stage]["time_usage"] | ||
): | ||
self.logging_dict[self.current_pipeline_stage]["time_usage"][ | ||
event_name | ||
].record_end_time() | ||
else: | ||
raise AssertionError( | ||
f"Failure to record end time for event {event_name}. Start time is not recorded." | ||
) | ||
if current_event_log.event_name == event_name: | ||
self.event_stack.pop() | ||
else: | ||
raise RuntimeError("Cannot end an event without an active parent event.") | ||
|
||
def _pipeline_stage_end(self): | ||
if not self.pipeline_stage_active: | ||
raise RuntimeError("No pipeline stage is currently active to end.") | ||
|
||
self.logging_dict[self.current_pipeline_stage][ | ||
"lm_usage" | ||
] = self.lm_config.collect_and_reset_lm_usage() | ||
self.logging_dict[self.current_pipeline_stage][ | ||
"lm_history" | ||
] = self.lm_config.collect_and_reset_lm_history() | ||
self.pipeline_stage_active = False | ||
|
||
def add_query_count(self, count): | ||
if not self.pipeline_stage_active: | ||
raise RuntimeError( | ||
"No pipeline stage is currently active to add query count." | ||
) | ||
|
||
self.logging_dict[self.current_pipeline_stage]["query_count"] += count | ||
|
||
@contextmanager | ||
def log_event(self, event_name): | ||
if not self.pipeline_stage_active: | ||
raise RuntimeError("No pipeline stage is currently active.") | ||
|
||
self._event_start(event_name) | ||
yield | ||
self._event_end(event_name) | ||
|
||
@contextmanager | ||
def log_pipeline_stage(self, pipeline_stage): | ||
if self.pipeline_stage_active: | ||
print( | ||
"A pipeline stage is already active, ending the current stage safely." | ||
) | ||
self._pipeline_stage_end() | ||
|
||
start_time = time.time() | ||
try: | ||
self._pipeline_stage_start(pipeline_stage) | ||
yield | ||
except Exception as e: | ||
print(f"Error occurred during pipeline stage '{pipeline_stage}': {e}") | ||
finally: | ||
self.logging_dict[self.current_pipeline_stage]["total_wall_time"] = ( | ||
time.time() - start_time | ||
) | ||
self._pipeline_stage_end() | ||
|
||
def dump_logging_and_reset(self, reset_logging=True): | ||
log_dump = {} | ||
for pipeline_stage, pipeline_log in self.logging_dict.items(): | ||
time_stamp_log = { | ||
event_name: { | ||
"total_time_seconds": event.get_total_time(), | ||
"start_time": event.get_start_time(), | ||
"end_time": event.get_end_time(), | ||
} | ||
for event_name, event in pipeline_log["time_usage"].items() | ||
} | ||
log_dump[pipeline_stage] = { | ||
"time_usage": time_stamp_log, | ||
"lm_usage": pipeline_log["lm_usage"], | ||
"lm_history": pipeline_log["lm_history"], | ||
"query_count": pipeline_log["query_count"], | ||
"total_wall_time": pipeline_log["total_wall_time"], | ||
} | ||
if reset_logging: | ||
self.logging_dict.clear() | ||
return log_dump |
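To make the stage and event bookkeeping above concrete, here is a minimal, hypothetical sketch. It assumes lm_config is an object exposing collect_and_reset_lm_usage() and collect_and_reset_lm_history() (the CollaborativeStormLMConfigs used elsewhere in this PR appears to fit), and the stage and event names are purely illustrative.

from knowledge_storm.logging_wrapper import LoggingWrapper

logger = LoggingWrapper(lm_config)  # lm_config: placeholder LM config object

with logger.log_pipeline_stage("warm_start"):
    with logger.log_event("background retrieval"):
        pass  # run retrieval / LM calls here
    logger.add_query_count(3)

log = logger.dump_logging_and_reset()
print(log["warm_start"]["query_count"])        # 3
print(log["warm_start"]["time_usage"].keys())  # dict_keys(['background retrieval'])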