
Commit cddf497

Copilot and VinciGit00 committed
Add NVIDIA LLM integration support
- Created Nvidia wrapper model class in scrapegraphai/models/nvidia.py
- Updated models/__init__.py to export Nvidia class
- Updated abstract_graph.py to use Nvidia wrapper instead of direct ChatNVIDIA import
- Added nvidia as optional dependency in pyproject.toml
- Created example usage file for NVIDIA in examples/smart_scraper_graph/nvidia/

Co-authored-by: VinciGit00 <[email protected]>
1 parent cea8e6b commit cddf497

5 files changed: +80 / -10 lines

examples/smart_scraper_graph/nvidia/ (new example file)

Lines changed: 48 additions & 0 deletions
@@ -0,0 +1,48 @@
+"""
+Basic example of scraping pipeline using SmartScraper with NVIDIA
+"""
+
+import json
+import os
+
+from dotenv import load_dotenv
+
+from scrapegraphai.graphs import SmartScraperGraph
+from scrapegraphai.utils import prettify_exec_info
+
+load_dotenv()
+
+# ************************************************
+# Define the configuration for the graph
+# ************************************************
+
+
+graph_config = {
+    "llm": {
+        "api_key": os.getenv("NVIDIA_API_KEY"),
+        "model": "nvidia/meta/llama3-70b-instruct",
+        "model_provider": "nvidia",
+    },
+    "verbose": True,
+    "headless": False,
+}
+
+# ************************************************
+# Create the SmartScraperGraph instance and run it
+# ************************************************
+
+smart_scraper_graph = SmartScraperGraph(
+    prompt="Extract me the first article",
+    source="https://www.wired.com",
+    config=graph_config,
+)
+
+result = smart_scraper_graph.run()
+print(json.dumps(result, indent=4))
+
+# ************************************************
+# Get graph execution info
+# ************************************************
+
+graph_exec_info = smart_scraper_graph.get_execution_info()
+print(prettify_exec_info(graph_exec_info))

pyproject.toml

Lines changed: 1 addition & 0 deletions
@@ -69,6 +69,7 @@ requires-python = ">=3.10,<4.0"
[project.optional-dependencies]
burr = ["burr[start]==0.22.1"]
docs = ["sphinx==6.0", "furo==2024.5.6"]
+nvidia = ["langchain-nvidia-ai-endpoints>=0.1.0"]
ocr = [
    "surya-ocr>=0.5.0",
    "matplotlib>=3.7.2",

scrapegraphai/graphs/abstract_graph.py

Lines changed: 2 additions & 9 deletions
@@ -14,7 +14,7 @@
from pydantic import BaseModel

from ..helpers import models_tokens
-from ..models import CLoD, DeepSeek, OneApi, XAI
+from ..models import CLoD, DeepSeek, Nvidia, OneApi, XAI
from ..utils.logging import set_verbosity_info, set_verbosity_warning, get_logger
from ..telemetry import log_graph_execution

@@ -264,14 +264,7 @@ def _create_llm(self, llm_config: dict) -> object:
                return ChatTogether(**llm_params)

            elif model_provider == "nvidia":
-                try:
-                    from langchain_nvidia_ai_endpoints import ChatNVIDIA
-                except ImportError:
-                    raise ImportError(
-                        """The langchain_nvidia_ai_endpoints module is not installed.
-                        Please install it using `pip install langchain-nvidia-ai-endpoints`."""
-                    )
-                return ChatNVIDIA(**llm_params)
+                return Nvidia(**llm_params)

        except Exception as e:
            raise Exception(f"Error instancing model: {e}")

scrapegraphai/models/__init__.py

Lines changed: 2 additions & 1 deletion
@@ -4,9 +4,10 @@
from .clod import CLoD
from .deepseek import DeepSeek
+from .nvidia import Nvidia
from .oneapi import OneApi
from .openai_itt import OpenAIImageToText
from .openai_tts import OpenAITextToSpeech
from .xai import XAI

-__all__ = ["DeepSeek", "OneApi", "OpenAIImageToText", "OpenAITextToSpeech", "CLoD", "XAI"]
+__all__ = ["DeepSeek", "OneApi", "OpenAIImageToText", "OpenAITextToSpeech", "CLoD", "XAI", "Nvidia"]

scrapegraphai/models/nvidia.py

Lines changed: 27 additions & 0 deletions
@@ -0,0 +1,27 @@
+"""
+NVIDIA Module
+"""
+
+
+class Nvidia:
+    """
+    A wrapper for the ChatNVIDIA class that provides default configuration
+    and could be extended with additional methods if needed.
+
+    Args:
+        llm_config (dict): Configuration parameters for the language model.
+    """
+
+    def __new__(cls, **llm_config):
+        try:
+            from langchain_nvidia_ai_endpoints import ChatNVIDIA
+        except ImportError:
+            raise ImportError(
+                """The langchain_nvidia_ai_endpoints module is not installed.
+                Please install it using `pip install langchain-nvidia-ai-endpoints`."""
+            )
+
+        if "api_key" in llm_config:
+            llm_config["nvidia_api_key"] = llm_config.pop("api_key")
+
+        return ChatNVIDIA(**llm_config)
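A brief usage sketch (not part of the diff): the wrapper pops api_key into nvidia_api_key and returns a plain ChatNVIDIA instance, so the standard LangChain chat interface applies to the object it creates. The model name and prompt below are illustrative only, and NVIDIA_API_KEY is assumed to be set in the environment.

# Usage sketch, assuming the nvidia extra is installed and NVIDIA_API_KEY is exported.
import os

from scrapegraphai.models import Nvidia

# Nvidia.__new__ renames "api_key" to "nvidia_api_key" and returns ChatNVIDIA(**kwargs).
llm = Nvidia(
    api_key=os.getenv("NVIDIA_API_KEY"),
    model="meta/llama3-70b-instruct",  # illustrative model name
)
print(llm.invoke("Say hello in one short sentence.").content)

Keeping the optional import inside the wrapper is what lets abstract_graph.py drop its inline try/except, as shown in the hunk above.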
