diff --git a/selfie-ui/src/app/components/Settings/Settings.tsx b/selfie-ui/src/app/components/Settings/Settings.tsx
index 53e052b..afdbb12 100644
--- a/selfie-ui/src/app/components/Settings/Settings.tsx
+++ b/selfie-ui/src/app/components/Settings/Settings.tsx
@@ -25,13 +25,13 @@ const Settings = () => {
         enumNames: ['Local (llama.cpp)', 'Other (litellm)'],
         default: "llama.cpp"
       },
+      verbose_logging: { type: "boolean", title: "Verbose logging", default: false },
       // name: { type: "string", title: "Name" },
       // description: { type: "string", title: "Description" },
       // apiKey: { type: "string", title: "API Key" },
       // host: { type: "string", title: "Host", default: "http://localhost" },
       // port: { type: "integer", title: "Port", default: 8000 },
       // share: { type: "boolean", title: "Share", default: false },
-      // verbose: { type: "boolean", title: "Verbose", default: false },
     },
     allOf: [
       {
@@ -128,7 +128,7 @@ const Settings = () => {
   console.log(models)

   const uiSchema = {
-    'ui:order': ['gpu', 'ngrok_enabled', 'ngrok_authtoken', 'ngrok_domain', '*'],
+    'ui:order': ['gpu', 'ngrok_enabled', 'ngrok_authtoken', 'ngrok_domain', '*', 'verbose_logging'],

     method: {
       "ui:widget": "radio",
diff --git a/selfie/__main__.py b/selfie/__main__.py
index bcae778..f3b9b33 100644
--- a/selfie/__main__.py
+++ b/selfie/__main__.py
@@ -32,7 +32,7 @@ def deserialize_args_from_env():
        'api_port': ('SELFIE_API_PORT', int),
        'gpu': ('SELFIE_GPU', to_bool),
        'reload': ('SELFIE_RELOAD', to_bool),
-       'verbose': ('SELFIE_VERBOSE', to_bool),
+       'verbose_logging': ('SELFIE_VERBOSE_LOGGING', to_bool),
        'headless': ('SELFIE_HEADLESS', to_bool),
        'model': ('SELFIE_MODEL', str),
    }
@@ -61,7 +61,7 @@ def parse_args():
    parser.add_argument("--api_port", type=int, default=None, help="Specify the port to run on")
    parser.add_argument("--gpu", default=None, action="store_true", help="Enable GPU support")
    parser.add_argument("--reload", action="store_true", default=None, help="Enable hot-reloading")
-    parser.add_argument("--verbose", action="store_true", default=None, help="Enable verbose logging")
+    parser.add_argument("--verbose_logging", action="store_true", default=None, help="Enable verbose logging")
    parser.add_argument("--headless", action="store_true", default=None, help="Run in headless mode (no GUI)")
    parser.add_argument("--model", type=str, default=None, help="Specify the model to use")
    args = parser.parse_args()
@@ -74,12 +74,15 @@ def get_configured_app(shareable=False):

    logger.info(f"Running with args: {args}")

-    if 'verbose' in args and args.verbose:
+    if 'verbose_logging' in args and args.verbose_logging:
        logging.getLogger("selfie").setLevel(level=logging.DEBUG)

    logger.info("Creating app configuration")
    app_config = create_app_config(**vars(args))

+    if app_config.verbose_logging:
+        logging.getLogger("selfie").setLevel(level=logging.DEBUG)
+
    if shareable and app_config.ngrok_enabled:
        if app_config.ngrok_authtoken is None:
            raise ValueError("ngrok_authtoken is required to share the API.")
diff --git a/selfie/config.py b/selfie/config.py
index 591fec6..081bbba 100644
--- a/selfie/config.py
+++ b/selfie/config.py
@@ -17,7 +17,7 @@
 default_hosted_model = 'openai/gpt-3.5-turbo'
 default_local_gpu_model = 'TheBloke/Mistral-7B-OpenOrca-GPTQ'

-ensure_set_in_db = ['gpu', 'method', 'model']
+ensure_set_in_db = ['gpu', 'method', 'model', 'verbose_logging']

 # TODO: This is not accurate, e.g.
 #  if the user starts the app with --gpu
@@ -36,7 +36,7 @@ class AppConfig(BaseModel):
    api_port: Optional[int] = Field(default=default_port, description="Specify the port to run on")
    share: bool = Field(default=False, description="Enable sharing via ngrok")
    gpu: bool = Field(default=get_default_gpu_mode(), description="Enable GPU support")
-    verbose: bool = Field(default=False, description="Enable verbose logging")
+    verbose_logging: bool = Field(default=False, description="Enable verbose logging")
    db_name: str = Field(default='selfie.db', description="Database name")
    method: str = Field(default=default_method, description="LLM provider method, llama.cpp or litellm")
    model: str = Field(default=default_local_model, description="Local model")
@@ -125,16 +125,13 @@ def create_app_config(**kwargs):
            config_dict[key] = value
            runtime_overrides[key] = value

-    db_update_required = False
-    updates = {}
    for field in ensure_set_in_db:
+        updates = {}
        if field not in db_config or db_config[field] is None:
-            db_update_required = True
-            logger.info(f"No saved setting for {field}, saving {config_dict[field]}")  # Corrected access method
+            logger.info(f"No saved setting for {field}, saving {config_dict[field]}")
            updates[field] = config_dict[field]
-
-    if db_update_required:
-        update_config_in_database(updates)
+        if updates:
+            update_config_in_database(updates)

    global _singleton_instance
    logger.info(f"Creating AppConfig with: {config_dict}")
@@ -150,9 +147,9 @@ def load_config_from_database():
    return DataManager().get_settings()


-def update_config_in_database(settings):
+def update_config_in_database(settings, delete_others=False):
    from selfie.database import DataManager
-    DataManager().save_settings(settings, delete_others=True)
+    DataManager().save_settings(settings, delete_others=delete_others)


 def get_app_config():
diff --git a/selfie/embeddings/importance_scorer.py b/selfie/embeddings/importance_scorer.py
index 439917d..961cd15 100644
--- a/selfie/embeddings/importance_scorer.py
+++ b/selfie/embeddings/importance_scorer.py
@@ -45,7 +45,7 @@ def calculate_raw_score(self, document: EmbeddingDocumentModel):

        llm = LLM(
            config.local_functionary_model,
-            verbose=config.verbose,
+            verbose=config.verbose_logging,
            n_gpu_layers=-1 if config.gpu else 0,
            method="llama.cpp",
            chat_format="functionary",
diff --git a/selfie/logging.py b/selfie/logging.py
index c166a14..a231fec 100644
--- a/selfie/logging.py
+++ b/selfie/logging.py
@@ -19,11 +19,10 @@ def setup_logging():

    logger.setLevel(level)

    file_handler = RotatingFileHandler(log_path, maxBytes=1024*1024, backupCount=5)
-    file_handler.setLevel(level)
+    file_handler.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
-

 setup_logging()
diff --git a/selfie/text_generation/generation.py b/selfie/text_generation/generation.py
index a11da8f..c74bc9b 100644
--- a/selfie/text_generation/generation.py
+++ b/selfie/text_generation/generation.py
@@ -56,7 +56,7 @@ async def completion(request: CompletionRequest | ChatCompletionRequest) -> Self
    if method == "llama.cpp":
        model = request.model or config.model
        logger.info(f"Using llama.cpp model {model}")
-        llm = get_llama_cpp_llm(model, config.verbose, config.gpu)
+        llm = get_llama_cpp_llm(model, config.verbose_logging, config.gpu)
        completion_fn = (llm.create_chat_completion if chat_mode else llm.create_completion)