Skip to content

Commit

Permalink
Configurable verbose logging
Browse files Browse the repository at this point in the history
  • Loading branch information
tnunamak committed Apr 3, 2024
1 parent 78569cd commit 5cbee2a
Show file tree
Hide file tree
Showing 6 changed files with 19 additions and 20 deletions.
4 changes: 2 additions & 2 deletions selfie-ui/src/app/components/Settings/Settings.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -25,13 +25,13 @@ const Settings = () => {
enumNames: ['Local (llama.cpp)', 'Other (litellm)'],
default: "llama.cpp"
},
verbose_logging: { type: "boolean", title: "Verbose logging", default: false },
// name: { type: "string", title: "Name" },
// description: { type: "string", title: "Description" },
// apiKey: { type: "string", title: "API Key" },
// host: { type: "string", title: "Host", default: "http://localhost" },
// port: { type: "integer", title: "Port", default: 8000 },
// share: { type: "boolean", title: "Share", default: false },
// verbose: { type: "boolean", title: "Verbose", default: false },
},
allOf: [
{
Expand Down Expand Up @@ -128,7 +128,7 @@ const Settings = () => {
console.log(models)

const uiSchema = {
'ui:order': ['gpu', 'ngrok_enabled', 'ngrok_authtoken', 'ngrok_domain', '*'],
'ui:order': ['gpu', 'ngrok_enabled', 'ngrok_authtoken', 'ngrok_domain', '*', 'verbose_logging'],

method: {
"ui:widget": "radio",
Expand Down
9 changes: 6 additions & 3 deletions selfie/__main__.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ def deserialize_args_from_env():
'api_port': ('SELFIE_API_PORT', int),
'gpu': ('SELFIE_GPU', to_bool),
'reload': ('SELFIE_RELOAD', to_bool),
'verbose': ('SELFIE_VERBOSE', to_bool),
'verbose_logging': ('SELFIE_VERBOSE_LOGGING', to_bool),
'headless': ('SELFIE_HEADLESS', to_bool),
'model': ('SELFIE_MODEL', str),
}
Expand Down Expand Up @@ -61,7 +61,7 @@ def parse_args():
parser.add_argument("--api_port", type=int, default=None, help="Specify the port to run on")
parser.add_argument("--gpu", default=None, action="store_true", help="Enable GPU support")
parser.add_argument("--reload", action="store_true", default=None, help="Enable hot-reloading")
parser.add_argument("--verbose", action="store_true", default=None, help="Enable verbose logging")
parser.add_argument("--verbose_logging", action="store_true", default=None, help="Enable verbose logging")
parser.add_argument("--headless", action="store_true", default=None, help="Run in headless mode (no GUI)")
parser.add_argument("--model", type=str, default=None, help="Specify the model to use")
args = parser.parse_args()
Expand All @@ -74,12 +74,15 @@ def get_configured_app(shareable=False):

logger.info(f"Running with args: {args}")

if 'verbose' in args and args.verbose:
if 'verbose_logging' in args and args.verbose_logging:
logging.getLogger("selfie").setLevel(level=logging.DEBUG)

logger.info("Creating app configuration")
app_config = create_app_config(**vars(args))

if app_config.verbose_logging:
logging.getLogger("selfie").setLevel(level=logging.DEBUG)

if shareable and app_config.ngrok_enabled:
if app_config.ngrok_authtoken is None:
raise ValueError("ngrok_authtoken is required to share the API.")
Expand Down
19 changes: 8 additions & 11 deletions selfie/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
default_hosted_model = 'openai/gpt-3.5-turbo'
default_local_gpu_model = 'TheBloke/Mistral-7B-OpenOrca-GPTQ'

ensure_set_in_db = ['gpu', 'method', 'model']
ensure_set_in_db = ['gpu', 'method', 'model', 'verbose_logging']


# TODO: This is not accurate, e.g. if the user starts the app with --gpu
Expand All @@ -36,7 +36,7 @@ class AppConfig(BaseModel):
api_port: Optional[int] = Field(default=default_port, description="Specify the port to run on")
share: bool = Field(default=False, description="Enable sharing via ngrok")
gpu: bool = Field(default=get_default_gpu_mode(), description="Enable GPU support")
verbose: bool = Field(default=False, description="Enable verbose logging")
verbose_logging: bool = Field(default=False, description="Enable verbose logging")
db_name: str = Field(default='selfie.db', description="Database name")
method: str = Field(default=default_method, description="LLM provider method, llama.cpp or litellm")
model: str = Field(default=default_local_model, description="Local model")
Expand Down Expand Up @@ -125,16 +125,13 @@ def create_app_config(**kwargs):
config_dict[key] = value
runtime_overrides[key] = value

db_update_required = False
updates = {}
for field in ensure_set_in_db:
updates = {}
if field not in db_config or db_config[field] is None:
db_update_required = True
logger.info(f"No saved setting for {field}, saving {config_dict[field]}") # Corrected access method
logger.info(f"No saved setting for {field}, saving {config_dict[field]}")
updates[field] = config_dict[field]

if db_update_required:
update_config_in_database(updates)
if updates:
update_config_in_database(updates)

global _singleton_instance
logger.info(f"Creating AppConfig with: {config_dict}")
Expand All @@ -150,9 +147,9 @@ def load_config_from_database():
return DataManager().get_settings()


def update_config_in_database(settings):
def update_config_in_database(settings, delete_others=False):
    """Persist the given settings to the application database.

    Args:
        settings: Mapping of configuration keys to values to store.
        delete_others: Forwarded to ``DataManager.save_settings``;
            presumably controls whether stored settings absent from
            *settings* are removed — confirm against DataManager.
            Defaults to False so existing entries are kept.
    """
    # Imported at call time rather than module level, presumably to
    # avoid an import cycle with selfie.database — mirrors the
    # original inline import.
    from selfie.database import DataManager

    manager = DataManager()
    manager.save_settings(settings, delete_others=delete_others)


def get_app_config():
Expand Down
2 changes: 1 addition & 1 deletion selfie/embeddings/importance_scorer.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ def calculate_raw_score(self, document: EmbeddingDocumentModel):

llm = LLM(
config.local_functionary_model,
verbose=config.verbose,
verbose=config.verbose_logging,
n_gpu_layers=-1 if config.gpu else 0,
method="llama.cpp",
chat_format="functionary",
Expand Down
3 changes: 1 addition & 2 deletions selfie/logging.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,11 +19,10 @@ def setup_logging():
logger.setLevel(level)

file_handler = RotatingFileHandler(log_path, maxBytes=1024*1024, backupCount=5)
file_handler.setLevel(level)
file_handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler.setFormatter(formatter)

logger.addHandler(file_handler)


setup_logging()
2 changes: 1 addition & 1 deletion selfie/text_generation/generation.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ async def completion(request: CompletionRequest | ChatCompletionRequest) -> Self
if method == "llama.cpp":
model = request.model or config.model
logger.info(f"Using llama.cpp model {model}")
llm = get_llama_cpp_llm(model, config.verbose, config.gpu)
llm = get_llama_cpp_llm(model, config.verbose_logging, config.gpu)

completion_fn = (llm.create_chat_completion if chat_mode else llm.create_completion)

Expand Down

0 comments on commit 5cbee2a

Please sign in to comment.