-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathutils.py
198 lines (153 loc) · 6.48 KB
/
utils.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
import streamlit as st
import os
import pandas as pd
import json
import base64
import platform
import numpy as np
from datetime import datetime
from typing import List, Optional, Union
from streamlit_extras.app_logo import add_logo
from configparser import ConfigParser
from database.database import engine
from sqlalchemy import text
import torch
global_system_prompt = "你的名字叫小智,是AI-Labs团队的营销人员,也是一名经验丰富的服装营销人员,精通服装设计、服饰搭配、服装销售、服装信息咨询、售后服务等各类问题。你说话优雅、有艺术感、必要时可以引用典故,你总是称呼客户为朋友。说重点,别说废话。"
check_use_limit = bool(os.environ.get("config_check_use_limit", "False"))
use_cache_flag = False
cuda_size_24gb = 22000 # 24566
cuda_size_40gb = 40000 # 40950
main_container_height = 680 # int(os.environ.get("config_main_container_height", "685"))
zhipuai_models = ["glm-4-local"]
zhipuai_online_models = ["glm-4-flash"]
internlm_models = ["internlm2.5"]
internlm_online_models = ["internlm2.5-latest"]
yi_models = ["Yi-1.5"]
local_models = internlm_online_models + zhipuai_online_models
def init_page_header(title, icon):
    """Configure the Streamlit page and render the sidebar/page headers.

    Args:
        title: Page title shown in the browser tab and headers.
        icon: Emoji/icon prefixed to the title everywhere it appears.
    """
    about_markdown = """
## 🏡智能营销助手
众所周知,获客、活客、留客是电商行业的三大难题,谁拥有跟客户最佳的沟通方式,谁就拥有客户。
随着用户消费逐渐转移至线上,电商行业面临以下一些问题:
* 用户交流体验差
* 商品推荐不精准
* 客户转化率低
* 退换货频率高
* 物流成本高
在这样的背景下,未来销售的引擎——大模型加持的智能营销助手就诞生了。
它能够与用户的对话,了解用户的需求,基于多模态的AIGC生成能力,持续输出更符合用户消费习惯的文本、图片和视频等营销内容,推荐符合用户的商品,将营销与经营结合。
"""
    menu = {
        "Get Help": "https://ai.gitee.com/wux-labs/smart-sales",
        "Report a bug": "https://ai.gitee.com/wux-labs/smart-sales/issues",
        "About": about_markdown,
    }
    st.set_page_config(
        page_title=title,
        page_icon=icon,
        layout="wide",
        menu_items=menu,
    )
    # add_logo("statics/doc/logo.png", height=160)
    st.sidebar.header(f"{icon}{title}")
    st.subheader(f"{icon}{title}", divider='rainbow')
def init_session_state():
    """Seed st.session_state with defaults for any keys not yet present.

    Existing values are never overwritten, so reruns keep user state.
    """
    defaults = {
        "userid": 1,
        "username": "guest",
        "fullname": "游客",
        "rolename": "买家",
        "aigc_temp_freq": 3,   # remaining temporary AIGC uses
        "aigc_perm_freq": 0,   # remaining permanent AIGC uses
        "showlimit": 10,
    }
    for key, value in defaults.items():
        if key not in st.session_state.keys():
            st.session_state[key] = value
def load_lottiefile(filepath: str):
    """Load a Lottie animation JSON file.

    Args:
        filepath: Path to the .json Lottie asset.

    Returns:
        The parsed JSON object (dict or list).

    Raises:
        OSError: If the file cannot be opened.
        json.JSONDecodeError: If the file is not valid JSON.
    """
    # Explicit encoding avoids platform-dependent decoding of UTF-8 assets
    # (the default encoding on Windows is not UTF-8).
    with open(filepath, "r", encoding="utf-8") as f:
        return json.load(f)
def get_config(conf_key):
    """Read a value from the [Server] section of ./config.ini.

    Args:
        conf_key: Option name within the [Server] section.

    Returns:
        The raw string value for the option.
    """
    parser = ConfigParser()
    parser.read("config.ini")
    section = parser["Server"]
    return section[conf_key]
def get_avatar(model_id):
    """Map a model id to its avatar image path.

    Falls back to the generic sales avatar for unknown ids.
    """
    avatar_groups = [
        (internlm_models + internlm_online_models, "statics/avatar/internlm.png"),
        (zhipuai_models + zhipuai_online_models, "statics/avatar/chatglm.png"),
        (yi_models, "statics/avatar/yi.png"),
        (["stable-diffusion"], "statics/avatar/stablediffusion.png"),
        (["myshell/melotts"], "statics/avatar/melotts.png"),
    ]
    for model_ids, path in avatar_groups:
        if model_id in model_ids:
            return path
    return "statics/avatar/sales.png"
def select_aigc_left_freq():
    """Fetch the current user's remaining AIGC quotas from the database.

    Returns:
        A (aigc_temp_freq, aigc_perm_freq) row for the session's username,
        or None when the user does not exist or the query fails (the error
        is surfaced in the UI via st.exception).
    """
    row = None
    try:
        with engine.connect() as conn:
            stmt = text('''
                select aigc_temp_freq, aigc_perm_freq from ai_labs_user where username = :username;
            ''')
            row = conn.execute(stmt, [{'username': st.session_state.username}]).fetchone()
    except Exception as err:
        st.exception(err)
    return row
def update_aigc_perm_freq(count):
    """Adjust the user's permanent AIGC quota by `count` (may be negative).

    The update only applies while aigc_perm_freq > 0, so an exhausted
    quota is never decremented below its floor by this path.

    Args:
        count: Delta to add to aigc_perm_freq.
    """
    try:
        with engine.connect() as conn:
            # `count` is bound as a parameter instead of being f-string
            # interpolated into the SQL text — safer and consistent with
            # how :username is already handled.
            sql = text('''
                update ai_labs_user set aigc_perm_freq = aigc_perm_freq + :count where username = :username and aigc_perm_freq > 0;
            ''')
            conn.execute(sql, [{'count': count, 'username': st.session_state.username}])
            conn.commit()
    except Exception as e:
        st.exception(e)
def update_aigc_temp_freq(count):
    """Adjust the user's temporary (daily) AIGC quota by `count`.

    Args:
        count: Delta to add to aigc_temp_freq (may be negative).
    """
    try:
        with engine.connect() as conn:
            # Bind `count` as a parameter rather than f-string interpolation
            # into the SQL text — safer and consistent with :username.
            sql = text('''
                update ai_labs_user set aigc_temp_freq = aigc_temp_freq + :count where username = :username;
            ''')
            conn.execute(sql, [{'count': count, 'username': st.session_state.username}])
            conn.commit()
    except Exception as e:
        st.exception(e)
def use_limited():
    """Tell the user today's quota is exhausted and link to the games page."""
    quota_message = "您今日已达到使用次数限制!玩游戏放松一下,还可以获得免费使用次数哦~"
    st.warning(quota_message, icon="😭")
    st.page_link("pages/71🎮休闲游戏.py", icon="🎮")
def is_cuda_available():
    """Return whether a CUDA device is visible to torch."""
    return torch.cuda.is_available()
def is_cuda_enough(needs):
    """Check whether the visible CUDA memory is sufficient.

    Args:
        needs: Required memory in MiB.

    Returns:
        False when no CUDA device is available; True when more than one GPU
        is present (multi-GPU hosts are assumed sufficient); otherwise True
        iff the single GPU's total memory in MiB is at least `needs`.
    """
    if not torch.cuda.is_available():
        # The original called get_device_properties(0) unconditionally and
        # crashed on CPU-only hosts; report "not enough" instead.
        return False
    if torch.cuda.device_count() > 1:
        return True
    properties = torch.cuda.get_device_properties(0)
    # round() matches the original int(f'{x:.0f}') (both round-half-even).
    total_memory_mib = round(properties.total_memory / (1 << 20))
    return total_memory_mib >= needs
def clear_cuda_cache():
    """Release cached GPU memory back to the driver, if CUDA is present."""
    if not torch.cuda.is_available():
        return
    torch.cuda.empty_cache()
def clear_streamlit_cache(keeps):
    """Drop cached models/histories from st.session_state, then free CUDA memory.

    Args:
        keeps: Collection of cache keys that must survive the purge.
    """
    known_caches = (
        "chat_tokenizer", "chat_model",
        "stable_diffusion_model",
        "xcomposer2_vl_tokenizer", "xcomposer2_vl_model",
        "whisper_model_base", "whisper_model_small", "whisper_model_medium", "whisper_model_large",
        "ask_product_history", "ask_product_llm", "ask_product_talker",
        "sales_agent_model", "last_sale_talker",
        "service_history",
    )
    for name in known_caches:
        if name in keeps:
            continue
        if name in st.session_state.keys():
            del st.session_state[name]
    clear_cuda_cache()
def image_to_base64(image_path):
    """Read an image file and return its contents as a Base64 string.

    Args:
        image_path: Path to the image file.

    Returns:
        The file's bytes encoded as a Base64 ASCII string.
    """
    with open(image_path, 'rb') as image_file:
        raw_bytes = image_file.read()
    return base64.b64encode(raw_bytes).decode('utf-8')