Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
58 changes: 52 additions & 6 deletions app/services/grok/services/video.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,44 @@
from app.services.token import EffortType, get_token_manager
from app.services.token.manager import BASIC_POOL_NAME

async def _generate_continuation_prompt(original_prompt: str, round_index: int, token: str) -> str:
    """Generate a unique continuation prompt for one video extension round.

    Calls the local Grok-compatible chat endpoint to produce a short prompt
    that naturally continues the original scene. Falls back to
    ``original_prompt`` on ANY failure (network error, non-200, empty body)
    so that an extension round is never aborted by prompt generation.

    Args:
        original_prompt: Prompt used for the first video round.
        round_index: Index of the extension round being generated.
        token: Upstream auth token — currently unused; kept so the call
            signature stays stable for existing callers.

    Returns:
        The generated continuation prompt, or ``original_prompt`` on failure.
    """
    try:
        # Local import: curl_cffi is optional at module import time; an
        # ImportError here is absorbed by the fallback path below.
        from curl_cffi.requests import AsyncSession

        api_key = get_config("app.api_key")
        headers = {"Content-Type": "application/json"}
        if api_key:
            headers["Authorization"] = f"Bearer {api_key}"
        system_msg = (
            f"You are a video director. The original video prompt is: '{original_prompt}'. "
            f"Generate a SHORT continuation prompt for extension round {round_index} that naturally "
            f"continues the scene with new action or movement. "
            f"Return ONLY the prompt text, no explanation, max 20 words."
        )
        payload = {
            "model": "grok-3-fast",
            "messages": [{"role": "user", "content": system_msg}],
            "temperature": 0.8,
            "max_tokens": 60,
            "stream": False,
        }
        async with AsyncSession() as session:
            resp = await session.post(
                "http://localhost:8000/v1/chat/completions",
                headers=headers,
                json=payload,
                timeout=15,
            )
            if resp.status_code == 200:
                data = resp.json()
                # Guard against an empty or malformed "choices" array instead
                # of letting IndexError fall through to the broad except below
                # (which would mislabel a well-formed-but-empty reply as a
                # transport failure).
                choices = data.get("choices") or []
                message = choices[0].get("message", {}) if choices else {}
                content = (message.get("content") or "").strip()
                if content:
                    logger.info(f"LLM continuation prompt (round {round_index}): {content}")
                    return content
            else:
                # Previously a non-200 fell through silently; log it so
                # misconfigured endpoints/keys are diagnosable.
                logger.warning(
                    f"LLM prompt generation got HTTP {resp.status_code} (round {round_index})"
                )
    except Exception as e:
        logger.warning(f"LLM prompt generation failed (round {round_index}): {e}")
    return original_prompt

_VIDEO_SEMAPHORE = None
_VIDEO_SEM_VALUE = 0
_APP_CHAT_MODEL = "grok-3"
Expand Down Expand Up @@ -880,19 +918,21 @@ async def _run_round_collect(
last_id: str,
original_id: Optional[str],
source: str,
round_prompt: str = None,
) -> VideoRoundResult:
rp = round_prompt if round_prompt is not None else prompt
config_override = _build_round_config(
plan,
seed_post_id=seed_id,
last_post_id=last_id,
original_post_id=original_id,
prompt=prompt,
prompt=rp,
aspect_ratio=aspect_ratio,
resolution_name=generation_resolution,
)
response = await _request_round_stream(
token=token,
message=message,
message=_build_message(rp, preset),
model_config_override=config_override,
)
return await _collect_round_result(response, model=model, source=source)
Expand All @@ -906,18 +946,21 @@ async def _stream_chain() -> AsyncGenerator[str, None]:

try:
for plan in round_plan:
round_prompt = prompt
if plan.is_extension:
round_prompt = await _generate_continuation_prompt(prompt, plan.round_index, token)
config_override = _build_round_config(
plan,
seed_post_id=seed_id,
last_post_id=last_id,
original_post_id=original_id,
prompt=prompt,
prompt=round_prompt,
aspect_ratio=aspect_ratio,
resolution_name=generation_resolution,
)
response = await _request_round_stream(
token=token,
message=message,
message=_build_message(round_prompt, preset),
model_config_override=config_override,
)

Expand All @@ -926,8 +969,7 @@ async def _stream_chain() -> AsyncGenerator[str, None]:
response,
model=model,
source=f"stream-round-{plan.round_index}",
):
if event_type == "progress":
):
    if event_type == "progress":
for chunk in writer.emit_progress(
round_index=plan.round_index,
total_rounds=plan.total_rounds,
Expand Down Expand Up @@ -1016,12 +1058,16 @@ async def _collect_chain() -> Dict[str, Any]:
final_result: Optional[VideoRoundResult] = None

for plan in round_plan:
round_prompt = prompt
if plan.is_extension:
round_prompt = await _generate_continuation_prompt(prompt, plan.round_index, token)
round_result = await _run_round_collect(
plan,
seed_id=seed_id,
last_id=last_id,
original_id=original_id,
source=f"collect-round-{plan.round_index}",
round_prompt=round_prompt,
)

_ensure_round_result(
Expand Down
2 changes: 1 addition & 1 deletion app/services/grok/services/video_extend.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
from app.core.exceptions import AppException, ErrorType, UpstreamException, ValidationException
from app.core.logger import logger
from app.services.grok.services.model import ModelService
from app.services.grok.services.video import VideoCollectProcessor
from app.services.grok.services.video import VideoCollectProcessor, _generate_continuation_prompt
from app.services.reverse.app_chat import AppChatReverse
from app.services.reverse.utils.session import ResettableSession
from app.services.token import EffortType, get_token_manager
Expand Down
36 changes: 22 additions & 14 deletions app/services/token/manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -626,23 +626,21 @@ async def sync_usage(
async def record_fail(
self, token_str: str, status_code: int = 401, reason: str = "", threshold: Optional[int] = None
) -> bool:
"""
记录 Token 失败

Args:
token_str: Token 字符串
status_code: HTTP Status Code
reason: 失败原因
threshold: 强制失败阈值

Returns:
是否成功
"""
raw_token = token_str.replace("sso=", "")

for pool in self.pools.values():
for pool_name, pool in self.pools.items():
token = pool.get(raw_token)
if token:
# 400 + email-domain-rejected → remove the token immediately
if status_code == 400 and "email-domain-rejected" in reason:
pool.remove(raw_token)
self._track_token_delete(raw_token)
await self._save(force=True)
logger.warning(
f"Token {raw_token[:10]}...: email-domain-rejected, removed from pool '{pool_name}'"
)
return True

if status_code == 401:
if threshold is None:
threshold = get_config("token.fail_threshold", FAIL_THRESHOLD)
Expand All @@ -661,7 +659,7 @@ async def record_fail(
f"Token {raw_token[:10]}...: recorded {status_code} failure "
f"({token.fail_count}/{threshold}) - {reason} - status: {token.status}"
)
self._track_token_change(token, pool.name, "state")
self._track_token_change(token, pool_name, "state")
self._schedule_save()
else:
logger.info(
Expand Down Expand Up @@ -1025,6 +1023,16 @@ async def _refresh_one(item: tuple[str, TokenInfo]) -> dict:
)
return {"recovered": False, "expired": False}

if status == 400 and error and "email-domain-rejected" in str(error):
logger.warning(
f"Token {token_info.token[:10]}...: email-domain-rejected (400), removing from pool"
)
current_pool = self.get_pool_name_for_token(token_info.token)
if current_pool and current_pool in self.pools:
self.pools[current_pool].remove(token_info.token)
self._track_token_delete(token_info.token)
return {"recovered": False, "expired": True}

if error:
logger.warning(
f"Token {token_info.token[:10]}...: refresh failed ({error})"
Expand Down
6 changes: 6 additions & 0 deletions docs/README.en.md
Original file line number Diff line number Diff line change
Expand Up @@ -125,6 +125,12 @@ docker compose up -d
| `grok-imagine-1.0-edit` | - | Basic/Super | - | Yes | - |
| `grok-imagine-1.0-video` | - | Basic/Super | - | - | Yes |

> [!WARNING]
> **Image and video generation are currently disabled on free (Basic) Grok accounts.**
> X/xAI has restricted image and video generation features to paid/verified accounts only.
> Tokens registered with free accounts will receive errors when calling image or video models.
> Only use **Super** (paid/verified) account tokens for image and video generation.

<br>

## API
Expand Down
6 changes: 6 additions & 0 deletions readme.md
Original file line number Diff line number Diff line change
Expand Up @@ -125,6 +125,12 @@ docker compose up -d
| `grok-imagine-1.0-edit` | - | Basic/Super | - | 支持 | - |
| `grok-imagine-1.0-video` | - | Basic/Super | - | - | 支持 |

> [!WARNING]
> **图像生成和视频生成功能目前在免费(Basic)Grok 账号上已被禁用。**
> X/xAI 已将图像和视频生成功能限制为仅付费/已验证账号可用。
> 使用免费账号注册的 Token 在调用图像或视频模型时会收到错误。
> 请仅使用 **Super**(付费/已验证)账号的 Token 进行图像和视频生成。

<br>

## 接口说明
Expand Down
Loading