We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent c5478aa · commit d50d76d — Copy full SHA for d50d76d
clip/model.py
@@ -419,7 +419,7 @@ def build_model(state_dict: dict):
419
vocab_size = state_dict["token_embedding.weight"].shape[0]
420
transformer_width = state_dict["ln_final.weight"].shape[0]
421
transformer_heads = transformer_width // 64
422
- transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
+ transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith("transformer.resblocks")))
423
424
model = CLIP(
425
embed_dim,
0 commit comments