-
Notifications
You must be signed in to change notification settings - Fork 8
Expand file tree
/
Copy path: gpt2.py
More file actions
28 lines (22 loc) · 710 Bytes
/
gpt2.py
File metadata and controls
28 lines (22 loc) · 710 Bytes
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
from __future__ import annotations
import torch
import utils
from transformers import AutoConfig, AutoModel, GPT2Model, GPT2LMHeadModel, GPT2Tokenizer
from flops_profiler.profiler import get_model_profile
# Profile a pretrained GPT-2 LM head model: load it, build one batch of test
# tokens, and report FLOPs / MACs / parameter counts via flops_profiler.
name = 'gpt2'
use_cuda = True
# Use the first CUDA device when available and enabled; otherwise fall back to CPU.
device = (
    torch.device('cuda:0')
    if torch.cuda.is_available() and use_cuda
    else torch.device('cpu')
)

# BUG FIX: this object is a GPT2Tokenizer, not a model config — the original
# bound it to a variable named `config`. It is otherwise unused here;
# loading it only pre-fetches/caches the tokenizer files.
tokenizer = GPT2Tokenizer.from_pretrained(name)
model = GPT2LMHeadModel.from_pretrained(name)
model = model.to(device)

batch_size = 1
seq_len = 128
# Renamed from `input` to avoid shadowing the builtin of the same name.
# NOTE(review): assumes utils.create_test_tokens returns a kwargs dict
# (e.g. {'input_ids': ...}) consumable as model(**kwargs) — confirm in utils.
test_tokens = utils.create_test_tokens(batch_size, seq_len, device=device)

# Single profiled forward pass; print_profile/detailed emit the per-module report.
flops, macs, params = get_model_profile(
    model,
    kwargs=test_tokens,
    print_profile=True,
    detailed=True,
)
utils.print_output(flops, macs, params)