Skip to content

Commit d2ebac5

Browse files
committed
Added zlib compression helper file and cleaned up zfp helper
1 parent 81b9386 commit d2ebac5

File tree

2 files changed

+58
-42
lines changed

2 files changed

+58
-42
lines changed

scripts/zfp_compress.py

+18-42
Original file line numberDiff line numberDiff line change
@@ -6,9 +6,24 @@
66
import os
77
import zfpy
88
import copy
9-
import zipfile
10-
import numpy as np
11-
import io
9+
10+
11+
def zfp_compress(model, name="generic_model_type", tolerance=1e-3):
    """Lossily compress a model's parameters with zfp and report the size win.

    Pickles the original state_dict to measure its on-disk size, overwrites
    the same pickle with the zfp-compressed parameters, then loads and
    decompresses them into a deep copy of the model.

    Args:
        model: module whose ``named_parameters()`` are compressed.
        name: base file name for the pickle written under ``models/``.
        tolerance: absolute error tolerance forwarded to zfpy.

    Returns:
        Tuple ``(zfp_model, org_size, compressed_size)`` where ``zfp_model``
        is a deep copy of ``model`` carrying the (lossy) round-tripped
        parameters, and the sizes are whatever ``save_pkl`` reports.
    """
    zfp_model = copy.deepcopy(model)

    # Make sure the output directory exists so save_pkl doesn't fail with
    # FileNotFoundError on a fresh checkout.
    os.makedirs("models", exist_ok=True)

    org_size = save_pkl(f'models/{name}.pkl', model.state_dict())
    print(f"Original Size: {org_size}")

    compressed_params = apply_zfp(model, tolerance)
    # Intentionally overwrites the original pickle; org_size was measured above.
    compressed_size = save_pkl(f"models/{name}.pkl", compressed_params)
    print(f"Compressed Size: {compressed_size}")

    load_and_decompress(f"models/{name}.pkl", zfp_model)

    return zfp_model, org_size, compressed_size
24+
25+
26+
#HELPERS
1227

1328
def save_pkl(file: str, state_dict: dict) -> float:
1429
with open(file, 'wb') as f:
@@ -35,43 +50,4 @@ def load_and_decompress(file: str, model) -> None:
3550
decompressed = zfpy.decompress_numpy(params[name])
3651
param.data = torch.tensor(decompressed).to(device)
3752

38-
def zfp_compress(model, name, tolerance=1e-3):
39-
zfp_model = copy.deepcopy(model)
40-
41-
org_size = save_pkl(f'models/{name}.pkl', model.state_dict())
42-
print(f"Original Size: {org_size}")
43-
44-
compressed_params = apply_zfp(model, tolerance)
45-
compressed_size = save_pkl(f"models/{name}.pkl", compressed_params)
46-
print(f"Compressed Size: {compressed_size}")
47-
48-
load_and_decompress(f"models/{name}.pkl", zfp_model)
49-
50-
return zfp_model, org_size, compressed_size
51-
52-
53-
54-
55-
# def apply_zip(model):
56-
# params = dict()
57-
# for name, param in model.named_parameters():
58-
# if param.requires_grad:
59-
# param_cpu = param.cpu().detach().numpy()
60-
61-
# array_buffer = io.BytesIO()
62-
# np.save(array_buffer, param_cpu)
63-
64-
65-
# # params[name] = zfpy.compress_numpy(param_cpu, tolerance=tolerance)
66-
# return params
67-
68-
# def zip_compress(model, name):
69-
# zip_model = copy.deepcopy(model)
70-
71-
# pkl_path = f'models/{name}.pkl'
72-
# org_size = save_pkl(pkl_path, model.state_dict())
73-
74-
75-
7653

77-

scripts/zlib_compress.py

+40
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,40 @@
1+
import copy
2+
from scripts.zfp_compress import save_pkl, load_pkl
3+
import torch
4+
import zlib
5+
import numpy as np
6+
7+
8+
def zlib_compress(model, name="generic_model_type"):
    """Losslessly compress a model's parameters with zlib and report the size win.

    Pickles the original state_dict to measure its on-disk size, overwrites
    the same pickle with the zlib-compressed parameters, then loads and
    decompresses them into a deep copy of the model.

    Args:
        model: module whose ``named_parameters()`` are compressed.
        name: base file name for the pickle written under ``models/``.

    Returns:
        Tuple ``(zlib_model, org_size, compressed_size)`` where ``zlib_model``
        is a deep copy of ``model`` with the round-tripped parameters loaded,
        and the sizes are whatever ``save_pkl`` reports.
    """
    # Local import: this module does not import os at the top level.
    import os

    zlib_model = copy.deepcopy(model)

    # Make sure the output directory exists so save_pkl doesn't fail with
    # FileNotFoundError on a fresh checkout.
    os.makedirs("models", exist_ok=True)

    org_size = save_pkl(f'models/{name}.pkl', model.state_dict())
    print(f"Original Size: {org_size}")

    compressed_params, shapes = apply_zlib(model)
    # Intentionally overwrites the original pickle; org_size was measured above.
    compressed_size = save_pkl(f"models/{name}.pkl", compressed_params)
    print(f"Compressed Size: {compressed_size}")

    load_and_decompress(f"models/{name}.pkl", zlib_model, shapes)

    return zlib_model, org_size, compressed_size
20+
21+
def apply_zlib(model):
    """zlib-compress every trainable parameter of ``model``.

    Args:
        model: module providing ``named_parameters()``.

    Returns:
        Tuple ``(params, shapes)``: ``params`` maps parameter name to the
        zlib-compressed raw bytes of the tensor; ``shapes`` maps the same
        names to their ``torch.Size`` so the flat byte stream can be
        reshaped on load.

    NOTE(review): the dtype is not recorded — load_and_decompress assumes
    float32. Confirm all compressed models use float32 parameters.
    """
    params = {}
    shapes = {}
    for name, param in model.named_parameters():
        if param.requires_grad:
            param_cpu = param.cpu().detach().numpy()
            # tobytes() also handles non-contiguous arrays, where passing the
            # ndarray straight to zlib.compress would raise BufferError; for
            # contiguous arrays the bytes (and compressed output) are identical.
            params[name] = zlib.compress(param_cpu.tobytes())
            shapes[name] = param.size()
    return params, shapes
30+
31+
32+
def load_and_decompress(file: str, model, shapes) -> None:
    """Load zlib-compressed parameters from ``file`` into ``model`` in place.

    Args:
        file: path to a pickle of the dict produced by ``apply_zlib``.
        model: module whose matching parameters are overwritten in place.
        shapes: name -> ``torch.Size`` mapping returned by ``apply_zlib``.
    """
    device = torch.device('cpu')
    params = load_pkl(file)
    for name, param in model.named_parameters():
        if name in params:
            decompressed_bytes = zlib.decompress(params[name])
            # .copy() makes the array writable: np.frombuffer returns a
            # read-only view, and torch warns on non-writable buffers.
            # NOTE(review): dtype is hard-coded; apply_zlib does not record
            # it, so non-float32 parameters would be corrupted — confirm.
            decompressed = np.frombuffer(decompressed_bytes, dtype=np.float32).copy()
            shape = shapes[name]
            param.data = torch.tensor(decompressed.reshape(shape)).to(device)

0 commit comments

Comments
 (0)