
Commit 2d325c0

refactor diloco test
Summary:
- move the training loop to a separate file
- convert it into a class so that methods can be overridden without having to duplicate code
1 parent dd45c41 commit 2d325c0
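
To illustrate the point of the refactor, here is a minimal sketch of the override pattern it enables; the subclass name and optimizer hyperparameters below are hypothetical, not part of this commit:

# Hypothetical sketch (not in this commit): with the training loop factored
# into a class, a test variant overrides one setup method and inherits the
# rest of the setup and the loop unchanged.
class SGDDiLoCoTrainer(DiLoCoTrainer):
    def setup_inner_optimizer(self) -> torch.optim.Optimizer:
        # Swap the inner AdamW for plain SGD; the learning rate here is
        # illustrative only.
        return torch.optim.SGD(self.model.parameters(), lr=1e-3)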

File tree

2 files changed: +318 -179 lines changed


torchft/diloco_trainer.py

Lines changed: 313 additions & 0 deletions
@@ -0,0 +1,313 @@
import copy
import logging
import os
from datetime import timedelta
from typing import Any, Dict, cast

import torch
from torch import nn
from torch.distributed.tensor import DTensor

from torchft.device_mesh import ManagedDeviceMesh, ft_init_device_mesh
from torchft.local_sgd import DiLoCo
from torchft.manager import Manager
from torchft.manager_integ_test import MyModel, Runner
from torchft.process_group import (
    FakeProcessGroupWrapper,
    ProcessGroupBabyNCCL,
    ProcessGroupGloo,
)

logger: logging.Logger = logging.getLogger(__name__)


class MultiModel(torch.nn.Module):
    def __init__(self, in_dim: int = 3, out_dim: int = 4, n_layers: int = 1) -> None:
        super().__init__()
        self.layers = torch.nn.ModuleList()

    def get_rand_inputs(
        self, batch_size: int, device: torch.device = torch.device("cpu")
    ) -> torch.Tensor:
        raise NotImplementedError

    def get_rand_labels(
        self, batch_size: int, device: torch.device = torch.device("cpu")
    ) -> torch.Tensor:
        raise NotImplementedError


class MultiMyModel(MultiModel):
    def __init__(self, in_dim: int = 3, out_dim: int = 4, n_layers: int = 1) -> None:
        super().__init__()
        self.in_dim = in_dim

        for _ in range(n_layers):
            self.layers.append(MyModel(in_dim, out_dim))
            # Swap dimensions so consecutive layers compose
            in_dim, out_dim = out_dim, in_dim

        self.out_dim = in_dim

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        for layer in self.layers:
            x = layer(x)
        return x

    def get_rand_inputs(
        self, batch_size: int, device: torch.device = torch.device("cpu")
    ) -> torch.Tensor:
        return torch.rand(batch_size, self.in_dim, device=device)

    def get_rand_labels(
        self, batch_size: int, device: torch.device = torch.device("cpu")
    ) -> torch.Tensor:
        return torch.randint(self.out_dim, (batch_size,), device=device)


class DiLoCoTrainer:
    """
    A class that encapsulates the DiLoCo training process.
    """

    def __init__(
        self,
        rank: int,
        store_port: int,
        device: torch.device,
        runner: Runner,
        model_state_dict: dict[str, Any],
        n_fragments: int,
        diloco_args: dict[str, Any],
    ) -> None:
        """
        Initialize the DiLoCoTrainer.

        Args:
            rank: The rank of the current process.
            store_port: The port for the store.
            device: The device to use for training.
            runner: The runner instance.
            model_state_dict: The state dict used to initialize the model.
            n_fragments: The number of model fragments (one outer optimizer each).
            diloco_args: Additional arguments forwarded to DiLoCo.
        """
        self.rank: int = rank
        self.store_port: int = store_port
        self.device: torch.device = device
        self.runner: Runner = runner

        self.model_state_dict: Dict[str, Any] = model_state_dict
        self.n_fragments: int = n_fragments
        self.diloco_args: dict[str, Any] = diloco_args

        # Initialize components
        self.model: MultiModel = self.setup_model()
        self.inner_optimizer: torch.optim.Optimizer = self.setup_inner_optimizer()
        self.outer_optimizers: list[torch.optim.Optimizer] = (
            self.setup_outer_optimizers()
        )

        self.pg: FakeProcessGroupWrapper = self.setup_pg()
        # Set up the process group for the event injector
        self.runner.event_injector.set_pg(self.pg)

        self.manager: Manager = self.setup_manager()

        self.ft_device_mesh: None | ManagedDeviceMesh = None
        self.setup_distributed()

        self.criterion: nn.CrossEntropyLoss = nn.CrossEntropyLoss()

        self.diloco: DiLoCo | None = None

    def setup_model(self) -> MultiModel:
        """Set up the model and move it to the device."""
        model = MultiMyModel(2, 3, self.n_fragments)
        model.load_state_dict(self.model_state_dict)
        model.to(self.device)
        return model

    def setup_inner_optimizer(self) -> torch.optim.Optimizer:
        """Set up the inner optimizer."""
        return torch.optim.AdamW(
            self.model.parameters(), lr=4e-4, weight_decay=0.1, betas=(0.9, 0.95)
        )

    def setup_outer_optimizers(self) -> list[torch.optim.Optimizer]:
        """Set up the outer optimizers, one per fragment."""
        outer_optimizers = []
        for layer in self.model.layers:
            outer_optimizers.append(
                torch.optim.SGD(
                    layer.parameters(), lr=0.7, momentum=0.9, nesterov=True
                )
            )
        return outer_optimizers

    def setup_pg(self) -> FakeProcessGroupWrapper:
        """Set up the fake process group wrapper for the current device type."""
        if self.device.type == "cuda":
            return FakeProcessGroupWrapper(ProcessGroupBabyNCCL())
        else:
            return FakeProcessGroupWrapper(
                ProcessGroupGloo(timeout=timedelta(seconds=10))
            )

    def setup_manager(self) -> Manager:
        """Set up the manager."""
        print(
            f"worker {self.runner.replica_id=} {self.rank=} {self.runner.world_size=} starting"
        )

        # Create manager with all arguments passed directly
        return Manager(
            pg=self.pg,
            min_replica_size=2,
            use_async_quorum=False,
            load_state_dict=self.load_state_dict,
            state_dict=self.state_dict,
            replica_id=str(self.runner.replica_id),
            store_addr="localhost",
            store_port=self.store_port,
            rank=self.rank,
            world_size=self.runner.world_size,
            lighthouse_addr=self.runner.lighthouse_address,
            port=19530 + self.runner.replica_id,
            connect_timeout=timedelta(seconds=10),
            quorum_timeout=timedelta(seconds=10),
            timeout=timedelta(seconds=10),
            **self.runner.manager_args,  # type: ignore
        )

    def setup_distributed(self) -> None:
        """Set up distributed training."""
        # Initialize default group for device mesh to work
        if not torch.distributed.is_initialized():
            # TODO: remove this try-except once pytorch is updated to 2.8.0 and can use localhost:0
            try:
                torch.distributed.init_process_group(
                    init_method="tcp://localhost:0",
                    rank=self.rank,
                    world_size=self.runner.world_size,
                )
            except ValueError:
                os.environ["MASTER_ADDR"] = "localhost"
                os.environ["MASTER_PORT"] = "0"
                os.environ["WORLD_SIZE"] = str(self.runner.world_size)
                os.environ["RANK"] = str(self.rank)

        self.ft_device_mesh = ft_init_device_mesh(
            device_type=self.device.type,
            mesh_shape=(self.runner.world_size, 1),
            mesh_dim_names=("replicate", "none"),
            replicate_dim=0,
            manager=self.manager,
        )

        # Convert model parameters to DTensor. Re-register the converted
        # tensors on the module; rebinding the loop variable alone would be
        # a no-op that leaves the original parameters in place.
        for layer in self.model.layers:
            if isinstance(layer, nn.Linear):
                for name, param in list(layer.named_parameters(recurse=False)):
                    setattr(
                        layer,
                        name,
                        nn.Parameter(
                            DTensor.from_local(param, device_mesh=self.ft_device_mesh)
                        ),
                    )

    def load_state_dict(self, state_dict: Dict[str, Dict[str, object]]) -> None:
        """
        Load the state dictionary.

        Args:
            state_dict: The state dictionary to load.
        """
        assert self.diloco is not None

        self.model.load_state_dict(state_dict["model"])
        self.model.to(self.device)

        # Load original parameters for each fragment
        for i, fragment in enumerate(cast(DiLoCo, self.diloco)._fragments):
            fragment.original_parameters = cast(
                Dict[str, torch.Tensor], state_dict["original_params"][f"{i}"]
            )

        # Move the original parameters to the training device
        for fragment in cast(DiLoCo, self.diloco)._fragments:
            for name in fragment.original_parameters.keys():
                fragment.original_parameters[name] = fragment.original_parameters[
                    name
                ].to(self.device)

        self.inner_optimizer.load_state_dict(state_dict["inner_optim"])
        for i, optimizer in enumerate(self.outer_optimizers):
            optimizer.load_state_dict(
                cast(dict[str, torch.Tensor], state_dict["outer_optim"][f"{i}"])
            )

    def state_dict(self) -> Dict[str, Dict[str, object]]:
        """
        Get the state dictionary.

        Returns:
            The state dictionary.
        """
        assert self.diloco is not None

        return {
            "model": self.model.state_dict(),
            "original_params": {
                f"{i}": fragment.original_parameters
                for i, fragment in enumerate(cast(DiLoCo, self.diloco)._fragments)
            },
            "inner_optim": self.inner_optimizer.state_dict(),
            "outer_optim": {
                f"{i}": optimizer.state_dict()
                for i, optimizer in enumerate(self.outer_optimizers)
            },
        }

    def train_loop(self) -> dict[str, Any]:
        """Run the training loop and return the manager state dicts seen at each step."""
        all_state_dicts = {}

        # Ensure sync_every is set in diloco_args
        if "sync_every" not in self.diloco_args:
            self.diloco_args["sync_every"] = 2

        with DiLoCo(
            self.manager,
            [layer for layer in self.model.layers],
            self.inner_optimizer,
            self.outer_optimizers,
            backup_device=self.device,
            **self.diloco_args,
        ) as self.diloco:
            while True:
                self.runner.event_injector.check(self.rank, self.manager.current_step())

                manager_curr_step = self.manager.current_step()
                if manager_curr_step not in all_state_dicts:
                    # Store a copy of the manager state dict for this step
                    all_state_dicts[manager_curr_step] = copy.deepcopy(
                        self.manager._manager_state_dict()
                    )

                batch_size = 1
                inputs = self.model.get_rand_inputs(batch_size, device=self.device)
                labels = self.model.get_rand_labels(batch_size, device=self.device)

                out = self.model(inputs)
                loss = self.criterion(out, labels)

                self.inner_optimizer.zero_grad()
                loss.backward()
                self.inner_optimizer.step()

                # Break after four model updates
                if self.manager.current_step() >= 4:
                    break

        return all_state_dicts
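
For orientation, a rough sketch of how the class might be driven end to end; the wrapper function and the argument values below are assumptions for illustration, not code from this commit:

# Hypothetical usage sketch: build a trainer and run its loop, returning the
# captured manager state dicts. `runner` is assumed to come from the existing
# manager_integ_test harness.
def run_diloco_trainer(
    rank: int, store_port: int, device: torch.device, runner: Runner
) -> dict[str, Any]:
    model = MultiMyModel(2, 3, n_layers=2)
    trainer = DiLoCoTrainer(
        rank=rank,
        store_port=store_port,
        device=device,
        runner=runner,
        model_state_dict=model.state_dict(),
        n_fragments=2,
        diloco_args={"sync_every": 2},
    )
    return trainer.train_loop()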
