35 changes: 35 additions & 0 deletions examples/simple_generation.ipynb
@@ -99,6 +99,41 @@
" uniq_resources=100)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{1: 'hello'}\n"
]
},
{
"data": {
"text/plain": "dict"
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"d = {1: 'hello'}\n",
"print(d)\n",
"\n",
"v = eval(str(d))\n",
"type(v)"
],
"metadata": {
"collapsed": false,
"ExecuteTime": {
"end_time": "2025-09-17T08:15:05.619946Z",
"start_time": "2025-09-17T08:15:05.611943300Z"
}
}
},
{
"cell_type": "markdown",
"metadata": {
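Note on the new notebook cell: eval(str(d)) rebuilds the dict from its repr, but eval will execute arbitrary expressions, so it is unsafe on anything untrusted. A minimal sketch of a safer round-trip, assuming only Python literals need to be parsed:

import ast

d = {1: 'hello'}
# ast.literal_eval accepts only literal syntax, so the string cannot execute code
v = ast.literal_eval(str(d))
assert v == d and type(v) is dict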
2 changes: 1 addition & 1 deletion examples/simple_synthetic_graph_scheduling.py
@@ -31,7 +31,7 @@

# Get information about created WorkGraph's attributes
works_count = len(wg.nodes)
work_names_count = len(set(n.work_unit.name for n in wg.nodes))
work_names_count = len(set(n.work_unit.model_name for n in wg.nodes))
res_kind_count = len(set(req.kind for req in chain(*[n.work_unit.worker_reqs for n in wg.nodes])))
print(works_count, work_names_count, res_kind_count)

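Note: elsewhere in this PR (cluster.py, schedule.py), model_name is a dict, and dicts are unhashable, so collecting them into a set would raise TypeError. If that is the case here too, a sketch of a workable count in the context of this example script, assuming the 'granular_name' key introduced in cluster.py:

# assumption: work_unit.model_name is the naming dict {'granular_name': ..., 'measurement': ...}
work_names_count = len(set(n.work_unit.model_name['granular_name'] for n in wg.nodes))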
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "sampo"
version = "0.1.1.353"
version = "0.1.2"
description = "Open-source framework for adaptive manufacturing processes scheduling"
authors = ["iAirLab <iairlab@yandex.ru>"]
license = "BSD-3-Clause"
71 changes: 47 additions & 24 deletions sampo/generator/pipeline/cluster.py
@@ -17,15 +17,18 @@ def _add_addition_work(probability: float, rand: Random | None = None) -> bool:
def _get_roads(parents: list[GraphNode], cluster_name: str, dist: float,
rand: Random | None = None) -> tuple[dict[str, GraphNode], int]:
road_nodes = dict()
min_r = WorkUnit(uuid_str(rand), 'minimal road',
scale_reqs(wr.MIN_ROAD, dist), group=f'{cluster_name}:road', volume=dist, volume_type='km')
min_r = WorkUnit(uuid_str(rand), {'granular_name': 'minimal road',
'measurement': 'km'},
scale_reqs(wr.MIN_ROAD, dist), group=f'{cluster_name}:road', volume=dist)
road_nodes['min'] = GraphNode(min_r, parents)
temp_r = WorkUnit(uuid_str(rand), 'temporary road',
scale_reqs(wr.TEMP_ROAD, dist), group=f'{cluster_name}:road', volume=dist, volume_type='km')
temp_r = WorkUnit(uuid_str(rand), {'granular_name': 'temporary road',
'measurement': 'km'},
scale_reqs(wr.TEMP_ROAD, dist), group=f'{cluster_name}:road', volume=dist)
road_nodes['temp'] = GraphNode(temp_r, [(road_nodes['min'], wr.ATOMIC_ROAD_LEN, EdgeType.LagFinishStart)])

final_r = WorkUnit(uuid_str(rand), 'final road', scale_reqs(wr.FINAL_ROAD, dist), group=f'{cluster_name}:road',
volume=dist, volume_type='km')
final_r = WorkUnit(uuid_str(rand), {'granular_name': 'final road',
'measurement': 'km'},
scale_reqs(wr.FINAL_ROAD, dist), group=f'{cluster_name}:road', volume=dist)
road_nodes['final'] = GraphNode(final_r, [(road_nodes['temp'], wr.ATOMIC_ROAD_LEN, EdgeType.LagFinishStart)])
return road_nodes, len(road_nodes)

@@ -43,21 +46,27 @@ def _get_engineering_preparation(parents: list[GraphNode], cluster_name: str, bo
def _get_power_lines(parents: list[GraphNode], cluster_name: str, dist_line: float,
dist_high_line: float | None = None, rand: Random | None = None) -> tuple[list[GraphNode], int]:
worker_req = wr.scale_reqs(wr.POWER_LINE, dist_line)
power_line_1 = WorkUnit(uuid_str(rand), 'power line', worker_req,
power_line_1 = WorkUnit(uuid_str(rand),
{'granular_name': 'power line', 'measurement': 'km'},
worker_req,
group=f'{cluster_name}:electricity',
volume=dist_line, volume_type='km')
power_line_2 = WorkUnit(uuid_str(rand), 'power line', worker_req,
volume=dist_line)
power_line_2 = WorkUnit(uuid_str(rand),
{'granular_name': 'power line', 'measurement': 'km'},
worker_req,
group=f'{cluster_name}:electricity',
volume=dist_line, volume_type='km')
volume=dist_line)

power_lines = [
GraphNode(power_line_1, parents),
GraphNode(power_line_2, parents),
]
if dist_high_line is not None:
worker_req_high = wr.scale_reqs(wr.POWER_LINE, dist_high_line)
high_power_line = WorkUnit(uuid_str(rand), 'high power line', worker_req_high,
group=f'{cluster_name}:electricity', volume=dist_high_line, volume_type='km')
high_power_line = WorkUnit(uuid_str(rand),
{'granular_name': 'high power line', 'measurement': 'km'},
worker_req_high,
group=f'{cluster_name}:electricity', volume=dist_high_line)
power_lines.append(GraphNode(high_power_line, parents))

return power_lines, len(power_lines)
@@ -66,23 +75,32 @@ def _get_power_lines(parents: list[GraphNode], cluster_name: str, dist_line: flo
def _get_pipe_lines(parents: list[GraphNode], cluster_name: str, pipe_dists: list[float],
rand: Random | None = None) -> tuple[list[GraphNode], int]:
worker_req_pipe = wr.scale_reqs(wr.PIPE_LINE, pipe_dists[0])
first_pipe = WorkUnit(uuid_str(rand), 'pipe', worker_req_pipe, group=f'{cluster_name}:oil_gas_long_pipes',
volume=pipe_dists[0], volume_type='km')
first_pipe = WorkUnit(uuid_str(rand),
{'granular_name': 'pipe', 'measurement': 'km'},
worker_req_pipe, group=f'{cluster_name}:oil_gas_long_pipes',
volume=pipe_dists[0])

graph_nodes = [GraphNode(first_pipe, parents)]
for i in range(1, len(pipe_dists)):
node_work = WorkUnit(uuid_str(rand), 'node', wr.PIPE_NODE,
node_work = WorkUnit(uuid_str(rand),
{'granular_name': 'node'},
wr.PIPE_NODE,
group=f'{cluster_name}:oil_gas_long_pipes')
graph_nodes.append(GraphNode(node_work, parents))
worker_req_pipe = wr.scale_reqs(wr.PIPE_LINE, pipe_dists[i])
pipe_work = WorkUnit(uuid_str(rand), 'pipe', worker_req_pipe,
pipe_work = WorkUnit(uuid_str(rand),
{'granular_name': 'pipe', 'measurement': 'km'},
worker_req_pipe,
group=f'{cluster_name}:oil_gas_long_pipes',
volume=pipe_dists[i], volume_type='km')
volume=pipe_dists[i])
graph_nodes.append(GraphNode(pipe_work, parents))

worker_req_loop = wr.scale_reqs(wr.PIPE_LINE, pipe_dists[0])
looping = WorkUnit(uuid_str(rand), 'looping', worker_req_loop, group=f'{cluster_name}:oil_gas_long_pipes',
volume=pipe_dists[0], volume_type='km')
looping = WorkUnit(uuid_str(rand),
{'granular_name': 'looping', 'measurement': 'km'},
worker_req_loop,
group=f'{cluster_name}:oil_gas_long_pipes',
volume=pipe_dists[0])
graph_nodes.append(GraphNode(looping, graph_nodes[0:1]))
return graph_nodes, len(graph_nodes)

@@ -147,18 +165,23 @@ def _get_boreholes_equipment_general(parents: list[GraphNode], cluster_name: str
dist = gen_c.DIST_BETWEEN_BOREHOLES.rand_float(rand)
dists_sum += dist
worker_req_pipe = scale_reqs(wr.POWER_NETWORK, dist)
pipe_net_work = WorkUnit(uuid_str(rand), 'elem of pipe_network', worker_req_pipe,
group=f'{cluster_name}:oil_gas_pipe_net', volume=dist, volume_type='km')
pipe_net_work = WorkUnit(uuid_str(rand),
{'granular_name': 'elem of pipe_network', 'measurement': 'km'},
worker_req_pipe,
group=f'{cluster_name}:oil_gas_pipe_net',
volume=dist)
nodes.append(GraphNode(pipe_net_work, parents))

worker_req_power = scale_reqs(wr.POWER_NETWORK, dists_sum)
power_net_work = WorkUnit(uuid_str(rand), 'power network', worker_req_power,
power_net_work = WorkUnit(uuid_str(rand), {'granular_name': 'power network', 'measurement': 'km'},
worker_req_power,
group=f'{cluster_name}:electricity',
volume=dists_sum, volume_type='km')
volume=dists_sum)
nodes.append(GraphNode(power_net_work, parents))

for i in range(masts_count):
light_mast_work = WorkUnit(uuid_str(rand), 'mast', wr.LIGHT_MAST,
light_mast_work = WorkUnit(uuid_str(rand), {'granular_name': 'mast', 'measurement': 'km'},
wr.LIGHT_MAST,
group=f'{cluster_name}:light_masts')
nodes.append(GraphNode(light_mast_work, parents))
return nodes, len(nodes)
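Every WorkUnit construction in this file follows the same rewrite: the positional name string and the volume_type keyword are folded into a single naming dict. A hypothetical helper (not part of the PR) that captures the convention, including the keyless-measurement case seen with PIPE_NODE above:

def make_model_name(name: str, volume_type: str | None = None) -> dict:
    """Hypothetical: fold the old name/volume_type pair into the new naming dict."""
    model_name = {'granular_name': name}
    if volume_type is not None:
        model_name['measurement'] = volume_type
    return model_name

assert make_model_name('pipe', 'km') == {'granular_name': 'pipe', 'measurement': 'km'}
assert make_model_name('node') == {'granular_name': 'node'}  # no measurement, as for PIPE_NODE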
2 changes: 1 addition & 1 deletion sampo/scheduler/multi_agency/block_generator.py
@@ -93,7 +93,7 @@ def generate_wg(mode, i):
bg.add_edge(global_start, node)
bg.add_edge(node, global_end)

logger(f'{graph_type.name} ' + ' '.join([str(mode.name) for i, mode in enumerate(modes)
logger(f'{graph_type.name} ' + ' '.join([str(mode.model_name) for i, mode in enumerate(modes)
if nodes[i].vertex_count != EMPTY_GRAPH_VERTEX_COUNT]))
return bg

16 changes: 8 additions & 8 deletions sampo/scheduler/utils/local_optimization.py
@@ -247,16 +247,16 @@ def optimize(self, scheduled_works: dict[GraphNode, ScheduledWork], node_order:
satisfy = True

for candidate_worker in candidate_schedule.workers:
my_worker = my_workers.get(candidate_worker.name, None)
my_worker = my_workers.get(candidate_worker.model_name, None)
if my_worker is None: # these two works do not compete for this worker
continue

need_me = my_workers[candidate_worker.name].count
need_me = my_workers[candidate_worker.model_name].count
need_candidate = candidate_worker.count

total = need_me + need_candidate
my_req = my_schedule_reqs[candidate_worker.name]
candidate_req = candidate_schedule_reqs[candidate_worker.name]
my_req = my_schedule_reqs[candidate_worker.model_name]
candidate_req = candidate_schedule_reqs[candidate_worker.model_name]
needed_min = my_req.min_count + candidate_req.min_count

if needed_min > total: # these two works can't run in parallel
@@ -273,17 +273,17 @@
my_worker_count += add_me
candidate_worker_count += add_candidate

new_my_workers[candidate_worker.name] = my_worker_count
new_candidate_workers[candidate_worker.name] = candidate_worker_count
new_my_workers[candidate_worker.model_name] = my_worker_count
new_candidate_workers[candidate_worker.model_name] = candidate_worker_count

if satisfy: # replacement found, apply changes and leave candidates bruteforce
print(f'Found! {candidate.work_unit.name} {node.work_unit.name}')
for worker in my_schedule.workers:
worker_count = new_my_workers.get(worker.name, None)
worker_count = new_my_workers.get(worker.model_name, None)
if worker_count is not None:
worker.count = worker_count
for worker in candidate_schedule.workers:
worker_count = new_candidate_workers.get(worker.name, None)
worker_count = new_candidate_workers.get(worker.model_name, None)
if worker_count is not None:
worker.count = worker_count
# candidate_schedule.start_time = my_schedule.start_time
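For reference, the feasibility test this loop applies is unchanged by the rename: two works can share a worker pool only if the sum of their minimum requirements fits within the workers they currently hold. A toy check with hypothetical counts:

need_me, need_candidate = 4, 3           # counts currently assigned to each work
my_min, candidate_min = 2, 2             # min_count from the respective worker reqs
total = need_me + need_candidate
assert my_min + candidate_min <= total   # otherwise the two works can't run in parallel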
49 changes: 30 additions & 19 deletions sampo/schemas/schedule.py
@@ -1,28 +1,34 @@
from copy import deepcopy
from datetime import datetime
from functools import lru_cache
from typing import Iterable, Union
from operator import itemgetter
from typing import Iterable, Union, Any

from pandas import DataFrame

from sampo.schemas.graph import WorkGraph, GraphNode
from sampo.schemas.scheduled_work import ScheduledWork
from sampo.schemas.serializable import JSONSerializable, T
from sampo.schemas.time import Time
from sampo.utilities.collections_util import first
from sampo.utilities.schedule import fix_split_tasks, offset_schedule

ResourceSchedule = dict[str, list[tuple[Time, Time]]]
ScheduleWorkDict = dict[str, ScheduledWork]


def _get_granular_name_columns(sworks: Iterable[ScheduledWork] | None = None):
return list(sorted(first(sworks).model_name.keys()))


# TODO: Rebase object onto ScheduleWorkDict and ordered ScheduledWork list
class Schedule(JSONSerializable['Schedule']):
"""
Represents work schedule. Is a wrapper around DataFrame with specific structure.
"""

_data_columns: list[str] = ['idx', 'task_id', 'task_name', 'task_name_mapped', 'contractor', 'cost',
'volume', 'measurement', 'start',
_data_columns: list[str] = ['idx', 'task_id', 'task_name', 'contractor', 'cost',
'volume', 'start',
'finish', 'duration', 'workers']
_scheduled_work_column: str = 'scheduled_work_object'

@@ -47,7 +53,7 @@ def pure_schedule_df(self) -> DataFrame:
return self._schedule[~self._schedule.apply(
lambda row: row[self._scheduled_work_column].is_service_unit,
axis=1
)][self._data_columns]
)][self._data_columns + _get_granular_name_columns(self._schedule[self._scheduled_work_column])]

@property
def works(self) -> Iterable[ScheduledWork]:
@@ -118,7 +124,7 @@ def unite_stages(self) -> 'Schedule':
def f(row):
swork: ScheduledWork = deepcopy(row[self._scheduled_work_column])
row[self._scheduled_work_column] = swork
swork.name = row['task_name_mapped']
swork.model_name['granular_name'] = row['granular_name']
swork.display_name = row['task_name']
swork.volume = float(row['volume'])
swork.start_end_time = Time(int(row['start'])), Time(int(row['finish']))
@@ -141,7 +147,7 @@ def from_scheduled_works(works: Iterable[ScheduledWork],
"""
ordered_task_ids = order_nodes_by_start_time(works, wg) if wg else None

def sed(time1, time2, swork) -> tuple:
def sed(time1, time2) -> tuple:
"""
Sorts times and calculates difference.
:param time1: time 1.
@@ -151,19 +157,24 @@ def sed(time1, time2, swork) -> tuple:
start, end = tuple(sorted((time1, time2)))
return start, end, end - start

data_frame = [(i, # idx
w.id, # task_id
w.display_name, # task_name
w.name, # task_name_mapped
w.contractor, # contractor info
w.cost, # work cost
w.volume, # work volume
w.volume_type, # work volume type
*sed(*(t.value for t in w.start_end_time), w), # start, end, duration
repr(dict((i.name, i.count) for i in w.workers)), # workers
w # full ScheduledWork info
model_name_columns = _get_granular_name_columns(works)

def make_model_name_columns(swork: ScheduledWork) -> list[Any]:
return list(map(itemgetter(1), sorted(swork.model_name.items(), key=itemgetter(0))))

data_frame = [(i, # idx
w.id, # task_id
w.display_name, # task_name
w.contractor, # contractor info
w.cost, # work cost
w.volume, # work volume
*sed(*(t.value for t in w.start_end_time)), # start, end, duration
repr(dict((i.name, i.count) for i in w.workers)), # workers
w, # full ScheduledWork info
*make_model_name_columns(w), # model_name columns
) for i, w in enumerate(works)]
data_frame = DataFrame.from_records(data_frame, columns=Schedule._columns)

data_frame = DataFrame.from_records(data_frame, columns=Schedule._columns + model_name_columns)

data_frame = data_frame.set_index('idx', drop=False)

@@ -173,7 +184,7 @@ def sed(time1, time2, swork) -> tuple:
data_frame = data_frame.sort_values(['task_id'])
data_frame.task_id = data_frame.task_id.astype(str)

data_frame = data_frame.reindex(columns=Schedule._columns)
data_frame = data_frame.reindex(columns=Schedule._columns + model_name_columns)
data_frame = data_frame.reset_index(drop=True)

return Schedule(data_frame)
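The two helpers added here flatten each ScheduledWork.model_name dict into extra DataFrame columns: _get_granular_name_columns derives column names from the sorted keys, and make_model_name_columns emits values in the same sorted-key order, so names and values stay aligned. A standalone sketch of that pairing, assuming all works share the same model_name keys:

from operator import itemgetter

model_name = {'measurement': 'km', 'granular_name': 'pipe'}
columns = sorted(model_name.keys())  # ['granular_name', 'measurement']
values = list(map(itemgetter(1), sorted(model_name.items(), key=itemgetter(0))))  # ['pipe', 'km']
assert dict(zip(columns, values)) == model_name  # column order matches value order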
5 changes: 2 additions & 3 deletions sampo/schemas/scheduled_work.py
@@ -38,11 +38,10 @@ def __init__(self,
materials: MaterialDelivery | None = None,
c_object: ConstructionObject | None = None):
self.id = work_unit.id
self.name = work_unit.name
self.model_name = work_unit.model_name
self.display_name = work_unit.display_name
self.is_service_unit = work_unit.is_service_unit
self.volume = work_unit.volume
self.volume_type = work_unit.volume_type
self.priority = work_unit.priority
self.start_end_time = start_end_time
self.workers = workers if workers is not None else []
@@ -123,7 +122,7 @@ def duration(self) -> Time:
def to_dict(self) -> dict[str, Any]:
return {
'task_id': self.id,
'task_name': self.name,
'task_name': self.model_name,
'start': self.start_time.value,
'finish': self.finish_time.value,
'contractor_id': self.contractor,
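With this change, to_dict exports the naming dict rather than a plain string under 'task_name', so JSON consumers now receive a nested object. A sketch of the resulting payload shape, with hypothetical values:

payload = {
    'task_id': 'a1b2c3',                                          # hypothetical id
    'task_name': {'granular_name': 'pipe', 'measurement': 'km'},  # nested dict, previously a str
    'start': 0,
    'finish': 5,
}
assert isinstance(payload['task_name'], dict)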