diff --git a/examples/simple_generation.ipynb b/examples/simple_generation.ipynb
index 8c311485..f7592848 100644
--- a/examples/simple_generation.ipynb
+++ b/examples/simple_generation.ipynb
@@ -99,6 +99,41 @@
     "                  uniq_resources=100)"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "{1: 'hello'}\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": "dict"
+     },
+     "execution_count": 4,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "d = {1: 'hello'}\n",
+    "print(d)\n",
+    "\n",
+    "v = eval(str(d))\n",
+    "type(v)"
+   ],
+   "metadata": {
+    "collapsed": false,
+    "ExecuteTime": {
+     "end_time": "2025-09-17T08:15:05.619946Z",
+     "start_time": "2025-09-17T08:15:05.611943300Z"
+    }
+   }
+  },
   {
    "cell_type": "markdown",
    "metadata": {
diff --git a/examples/simple_synthetic_graph_scheduling.py b/examples/simple_synthetic_graph_scheduling.py
index 0fdf701b..8d2f2891 100644
--- a/examples/simple_synthetic_graph_scheduling.py
+++ b/examples/simple_synthetic_graph_scheduling.py
@@ -31,7 +31,7 @@
 
 # Get information about created WorkGraph's attributes
 works_count = len(wg.nodes)
-work_names_count = len(set(n.work_unit.name for n in wg.nodes))
+work_names_count = len(set(n.work_unit.model_name['granular_name'] for n in wg.nodes))
 res_kind_count = len(set(req.kind for req in chain(*[n.work_unit.worker_reqs for n in wg.nodes])))
 
 print(works_count, work_names_count, res_kind_count)
diff --git a/pyproject.toml b/pyproject.toml
index 395a724d..26bbe10a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "sampo"
-version = "0.1.1.353"
+version = "0.1.2"
 description = "Open-source framework for adaptive manufacturing processes scheduling"
 authors = ["iAirLab "]
 license = "BSD-3-Clause"
diff --git a/sampo/generator/pipeline/cluster.py b/sampo/generator/pipeline/cluster.py
index 7383f1ad..9b9146c1 100644
--- a/sampo/generator/pipeline/cluster.py
+++ b/sampo/generator/pipeline/cluster.py
@@ -17,15 +17,18 @@ def _add_addition_work(probability: float, rand: Random | None = None) -> bool:
 def _get_roads(parents: list[GraphNode], cluster_name: str, dist: float,
                rand: Random | None = None) -> tuple[dict[str, GraphNode], int]:
     road_nodes = dict()
-    min_r = WorkUnit(uuid_str(rand), 'minimal road',
-                     scale_reqs(wr.MIN_ROAD, dist), group=f'{cluster_name}:road', volume=dist, volume_type='km')
+    min_r = WorkUnit(uuid_str(rand), {'granular_name': 'minimal road',
+                                      'measurement': 'km'},
+                     scale_reqs(wr.MIN_ROAD, dist), group=f'{cluster_name}:road', volume=dist)
     road_nodes['min'] = GraphNode(min_r, parents)
-    temp_r = WorkUnit(uuid_str(rand), 'temporary road',
-                      scale_reqs(wr.TEMP_ROAD, dist), group=f'{cluster_name}:road', volume=dist, volume_type='km')
+    temp_r = WorkUnit(uuid_str(rand), {'granular_name': 'temporary road',
+                                       'measurement': 'km'},
+                      scale_reqs(wr.TEMP_ROAD, dist), group=f'{cluster_name}:road', volume=dist)
     road_nodes['temp'] = GraphNode(temp_r, [(road_nodes['min'], wr.ATOMIC_ROAD_LEN, EdgeType.LagFinishStart)])
-    final_r = WorkUnit(uuid_str(rand), 'final road', scale_reqs(wr.FINAL_ROAD, dist), group=f'{cluster_name}:road',
-                       volume=dist, volume_type='km')
+    final_r = WorkUnit(uuid_str(rand), {'granular_name': 'final road',
+                                        'measurement': 'km'},
+                       scale_reqs(wr.FINAL_ROAD, dist), group=f'{cluster_name}:road', volume=dist)
     road_nodes['final'] = GraphNode(final_r, [(road_nodes['temp'], wr.ATOMIC_ROAD_LEN, EdgeType.LagFinishStart)])
     return road_nodes, len(road_nodes)
@@ -43,12 +46,16 @@ def _get_engineering_preparation(parents: list[GraphNode], cluster_name: str, bo
 def _get_power_lines(parents: list[GraphNode], cluster_name: str, dist_line: float,
                      dist_high_line: float | None = None, rand: Random | None = None) -> tuple[list[GraphNode], int]:
     worker_req = wr.scale_reqs(wr.POWER_LINE, dist_line)
-    power_line_1 = WorkUnit(uuid_str(rand), 'power line', worker_req,
+    power_line_1 = WorkUnit(uuid_str(rand),
+                            {'granular_name': 'power line', 'measurement': 'km'},
+                            worker_req,
                             group=f'{cluster_name}:electricity',
-                            volume=dist_line, volume_type='km')
-    power_line_2 = WorkUnit(uuid_str(rand), 'power line', worker_req,
+                            volume=dist_line)
+    power_line_2 = WorkUnit(uuid_str(rand),
+                            {'granular_name': 'power line', 'measurement': 'km'},
+                            worker_req,
                             group=f'{cluster_name}:electricity',
-                            volume=dist_line, volume_type='km')
+                            volume=dist_line)
 
     power_lines = [
         GraphNode(power_line_1, parents),
@@ -56,8 +63,10 @@ def _get_power_lines(parents: list[GraphNode], cluster_name: str, dist_line: flo
     ]
     if dist_high_line is not None:
         worker_req_high = wr.scale_reqs(wr.POWER_LINE, dist_high_line)
-        high_power_line = WorkUnit(uuid_str(rand), 'high power line', worker_req_high,
-                                   group=f'{cluster_name}:electricity', volume=dist_high_line, volume_type='km')
+        high_power_line = WorkUnit(uuid_str(rand),
+                                   {'granular_name': 'high power line', 'measurement': 'km'},
+                                   worker_req_high,
+                                   group=f'{cluster_name}:electricity', volume=dist_high_line)
         power_lines.append(GraphNode(high_power_line, parents))
 
     return power_lines, len(power_lines)
@@ -66,23 +75,32 @@ def _get_power_lines(parents: list[GraphNode], cluster_name: str, dist_line: flo
 def _get_pipe_lines(parents: list[GraphNode], cluster_name: str, pipe_dists: list[float],
                     rand: Random | None = None) -> tuple[list[GraphNode], int]:
     worker_req_pipe = wr.scale_reqs(wr.PIPE_LINE, pipe_dists[0])
-    first_pipe = WorkUnit(uuid_str(rand), 'pipe', worker_req_pipe, group=f'{cluster_name}:oil_gas_long_pipes',
-                          volume=pipe_dists[0], volume_type='km')
+    first_pipe = WorkUnit(uuid_str(rand),
+                          {'granular_name': 'pipe', 'measurement': 'km'},
+                          worker_req_pipe, group=f'{cluster_name}:oil_gas_long_pipes',
+                          volume=pipe_dists[0])
     graph_nodes = [GraphNode(first_pipe, parents)]
     for i in range(1, len(pipe_dists)):
-        node_work = WorkUnit(uuid_str(rand), 'node', wr.PIPE_NODE,
+        node_work = WorkUnit(uuid_str(rand),
+                             {'granular_name': 'node'},
+                             wr.PIPE_NODE,
                              group=f'{cluster_name}:oil_gas_long_pipes')
         graph_nodes.append(GraphNode(node_work, parents))
         worker_req_pipe = wr.scale_reqs(wr.PIPE_LINE, pipe_dists[i])
-        pipe_work = WorkUnit(uuid_str(rand), 'pipe', worker_req_pipe,
+        pipe_work = WorkUnit(uuid_str(rand),
+                             {'granular_name': 'pipe', 'measurement': 'km'},
+                             worker_req_pipe,
                              group=f'{cluster_name}:oil_gas_long_pipes',
-                             volume=pipe_dists[i], volume_type='km')
+                             volume=pipe_dists[i])
         graph_nodes.append(GraphNode(pipe_work, parents))
 
     worker_req_loop = wr.scale_reqs(wr.PIPE_LINE, pipe_dists[0])
-    looping = WorkUnit(uuid_str(rand), 'looping', worker_req_loop, group=f'{cluster_name}:oil_gas_long_pipes',
-                       volume=pipe_dists[0], volume_type='km')
+    looping = WorkUnit(uuid_str(rand),
+                       {'granular_name': 'looping', 'measurement': 'km'},
+                       worker_req_loop,
+                       group=f'{cluster_name}:oil_gas_long_pipes',
+                       volume=pipe_dists[0])
     graph_nodes.append(GraphNode(looping, graph_nodes[0:1]))
 
     return graph_nodes, len(graph_nodes)
@@ -147,18 +165,23 @@ def _get_boreholes_equipment_general(parents: list[GraphNode], cluster_name: str
         dist = gen_c.DIST_BETWEEN_BOREHOLES.rand_float(rand)
         dists_sum += dist
         worker_req_pipe = scale_reqs(wr.POWER_NETWORK, dist)
-        pipe_net_work = WorkUnit(uuid_str(rand), 'elem of pipe_network', worker_req_pipe,
-                                 group=f'{cluster_name}:oil_gas_pipe_net', volume=dist, volume_type='km')
+        pipe_net_work = WorkUnit(uuid_str(rand),
+                                 {'granular_name': 'elem of pipe_network', 'measurement': 'km'},
+                                 worker_req_pipe,
+                                 group=f'{cluster_name}:oil_gas_pipe_net',
+                                 volume=dist)
         nodes.append(GraphNode(pipe_net_work, parents))
 
     worker_req_power = scale_reqs(wr.POWER_NETWORK, dists_sum)
-    power_net_work = WorkUnit(uuid_str(rand), 'power network', worker_req_power,
+    power_net_work = WorkUnit(uuid_str(rand), {'granular_name': 'power network', 'measurement': 'km'},
+                              worker_req_power,
                               group=f'{cluster_name}:electricity',
-                              volume=dists_sum, volume_type='km')
+                              volume=dists_sum)
     nodes.append(GraphNode(power_net_work, parents))
 
     for i in range(masts_count):
-        light_mast_work = WorkUnit(uuid_str(rand), 'mast', wr.LIGHT_MAST,
+        light_mast_work = WorkUnit(uuid_str(rand), {'granular_name': 'mast', 'measurement': 'km'},
+                                   wr.LIGHT_MAST,
                                    group=f'{cluster_name}:light_masts')
         nodes.append(GraphNode(light_mast_work, parents))
     return nodes, len(nodes)
diff --git a/sampo/scheduler/multi_agency/block_generator.py b/sampo/scheduler/multi_agency/block_generator.py
index 8efda217..60382250 100644
--- a/sampo/scheduler/multi_agency/block_generator.py
+++ b/sampo/scheduler/multi_agency/block_generator.py
@@ -93,7 +93,7 @@ def generate_wg(mode, i):
         bg.add_edge(global_start, node)
         bg.add_edge(node, global_end)
 
-    logger(f'{graph_type.name} ' + ' '.join([str(mode.name) for i, mode in enumerate(modes)
+    logger(f'{graph_type.name} ' + ' '.join([str(mode.model_name) for i, mode in enumerate(modes)
                                              if nodes[i].vertex_count != EMPTY_GRAPH_VERTEX_COUNT]))
 
     return bg
diff --git a/sampo/scheduler/utils/local_optimization.py b/sampo/scheduler/utils/local_optimization.py
index fb72797b..194e5fd1 100644
--- a/sampo/scheduler/utils/local_optimization.py
+++ b/sampo/scheduler/utils/local_optimization.py
@@ -247,16 +247,16 @@ def optimize(self, scheduled_works: dict[GraphNode, ScheduledWork], node_order:
 
             satisfy = True
             for candidate_worker in candidate_schedule.workers:
-                my_worker = my_workers.get(candidate_worker.name, None)
+                my_worker = my_workers.get(candidate_worker.model_name, None)
                 if my_worker is None:  # these two works are not compete for this worker
                     continue
 
-                need_me = my_workers[candidate_worker.name].count
+                need_me = my_workers[candidate_worker.model_name].count
                 need_candidate = candidate_worker.count
                 total = need_me + need_candidate
-                my_req = my_schedule_reqs[candidate_worker.name]
-                candidate_req = candidate_schedule_reqs[candidate_worker.name]
+                my_req = my_schedule_reqs[candidate_worker.model_name]
+                candidate_req = candidate_schedule_reqs[candidate_worker.model_name]
                 needed_min = my_req.min_count + candidate_req.min_count
 
                 if needed_min > total:  # these two works can't run in parallel
@@ -273,17 +273,17 @@ def optimize(self, scheduled_works: dict[GraphNode, ScheduledWork], node_order:
                     my_worker_count += add_me
                     candidate_worker_count += add_candidate
 
-                new_my_workers[candidate_worker.name] = my_worker_count
-                new_candidate_workers[candidate_worker.name] = candidate_worker_count
+                new_my_workers[candidate_worker.model_name] = my_worker_count
+                new_candidate_workers[candidate_worker.model_name] = candidate_worker_count
 
             if satisfy:  # replacement found, apply changes and leave candidates bruteforce
                 print(f'Found! {candidate.work_unit.name} {node.work_unit.name}')
                 for worker in my_schedule.workers:
-                    worker_count = new_my_workers.get(worker.name, None)
+                    worker_count = new_my_workers.get(worker.model_name, None)
                     if worker_count is not None:
                         worker.count = worker_count
                 for worker in candidate_schedule.workers:
-                    worker_count = new_candidate_workers.get(worker.name, None)
+                    worker_count = new_candidate_workers.get(worker.model_name, None)
                     if worker_count is not None:
                         worker.count = worker_count
                 # candidate_schedule.start_time = my_schedule.start_time
diff --git a/sampo/schemas/schedule.py b/sampo/schemas/schedule.py
index 2322a174..de53fbfe 100644
--- a/sampo/schemas/schedule.py
+++ b/sampo/schemas/schedule.py
@@ -1,7 +1,8 @@
 from copy import deepcopy
 from datetime import datetime
 from functools import lru_cache
-from typing import Iterable, Union
+from operator import itemgetter
+from typing import Iterable, Union, Any
 
 from pandas import DataFrame
 
@@ -9,20 +10,25 @@
 from sampo.schemas.scheduled_work import ScheduledWork
 from sampo.schemas.serializable import JSONSerializable, T
 from sampo.schemas.time import Time
+from sampo.utilities.collections_util import first
 from sampo.utilities.schedule import fix_split_tasks, offset_schedule
 
 ResourceSchedule = dict[str, list[tuple[Time, Time]]]
 ScheduleWorkDict = dict[str, ScheduledWork]
 
 
+def _get_granular_name_columns(sworks: Iterable[ScheduledWork] | None = None):
+    return list(sorted(first(sworks).model_name.keys()))
+
+
 # TODO: Rebase object onto ScheduleWorkDict and ordered ScheduledWork list
 class Schedule(JSONSerializable['Schedule']):
     """
     Represents work schedule. Is a wrapper around DataFrame with specific structure.
     """
 
-    _data_columns: list[str] = ['idx', 'task_id', 'task_name', 'task_name_mapped', 'contractor', 'cost',
-                                'volume', 'measurement', 'start',
+    _data_columns: list[str] = ['idx', 'task_id', 'task_name', 'contractor', 'cost',
+                                'volume', 'start',
                                 'finish', 'duration', 'workers']
     _scheduled_work_column: str = 'scheduled_work_object'
@@ -47,7 +53,7 @@ def pure_schedule_df(self) -> DataFrame:
         return self._schedule[~self._schedule.apply(
             lambda row: row[self._scheduled_work_column].is_service_unit,
             axis=1
-        )][self._data_columns]
+        )][self._data_columns + _get_granular_name_columns(self._schedule[self._scheduled_work_column])]
 
     @property
     def works(self) -> Iterable[ScheduledWork]:
@@ -118,7 +124,7 @@ def unite_stages(self) -> 'Schedule':
         def f(row):
             swork: ScheduledWork = deepcopy(row[self._scheduled_work_column])
             row[self._scheduled_work_column] = swork
-            swork.name = row['task_name_mapped']
+            swork.model_name['granular_name'] = row['granular_name']
             swork.display_name = row['task_name']
             swork.volume = float(row['volume'])
             swork.start_end_time = Time(int(row['start'])), Time(int(row['finish']))
@@ -141,7 +147,7 @@ def from_scheduled_works(works: Iterable[ScheduledWork],
         """
         ordered_task_ids = order_nodes_by_start_time(works, wg) if wg else None
 
-        def sed(time1, time2, swork) -> tuple:
+        def sed(time1, time2) -> tuple:
             """
             Sorts times and calculates difference.
            :param time1: time 1.
@@ -151,19 +157,24 @@ def sed(time1, time2, swork) -> tuple:
             start, end = tuple(sorted((time1, time2)))
             return start, end, end - start
 
-        data_frame = [(i,  # idx
-                       w.id,  # task_id
-                       w.display_name,  # task_name
-                       w.name,  # task_name_mapped
-                       w.contractor,  # contractor info
-                       w.cost,  # work cost
-                       w.volume,  # work volume
-                       w.volume_type,  # work volume type
-                       *sed(*(t.value for t in w.start_end_time), w),  # start, end, duration
-                       repr(dict((i.name, i.count) for i in w.workers)),  # workers
-                       w  # full ScheduledWork info
+        model_name_columns = _get_granular_name_columns(works)
+
+        def make_model_name_columns(swork: ScheduledWork) -> list[Any]:
+            return list(map(itemgetter(1), sorted(swork.model_name.items(), key=itemgetter(0))))
+
+        data_frame = [(i,  # idx
+                       w.id,  # task_id
+                       w.display_name,  # task_name
+                       w.contractor,  # contractor info
+                       w.cost,  # work cost
+                       w.volume,  # work volume
+                       *sed(*(t.value for t in w.start_end_time)),  # start, end, duration
+                       repr(dict((i.name, i.count) for i in w.workers)),  # workers
+                       w,  # full ScheduledWork info
+                       *make_model_name_columns(w),  # model_name columns
                        ) for i, w in enumerate(works)]
-        data_frame = DataFrame.from_records(data_frame, columns=Schedule._columns)
+
+        data_frame = DataFrame.from_records(data_frame, columns=Schedule._columns + model_name_columns)
 
         data_frame = data_frame.set_index('idx', drop=False)
@@ -173,7 +184,7 @@ def sed(time1, time2, swork) -> tuple:
             data_frame = data_frame.sort_values(['task_id'])
             data_frame.task_id = data_frame.task_id.astype(str)
 
-        data_frame = data_frame.reindex(columns=Schedule._columns)
+        data_frame = data_frame.reindex(columns=Schedule._columns + model_name_columns)
         data_frame = data_frame.reset_index(drop=True)
 
         return Schedule(data_frame)
diff --git a/sampo/schemas/scheduled_work.py b/sampo/schemas/scheduled_work.py
index 5b53db21..a0ed635e 100644
--- a/sampo/schemas/scheduled_work.py
+++ b/sampo/schemas/scheduled_work.py
@@ -38,11 +38,10 @@ def __init__(self,
                  materials: MaterialDelivery | None = None,
                  c_object: ConstructionObject | None = None):
         self.id = work_unit.id
-        self.name = work_unit.name
+        self.model_name = work_unit.model_name
         self.display_name = work_unit.display_name
         self.is_service_unit = work_unit.is_service_unit
         self.volume = work_unit.volume
-        self.volume_type = work_unit.volume_type
         self.priority = work_unit.priority
         self.start_end_time = start_end_time
         self.workers = workers if workers is not None else []
@@ -123,7 +122,7 @@ def duration(self) -> Time:
     def to_dict(self) -> dict[str, Any]:
         return {
             'task_id': self.id,
-            'task_name': self.name,
+            'task_name': self.model_name,
             'start': self.start_time.value,
             'finish': self.finish_time.value,
             'contractor_id': self.contractor,
diff --git a/sampo/schemas/time_estimator.py b/sampo/schemas/time_estimator.py
index cf6fc988..441124b0 100644
--- a/sampo/schemas/time_estimator.py
+++ b/sampo/schemas/time_estimator.py
@@ -2,7 +2,7 @@
 from enum import Enum
 from operator import attrgetter
 from random import Random
-from typing import Optional, Type
+from typing import Optional, Type, Any
 
 import numpy.random
 import math
@@ -36,10 +36,14 @@ def set_productivity_mode(self, mode: WorkerProductivityMode = WorkerProductivit
         ...
 
     @abstractmethod
-    def find_work_resources(self, work_name: str, work_volume: float,
-                            resource_name: list[str] | None = None,
-                            measurement: str | None = None) \
-            -> list[WorkerReq]:
+    def get_model_name_keys(self) -> list[str]:
+        ...
+
+    @abstractmethod
+    def find_work_resources(self,
+                            model_name: dict[str, Any],
+                            work_volume: float,
+                            resource_name: list[str] | None = None) -> list[WorkerReq]:
         ...
 
     @abstractmethod
@@ -62,9 +66,10 @@ def __init__(self,
         self._productivity = {worker: {'__ALL__': IntervalGaussian(1, 0.2, 1, 0)}
                               for worker in ['driver', 'fitter', 'manager', 'handyman', 'electrician', 'engineer']}
 
-    def find_work_resources(self, work_name: str, work_volume: float, measurement: str | None = None,
-                            resource_name: list[str] | None = None) \
-            -> list[WorkerReq]:
+    def find_work_resources(self,
+                            model_name: dict[str, Any],
+                            work_volume: float,
+                            resource_name: list[str] | None = None) -> list[WorkerReq]:
         if resource_name is None:
             resource_name = ['driver', 'fitter', 'manager', 'handyman', 'electrician', 'engineer']
         dist = numpy.random.poisson(work_volume * 3, len(resource_name))
@@ -74,6 +79,9 @@ def find_work_resources(self, work_name: str, work_volume: float, measurement: s
                           max_count=int(dist[i] * 2))
                 for i, name in enumerate(resource_name)]
 
+    def get_model_name_keys(self) -> list[str]:
+        return ['granular_name', 'measurement']
+
     def set_estimation_mode(self, use_idle: bool = True, mode: WorkEstimationMode = WorkEstimationMode.Realistic):
         self._use_idle = use_idle
         self._estimation_mode = mode
diff --git a/sampo/schemas/works.py b/sampo/schemas/works.py
index 9ee82423..8af0bf0a 100644
--- a/sampo/schemas/works.py
+++ b/sampo/schemas/works.py
@@ -1,4 +1,5 @@
 from dataclasses import dataclass
+from typing import Any
 
 from sampo.schemas.identifiable import Identifiable
 from sampo.schemas.requirements import WorkerReq, EquipmentReq, MaterialReq, ConstructionObjectReq, ZoneReq
@@ -14,7 +15,7 @@ class WorkUnit(AutoJSONSerializable['WorkUnit'], Identifiable):
     """
 
     def __init__(self,
                  id: str,
-                 name: str,
+                 model_name: dict[str, Any] | str,
                  worker_reqs: list[WorkerReq] = None,
                  equipment_reqs: list[EquipmentReq] = None,
                  material_reqs: list[MaterialReq] = None,
@@ -25,10 +26,12 @@ def __init__(self,
                  priority: int = 1,
                  is_service_unit: bool = False,
                  volume: float = 0,
-                 volume_type: str = 'unit',
                  display_name: str = "",
                  workground_size: int = 100):
         """
+        :param model_name: dict with information that describes the type of work for the resource model.
+            At minimum it should contain 'granular_name' and 'measurement' entries.
+            A `str` model_name is equivalent to {'granular_name': your_str_value, 'measurement': 'unit'}
         :param worker_reqs: list of required professions (i.e. workers)
         :param equipment_reqs: list of required equipment
         :param material_reqs: list of required materials (e.g. logs, stones, gravel etc.)
@@ -38,10 +41,17 @@ def __init__(self,
         :param group: union block of works
         :param is_service_unit: service units are additional vertexes
         :param volume: scope of work
-        :param volume_type: unit of scope of work
         :param display_name: name of work
         """
-        super(WorkUnit, self).__init__(id, name)
+        if isinstance(model_name, str):
+            model_name = {'granular_name': model_name}
+        if 'measurement' not in model_name:
+            model_name['measurement'] = 'unit'
+
+        self.model_name = model_name
+
+        super(WorkUnit, self).__init__(id, 'dummy')
+
         if material_reqs is None:
             material_reqs = []
         if object_reqs is None:
@@ -61,8 +71,7 @@ def __init__(self,
         self.group = group
         self.is_service_unit = is_service_unit
         self.volume = float(volume)
-        self.volume_type = volume_type
-        self.display_name = display_name if display_name else name
+        self.display_name = display_name if display_name else model_name['granular_name']
         self.priority = priority
 
     def __del__(self):
@@ -141,10 +150,9 @@ def __setstate__(self, state):
         self.material_reqs = new_work_unit.material_reqs
         self.zone_reqs = new_work_unit.zone_reqs
         self.id = new_work_unit.id
-        self.name = new_work_unit.name
+        self.model_name = new_work_unit.model_name
         self.is_service_unit = new_work_unit.is_service_unit
         self.volume = new_work_unit.volume
-        self.volume_type = new_work_unit.volume_type
         self.group = new_work_unit.group
         self.display_name = new_work_unit.display_name
         self.priority = new_work_unit.priority
diff --git a/sampo/structurator/base.py b/sampo/structurator/base.py
index 21d274e2..444cc668 100644
--- a/sampo/structurator/base.py
+++ b/sampo/structurator/base.py
@@ -184,9 +184,12 @@ def make_new_stage_node(volume_proportion: float,
         # update id attribute with current stage node id
         wu_attrs['id'] = stage_node_id
         # update name attribute with current index of stage
-        wu_attrs['name'] = f'{wu.name}{STAGE_SEP}{stage_i}'
+        wu_attrs['model_name']['granular_name'] = f'{wu.model_name["granular_name"]}{STAGE_SEP}{stage_i}'
         # update volume attribute with passed proportion
         wu_attrs['volume'] = wu.volume * volume_proportion
+        # remove 'name' since it is an unused field inherited from Identifiable
+        if 'name' in wu_attrs:
+            del wu_attrs['name']
         # make new work unit for new stage node with updated attributes
         new_wu = WorkUnit(**wu_attrs)
         # make new graph node for new stage with created work unit and with passed edge to previous stage node
diff --git a/sampo/structurator/prepare_wg_copy.py b/sampo/structurator/prepare_wg_copy.py
index 6b5b2719..66706cf8 100644
--- a/sampo/structurator/prepare_wg_copy.py
+++ b/sampo/structurator/prepare_wg_copy.py
@@ -20,14 +20,15 @@ def copy_graph_node(node: GraphNode, new_id: int | str | None = None,
     else:
         new_id = node.work_unit.id
     wu = node.work_unit
-    new_wu = WorkUnit(id=new_id, name=wu.name,
+    new_wu = WorkUnit(id=new_id,
+                      model_name=wu.model_name,
                       worker_reqs=deepcopy(wu.worker_reqs),
                       material_reqs=deepcopy(wu.material_reqs),
                       equipment_reqs=deepcopy(wu.equipment_reqs),
                       object_reqs=deepcopy(wu.object_reqs),
                       zone_reqs=deepcopy(wu.zone_reqs),
                       group=wu.group,
-                      is_service_unit=wu.is_service_unit, volume=wu.volume, volume_type=wu.volume_type)
+                      is_service_unit=wu.is_service_unit, volume=wu.volume)
     return GraphNode(new_wu, []), (wu.id, new_id)
diff --git a/sampo/userinput/parser/csv_parser.py b/sampo/userinput/parser/csv_parser.py
index c4ea67d1..aae71e49 100644
--- a/sampo/userinput/parser/csv_parser.py
+++ b/sampo/userinput/parser/csv_parser.py
@@ -124,10 +124,9 @@ def work_graph(works_info: pd.DataFrame,
             works_info.activity_name = works_info.activity_name.apply(lambda name: name_mapper[name])
 
         resources = [dict((worker_req.kind, int(worker_req.volume))
-                          for worker_req in work_resource_estimator.find_work_resources(work_name=w[0],
-                                                                                        work_volume=float(w[1]),
-                                                                                        measurement=w[2]))
-                     for w in works_info.loc[:, ['granular_name', 'volume', 'measurement']].to_numpy()]
+                          for worker_req in work_resource_estimator.find_work_resources(model_name=eval(w[0]),
+                                                                                        work_volume=float(w[1])))
+                     for w in works_info.loc[:, ['model_name', 'volume']].to_numpy()]
 
         unique_res = list(set(chain(*[r.keys() for r in resources])))
diff --git a/sampo/userinput/parser/general_build.py b/sampo/userinput/parser/general_build.py
index b727b002..23fb7bc1 100644
--- a/sampo/userinput/parser/general_build.py
+++ b/sampo/userinput/parser/general_build.py
@@ -167,8 +167,16 @@ def normalize_if_number(s):
         if col not in frame.columns:
             frame[col] = temp_lst
 
-    if 'granular_name' not in frame.columns:
-        frame['granular_name'] = [name_mapper[activity_name] for activity_name in frame['activity_name']]
+    if 'model_name' not in frame.columns:
+        frame['model_name'] = '{}'
+
+    def map_activity(row):
+        model_name_dict = eval(row['model_name'])
+        if 'granular_name' not in model_name_dict:
+            model_name_dict['granular_name'] = name_mapper[row['activity_name']]
+        return str(model_name_dict)
+
+    frame['model_name'] = frame[['activity_name', 'model_name']].apply(map_activity, axis=1)
 
     frame['activity_id'] = frame['activity_id'].astype(str)
     frame['volume'] = [float(x.replace(',', '.')) if isinstance(x, str) else float(x)
                        for x in frame['volume']]
@@ -245,9 +253,8 @@ def build_work_graph(frame: pd.DataFrame, resource_names: list[str], work_estima
                                              row['min_req'][res_name],
                                              row['max_req'][res_name]))
         else:
-            reqs = work_estimator.find_work_resources(work_name=row['granular_name'],
-                                                      work_volume=float(row['volume']),
-                                                      measurement=row['measurement'])
+            reqs = work_estimator.find_work_resources(model_name=eval(row['model_name']),
+                                                      work_volume=float(row['volume']))
         is_service_unit = len(reqs) == 0
 
         zone_reqs = [ZoneReq(*v) for v in eval(row['required_statuses']).items()] \
@@ -257,8 +264,8 @@ def build_work_graph(frame: pd.DataFrame, resource_names: list[str], work_estima
         group = row['group'] if 'group' in frame.columns else 'main project'
         priority = row['priority'] if 'priority' in frame.columns else 1
 
-        work_unit = WorkUnit(row['activity_id'], row['granular_name'], reqs, group=group,
-                             description=description, volume=row['volume'], volume_type=row['measurement'],
+        work_unit = WorkUnit(row['activity_id'], eval(row['model_name']), reqs, group=group,
+                             description=description, volume=row['volume'],
                              is_service_unit=is_service_unit, display_name=row['activity_name_original'],
                              zone_reqs=zone_reqs, priority=priority)
         parents = [(id_to_node[p_id], lag, conn_type) for p_id, conn_type, lag in row.edges]
diff --git a/sampo/utilities/collections_util.py b/sampo/utilities/collections_util.py
index 566dde1b..f5525188 100644
--- a/sampo/utilities/collections_util.py
+++ b/sampo/utilities/collections_util.py
@@ -35,3 +35,9 @@ def build_index(items: Iterable[T], key_getter: Callable[[T], K], value_getter:
 
 def reverse_dictionary(dictionary: dict[K, V]) -> dict[V, K]:
     return {value: key for key, value in dictionary.items()}
+
+
+def first(the_iterable, condition=lambda x: True):
+    for i in the_iterable:
+        if condition(i):
+            return i
diff --git a/sampo/utilities/resource_usage.py b/sampo/utilities/resource_usage.py
index 05c13e4d..faab5133 100644
--- a/sampo/utilities/resource_usage.py
+++ b/sampo/utilities/resource_usage.py
@@ -47,7 +47,7 @@ def resources_sum(schedule: Schedule, resources_names: Iterable[str] | None = No
     resources_names = set(resources_names) if not is_none else {}
 
     res_sum = sum([sum([worker.count * work.duration.value for worker in work.workers
-                        if worker.name in resources_names or is_none], start=0)
+                        if worker.model_name in resources_names or is_none], start=0)
                    for work in schedule.works])
     return res_sum
diff --git a/sampo/utilities/sampler/__init__.py b/sampo/utilities/sampler/__init__.py
index f8dab27d..d29bb2f3 100644
--- a/sampo/utilities/sampler/__init__.py
+++ b/sampo/utilities/sampler/__init__.py
@@ -1,5 +1,5 @@
 import random
-from typing import Optional, List, Tuple, Hashable
+from typing import Optional, List, Tuple, Hashable, Any
 
 from sampo.schemas.graph import GraphNode, EdgeType
 from sampo.schemas.requirements import WorkerReq
@@ -11,54 +11,46 @@ class Sampler:
     def __init__(self,
-                 seed: Optional[Hashable] = None
-                 ):
+                 seed: Hashable | None = None):
         self.rand = random.Random(seed)
 
     def worker_reqs(self,
-                    volume: Optional[MinMax[int]] = MinMax[int](1, 50),
-                    worker_count: Optional[MinMax[int]] = MinMax[int](1, 100)
-                    ) -> List[WorkerReq]:
+                    volume: MinMax[int] = MinMax[int](1, 50),
+                    worker_count: MinMax[int] = MinMax[int](1, 100)) -> List[WorkerReq]:
         return get_worker_reqs_list(self.rand, volume, worker_count)
 
     def work_unit(self,
-                  name: str,
-                  work_id: Optional[str] = '',
-                  volume_type: Optional[str] = 'unit',
-                  group: Optional[str] = 'default',
-                  work_volume: Optional[MinMax[float]] = MinMax[float](0.1, 100.0),
-                  req_volume: Optional[MinMax[int]] = MinMax[int](1, 50),
-                  req_worker_count: Optional[MinMax[int]] = MinMax[int](1, 100)
-                  ) -> WorkUnit:
-        return get_work_unit(self.rand, name, work_id, volume_type, group, work_volume, req_volume, req_worker_count)
+                  model_name: dict[str, Any] | str,
+                  work_id: str = '',
+                  group: str = 'default',
+                  work_volume: MinMax[float] = MinMax[float](0.1, 100.0),
+                  req_volume: MinMax[int] = MinMax[int](1, 50),
+                  req_worker_count: MinMax[int] = MinMax[int](1, 100)) -> WorkUnit:
+        return get_work_unit(self.rand, model_name, work_id, group, work_volume, req_volume, req_worker_count)
 
     def similar_work_unit(self,
                           exemplar: WorkUnit,
-                          scalar: Optional[float] = 1.0,
-                          name: Optional[str] = '',
-                          work_id: Optional[str] = ''
-                          ) -> WorkUnit:
-        return get_similar_work_unit(self.rand, exemplar, scalar, name, work_id)
+                          scalar: float = 1.0,
+                          model_name: dict[str, Any] | str = '',
+                          work_id: str = '') -> WorkUnit:
+        return get_similar_work_unit(self.rand, exemplar, scalar, model_name, work_id)
 
     def graph_node(self,
-                   name: str,
-                   edges: List[Tuple[GraphNode, float, EdgeType]],
-                   work_id: Optional[str] = '',
-                   volume_type: Optional[str] = 'unit',
-                   group: Optional[str] = 'default',
-                   work_volume: Optional[MinMax[float]] = MinMax[float](0.1, 100.0),
-                   req_volume: Optional[MinMax[int]] = MinMax[int](1, 50),
-                   req_worker_count: Optional[MinMax[int]] = MinMax[int](1, 100)
-                   ) -> GraphNode:
-        wu = get_work_unit(self.rand, name, work_id, volume_type, group, work_volume, req_volume, req_worker_count)
+                   model_name: dict[str, Any] | str,
+                   edges: list[tuple[GraphNode, float, EdgeType]],
+                   work_id: str = '',
+                   group: str = 'default',
+                   work_volume: MinMax[float] = MinMax[float](0.1, 100.0),
+                   req_volume: MinMax[int] = MinMax[int](1, 50),
+                   req_worker_count: MinMax[int] = MinMax[int](1, 100)) -> GraphNode:
+        wu = get_work_unit(self.rand, model_name, work_id, group, work_volume, req_volume, req_worker_count)
         return GraphNode(wu, edges)
 
     def similar_graph_node(self,
                            exemplar: GraphNode,
-                           edges: List[Tuple[GraphNode, float, EdgeType]],
-                           scalar: Optional[float] = 1.0,
-                           name: Optional[str] = '',
-                           work_id: Optional[str] = ''
-                           ) -> GraphNode:
-        wu = get_similar_work_unit(self.rand, exemplar.work_unit, scalar, name, work_id)
+                           edges: list[tuple[GraphNode, float, EdgeType]],
+                           scalar: float = 1.0,
+                           model_name: dict[str, Any] | str = '',
+                           work_id: str = '') -> GraphNode:
+        wu = get_similar_work_unit(self.rand, exemplar.work_unit, scalar, model_name, work_id)
         return GraphNode(wu, edges)
diff --git a/sampo/utilities/sampler/works.py b/sampo/utilities/sampler/works.py
index a1d1fa1f..eecb27de 100644
--- a/sampo/utilities/sampler/works.py
+++ b/sampo/utilities/sampler/works.py
@@ -1,5 +1,5 @@
 import random
-from typing import Optional
+from typing import Optional, Any
 
 from sampo.schemas.utils import uuid_str
 from sampo.schemas.works import WorkUnit
@@ -7,9 +7,9 @@
 from sampo.utilities.sampler.types import MinMax
 
 
-def get_work_unit(rand: random.Random, name: str,
+def get_work_unit(rand: random.Random,
+                  model_name: dict[str, Any] | str,
                   work_id: Optional[str] = '',
-                  volume_type: Optional[str] = 'unit',
                   group: Optional[str] = 'default',
                   work_volume: Optional[MinMax[float]] = MinMax[float](0.1, 100.0),
                   req_volume: Optional[MinMax[int]] = MinMax[int](1, 50),
@@ -18,17 +18,16 @@ def get_work_unit(rand: random.Random, name: str,
     reqs = get_worker_reqs_list(rand, req_volume, req_worker_count)
     work_id = work_id or uuid_str(rand)
     volume = rand.random() * (work_volume.max - work_volume.min) + work_volume.min
-    return WorkUnit(work_id, name, worker_reqs=reqs, volume=volume, volume_type=volume_type, group=group)
+    return WorkUnit(work_id, model_name, worker_reqs=reqs, volume=volume, group=group)
 
 
 def get_similar_work_unit(rand: random.Random,
                           exemplar: WorkUnit,
-                          scalar: Optional[float] = 1.0,
-                          name: Optional[str] = '',
-                          work_id: Optional[str] = ''
-                          ) -> WorkUnit:
+                          scalar: float = 1.0,
+                          model_name: dict[str, Any] | str = '',
+                          work_id: str = '') -> WorkUnit:
     reqs = [req.scale_all(scalar) for req in exemplar.worker_reqs]
     work_id = work_id or uuid_str(rand)
-    name = name or exemplar.name
-    return WorkUnit(work_id, name, worker_reqs=reqs, group=exemplar.group,
-                    volume=exemplar.volume * scalar, volume_type=exemplar.volume_type)
+    model_name = model_name or exemplar.model_name
+    return WorkUnit(work_id, model_name, worker_reqs=reqs, group=exemplar.group,
+                    volume=exemplar.volume * scalar)
diff --git a/sampo/utilities/schedule.py b/sampo/utilities/schedule.py
index 88ace600..05322bec 100644
--- a/sampo/utilities/schedule.py
+++ b/sampo/utilities/schedule.py
@@ -66,12 +66,12 @@ def get_stage_num(name: str):
 
         if len(task_df) > 1:
             df = task_df.copy()
-            df['stage_num'] = df['task_name_mapped'].apply(get_stage_num)
+            df['stage_num'] = df['granular_name'].apply(get_stage_num)
             df = df.sort_values(by='stage_num')
             df = df.reset_index(drop=True)
 
             df = df.iloc[-1:].reset_index(drop=True)
-            for column in ['task_id', 'task_name', 'task_name_mapped']:
+            for column in ['task_id', 'task_name', 'granular_name']:
                 df.loc[0, column] = df.loc[0, column].split(STAGE_SEP)[0]  # fix task id and name
 
             # sum up volumes through all stages
diff --git a/sampo/utilities/visualization/resources.py b/sampo/utilities/visualization/resources.py
index 0e0efe93..e42e3937 100644
--- a/sampo/utilities/visualization/resources.py
+++ b/sampo/utilities/visualization/resources.py
@@ -196,7 +196,7 @@ def get_schedule_df(schedule: ScheduleWorkDict, fig_type: EmploymentFigType, pro
pro resource_schedule: dict[str, list[tuple[int, str, int]]] = {} for i, (work, item) in enumerate(sorted(list(schedule.items()), key=lambda x: x[1].start_time)): resources: list[Worker] = item.workers - w_name = item.name + w_name = item.model_name for worker in resources: if worker.count > 0: if worker.name not in resource_schedule: diff --git a/sampo/utilities/visualization/schedule.py b/sampo/utilities/visualization/schedule.py index bf42614f..4fe814cb 100644 --- a/sampo/utilities/visualization/schedule.py +++ b/sampo/utilities/visualization/schedule.py @@ -92,7 +92,7 @@ def create_zone_row(i, zone_names, zone) -> dict: idx = schedule_dataframe['idx'].copy() def get_zone_usage_info(swork) -> str: - return '
' + '
'.join([f'{zone.name}: {zone.to_status}' for zone in swork.zones_pre]) + return '
' + '
'.join([f'{zone.model_name}: {zone.to_status}' for zone in swork.zones_pre]) schedule_dataframe['zone_information'] = sworks.apply(get_zone_usage_info) @@ -100,10 +100,10 @@ def get_zone_usage_info(swork) -> str: # create zone information for i, swork in zip(idx, sworks): - zone_names = '
' + '
'.join([zone.name for zone in swork.zones_pre]) + zone_names = '
' + '
'.join([zone.model_name for zone in swork.zones_pre]) for zone in swork.zones_pre: access_cards.append(create_zone_row(i, zone_names, zone)) - zone_names = '
' + '
'.join([zone.name for zone in swork.zones_post]) + zone_names = '
' + '
'.join([zone.model_name for zone in swork.zones_post]) for zone in swork.zones_post: access_cards.append(create_zone_row(i, zone_names, zone)) diff --git a/sampo/utilities/visualization/work_graph.py b/sampo/utilities/visualization/work_graph.py index ec276e1e..45a546a4 100644 --- a/sampo/utilities/visualization/work_graph.py +++ b/sampo/utilities/visualization/work_graph.py @@ -99,10 +99,10 @@ def collect_jobs(start: GraphNode, max_deep: Optional[int] = None) -> tuple[list max_volume = max(max_volume, volume) id_to_job[unit.id] = len(jobs) jobs.append( - dict(job_id=len(jobs), work_id=str(unit.id), task=unit.name, start=X_PERIOD * deep, + dict(job_id=len(jobs), work_id=str(unit.id), task=unit.model_name, start=X_PERIOD * deep, children=node.children, parents=node.parents, group=unit.group, color=color_from_str(unit.group), volume=volume, - cluster=extract_cluster_name(unit.name))) + cluster=extract_cluster_name(unit.model_name))) colors[unit.group] = color_from_str(unit.group) for child in node.children: work_id = child.id diff --git a/tests/parser/test_wg.csv b/tests/parser/test_wg.csv index 1537155f..fdbfe1f6 100644 --- a/tests/parser/test_wg.csv +++ b/tests/parser/test_wg.csv @@ -1,8 +1,8 @@ -activity_id;activity_name;granular_name;measurement;volume;predecessor_ids;connection_types;lags -1;Работа1;Работа1;-1;0.0;;; -2;Работа2;Работа2;шт;1496.0;1;FFS;0.01 -3;Работа3;Работа3;шт;1496.0;2;FFS;0.01 -4;Работа4;Работа4;шт;1496.0;2;FFS;0.01 -5;Работа5;Работа5;шт;1500.0;2;FFS;0.01 -6;Работа6;Работа6;шт;1496.0;2,3,4,5;FFS,FFS,FFS,FFS;0.01,0.01,0.01,0.01 -7;Работа7;Работа7;м3;82.8;3;FFS;0.17 \ No newline at end of file +activity_id;activity_name;model_name;volume;predecessor_ids;connection_types;lags +1;Работа1;{"granular_name": "Работа1", "measurement": "шт"};0;;; +2;Работа2;{"granular_name": "Работа2", "measurement": "шт"};1496.0;1;FFS;0.01 +3;Работа3;{"granular_name": "Работа3", "measurement": "шт"};1496.0;2;FFS;0.01 +4;Работа4;{"granular_name": "Работа4", "measurement": "шт"};1496.0;2;FFS;0.01 +5;Работа5;{"granular_name": "Работа5", "measurement": "шт"};1500.0;2;FFS;0.01 +6;Работа6;{"granular_name": "Работа6", "measurement": "шт"};1496.0;2,3,4,5;FFS,FFS,FFS,FFS;0.01,0.01,0.01,0.01 +7;Работа7;{"granular_name": "Работа7", "measurement": "м3"};82.8;3;FFS;0.17 \ No newline at end of file diff --git a/tests/scheduler/timeline/momentum_timeline_test.py b/tests/scheduler/timeline/momentum_timeline_test.py index ab7d8c8e..dda256ef 100644 --- a/tests/scheduler/timeline/momentum_timeline_test.py +++ b/tests/scheduler/timeline/momentum_timeline_test.py @@ -45,10 +45,12 @@ def test_insert_works_with_one_worker_kind(setup_timeline_context): nodes = [] for i in range(10): - work_unit = WorkUnit(id=str(i), name=f'Work {str(i)}', worker_reqs=[WorkerReq(kind=worker_kind, - volume=Time(50), - min_count=10, - max_count=50)]) + work_unit = WorkUnit(id=str(i), + model_name=f'Work {str(i)}', + worker_reqs=[WorkerReq(kind=worker_kind, + volume=Time(50), + min_count=10, + max_count=50)]) nodes.append(GraphNode(work_unit=work_unit, parent_works=[])) node2swork = {} diff --git a/tests/structurator/prepare_wg_copy_test.py b/tests/structurator/prepare_wg_copy_test.py index 8b1b2d92..b12e0276 100644 --- a/tests/structurator/prepare_wg_copy_test.py +++ b/tests/structurator/prepare_wg_copy_test.py @@ -8,6 +8,6 @@ def test_prepare_wg_copy(setup_wg): is_copied_wg_equals_setup_wg = True for node in setup_wg.nodes: - if not (node.work_unit.name == copied_nodes[old_to_new_ids[node.id]].work_unit.name): + if not 
(node.work_unit.model_name == copied_nodes[old_to_new_ids[node.id]].work_unit.model_name): is_copied_wg_equals_setup_wg = False assert is_copied_wg_equals_setup_wg
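---

Usage notes.

How the new model_name parameter behaves, following the WorkUnit.__init__ logic in the patch above; a minimal sketch with illustrative ids and values:

    from sampo.schemas.works import WorkUnit

    # A dict model_name is stored as-is; 'measurement' defaults to 'unit' when absent.
    wu_dict = WorkUnit('wu-1', {'granular_name': 'pipe', 'measurement': 'km'}, volume=2.5)

    # A plain string is promoted to {'granular_name': <the string>, 'measurement': 'unit'}.
    wu_str = WorkUnit('wu-2', 'minimal road', volume=1.0)

    assert wu_str.model_name == {'granular_name': 'minimal road', 'measurement': 'unit'}
    assert wu_dict.display_name == 'pipe'  # display_name falls back to granular_name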
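The estimator interface now takes the whole model_name dict instead of separate work_name/measurement arguments. A sketch of a call against the default estimator (the concrete class name, assumed here to be DefaultWorkEstimator, is not visible in the hunks above):

    from sampo.schemas.time_estimator import DefaultWorkEstimator  # assumed concrete class name

    estimator = DefaultWorkEstimator()

    # The estimator receives the whole model_name dict in one argument.
    reqs = estimator.find_work_resources(
        model_name={'granular_name': 'temporary road', 'measurement': 'km'},
        work_volume=3.0,
    )
    for req in reqs:
        print(req.kind, req.min_count, req.max_count)

    # get_model_name_keys() reports which model_name keys the estimator expects.
    assert estimator.get_model_name_keys() == ['granular_name', 'measurement']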
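Since model_name cells are now serialized dicts in CSV input and the parser evaluates them, ast.literal_eval is a safer drop-in than eval() for untrusted files; a hypothetical helper sketching the idea (not part of the patch):

    import ast

    def parse_model_name(raw: str) -> dict:
        """Parse a serialized model_name cell, tolerating empty cells."""
        if not raw or not raw.strip():
            return {}
        return ast.literal_eval(raw)  # accepts only Python literals, unlike eval()

    assert parse_model_name('{"granular_name": "pipe", "measurement": "km"}') == \
           {'granular_name': 'pipe', 'measurement': 'km'}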