Enhance motivation model
This commit brings two enhancements:

- Better routing configuration: initialize agents with the nearest door.
- Motivation is now bounded above by 1 via a logistic function (see the sketch below).
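
A minimal sketch of the bounding idea (standalone and illustrative; the committed implementation is shifted_logistic() in src/motivation_model.py below):

import numpy as np

def bound_motivation(m: float, m_max: float = 1.0, k: float = 1.0) -> float:
    # Logistic squashing: raw motivation in (-inf, inf) maps into (0, m_max).
    return float(m_max / (1 + np.exp(-k * m)))

print(bound_motivation(3.5))  # ~0.97: values that previously exceeded 1 now stay below it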
chraibi committed Oct 11, 2024
1 parent cfc95e3 commit edab256
Showing 8 changed files with 122 additions and 61 deletions.
app.py: 2 changes (1 addition, 1 deletion)
@@ -28,7 +28,7 @@
st.session_state.data = {}

if "all_files" not in st.session_state:
st.session_state.all_files = ["files/inifile.json"]
st.session_state.all_files = ["files/inifile.json", "files/bottleneck.json"]

tab = init_sidebar()

debug.csv: 2 changes (2 additions, 0 deletions)
@@ -1 +1,3 @@
-3.1730935085122131,19.4537532938564
0.0,19.4537532938564
3.1730935085122131,19.4537532938564
files/inifile.json: 14 changes (7 additions, 7 deletions)
@@ -65,16 +65,16 @@
"motivation_strategy": "EVC",
"normal_v_0": 1.2,
"normal_time_gap": 1.0,
"width": 2.0,
"height": 2.0,
"width": 3.0,
"height": 1.0,
"seed": 1.0,
"max_value_high": 1.0,
"min_value_high": 0.8,
"max_value_high": 2.0,
"min_value_high": 1,
"max_value_low": 0.5,
"min_value_low": 0.2,
"number_high_value": 1,
"min_value_low": 0.3,
"number_high_value": 40,
"competition_max": 1,
"competition_decay_reward": 5,
"competition_decay_reward": 10,
"percent": 0.8,
"motivation_doors": [
{
simulation.py: 33 changes (19 additions, 14 deletions)
@@ -243,20 +243,19 @@ def run_simulation(
motivation_i = motivation_model.motivation_strategy.motivation(
params
)
# d_ped =
if motivation_i > 1:
logging.info(
f"{simulation.iteration_count()}: {agent.id}: {motivation_i = }"
)
v_0, time_gap = motivation_model.calculate_motivation_state(
motivation_i, agent.id
)

agent.model.strength_neighbor_repulsion = adjust_parameter_linearly(
motivation_i=motivation_i,
min_value=a_ped_min,
default_value=default_strength,
max_value=a_ped_max,
)
# print(
# f"{ agent.model.strength_neighbor_repulsion=}, {a_ped_min=}, {a_ped_max=}\n"
# )
# # D
agent.model.range_neighbor_repulsion = adjust_parameter_linearly(
motivation_i=motivation_i,
@@ -269,10 +268,10 @@ def run_simulation(
# print(
# f"{ agent.model.range_neighbor_repulsion=}, {d_ped_min=}, {d_ped_max}"
# )
if agent.id == -1:
logging.info(
f"{simulation.iteration_count()}, Agent={agent.id}, {agent.model.strength_neighbor_repulsion =}, {agent.model.v0 = :.2f}, {time_gap = :.2f}, {motivation_i = }, Pos: {position[0]:.2f} {position[1]:.2f}"
)
# if agent.id == -3:
# logging.info(
# f"{simulation.iteration_count()}, Agent={agent.id}, {agent.model.strength_neighbor_repulsion =}, {agent.model.v0 = :.2f}, {time_gap = :.2f}, {motivation_i = }, Pos: {position[0]:.2f} {position[1]:.2f}"
# )

write_value_to_file(
file_handle,
@@ -310,7 +309,7 @@ def create_agent_parameters(
)
agent_parameters_list.append(agent_parameters)

return agent_parameters_list
return agent_parameters_list, destinations


def init_positions(_data: Dict[str, Any], _number_agents: int) -> List[Point]:
@@ -388,11 +387,16 @@ def main(
a_ped, d_ped, a_wall, d_wall, a_ped_min, a_ped_max, d_ped_min, d_ped_max = (
parse_velocity_init_parameters(_data)
)
agent_parameters_list = create_agent_parameters(_data, simulation)
agent_parameters_list, exit_positions = create_agent_parameters(_data, simulation)
# positions = init_positions(_data, _number_agents)
# positions = read_positions_from_csv(file_path="1C060_frame_3951.csv")
positions = read_positions_from_csv(file_path="debug.csv")
ped_ids = distribute_and_add_agents(simulation, agent_parameters_list, positions)
positions = read_positions_from_csv(file_path="1C060_frame_3951.csv")
# positions = read_positions_from_csv(file_path="debug.csv")
ped_ids = distribute_and_add_agents(
simulation=simulation,
agent_parameters_list=agent_parameters_list,
positions=positions,
exit_positions=exit_positions,
)
motivation_model = init_motivation_model(_data, ped_ids)
logging.info(f"Running simulation for {len(ped_ids)} agents:")
logging.info(f"{motivation_model.motivation_strategy.width = }")
@@ -451,6 +455,7 @@ def modify_and_save_config(base_config, modification_dict, new_config_path):
if __name__ == "__main__":
init_logger()
base_config = "files/inifile.json"
logging.info(f"{base_config = }")
# Load base configuration
with open(base_config, "r", encoding="utf8") as f:
base_config = json.load(f)
src/docs.py: 25 changes (10 additions, 15 deletions)
@@ -73,21 +73,16 @@ def main() -> None:
## Update agents
For an agent $i$ we calculate $m_i$ by one of the methods above and update its parameters as follows:
$\tilde v_i^0 = (v_i^0\cdot V_i)(1 + m_i)$ and $\tilde T_i = \frac{T_i}{1 + m_i}$
The first part of the equation is equivalent to
$\tilde v_i^0 = v_i^0\cdot V_i(1 + E_i\cdot V_i\cdot C_i)$.
Here we see that the influence of $V_i$ is squared.
Therefore, the second variation of the model reads
## EC-V
$$
\begin{cases}
\tilde v_i^0 = v_i^0(1 + E_i\cdot C_i)\cdot V_i, \\
\tilde T_i = \frac{T_i}{1 + E_i\cdot C_i}.
\end{cases}
$$
\tilde v_i^0 = 2\cdot v_i^0\cdot V_i\cdot\sigma(m_i)
$$
and
$$
\tilde T_i = 2T_i\Big(1-\sigma(m_i)\Big),
$$
where $\sigma$ is the logistic function:
$$
\sigma(m) = \frac{1}{1 + e^{-m}}
$$
""",
unsafe_allow_html=True,
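A quick numeric check of the update rules above (a sketch: v0 = 1.2 and T = 1.0 match normal_v_0 and normal_time_gap from files/inifile.json; V_i = 1 is assumed):

import math

v0, T, V = 1.2, 1.0, 1.0
for m in (-2.0, 0.0, 2.0):
    s = 1 / (1 + math.exp(-m))  # sigma(m)
    v_new = 2 * v0 * V * s      # tilde v_i^0 = 2 * v_i^0 * V_i * sigma(m_i)
    T_new = 2 * T * (1 - s)     # tilde T_i = 2 * T_i * (1 - sigma(m_i))
    print(f"m={m:+.1f}  sigma={s:.3f}  v0 -> {v_new:.2f}  T -> {T_new:.2f}")

At m = 0 the defaults (v0, T) come back unchanged; large m at most doubles v0 and drives T toward 0.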
src/motivation_model.py: 42 changes (38 additions, 4 deletions)
@@ -8,13 +8,46 @@

import matplotlib.pyplot as plt
import numpy as np
import numpy.typing as npt
from matplotlib.figure import Figure

from .logger_config import log_debug

Point: TypeAlias = Tuple[float, float]


def shifted_logistic(
x: float, M_max: float = 1.0, k: float = 1.0, shift: float = 0.0
) -> float:
"""
Computes the shifted logistic function.
This function serves as a means of bounding the motivation values to the range (0, M_max).
The shifted logistic function is defined as:
.. math::
\\text{motivation} = \\frac{M_{max}}{1 + e^{-k \\cdot (x - \\text{shift})}}
Parameters:
----------
x : float
Input value for which to compute the shifted logistic function.
M_max : float, optional
The maximum value of the logistic function. Default is 1.0.
k : float, optional
The steepness of the curve. Default is 1.0.
shift : float, optional
The value to shift the input by. Default is 0.0.
Returns:
-------
float
The computed value of the shifted logistic function for the input `x`.
"""
return float(M_max / (1 + np.exp(-k * (x - shift))))
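
# Spot checks of the function above (a sketch, values rounded): at the midpoint
# x == shift the result is 0.5 * M_max; shifted_logistic(5.0) ~= 0.993 (near
# saturation at M_max = 1.0) and shifted_logistic(-5.0) ~= 0.007 (near 0), so
# raw motivations of any size land in the open interval (0, M_max).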


class MotivationStrategy(ABC):
"""Abstract class for strategy model."""

@@ -107,7 +140,7 @@ class EVCStrategy(MotivationStrategy):
evc: bool = True

def __post_init__(self) -> None:
"""Intialise array pedestrian_value with random values in min max interval."""
"""Initialize array pedestrian_value with random values in min max interval."""
if self.seed is not None:
random.seed(self.seed)

@@ -194,7 +227,7 @@ def motivation(self, params: dict[str, Any]) -> float:
params["seed"] = None

value = self.pedestrian_value[agent_id] if self.evc else 1.0
return float(
M = float(
value
* EVCStrategy.competition(
N=got_reward,
@@ -209,6 +242,7 @@ def motivation(self, params: dict[str, Any]) -> float:
self.height,
)
)
return shifted_logistic(M)

def plot(self) -> List[Figure]:
"""Plot functions for inspection."""
@@ -403,8 +437,8 @@ def calculate_motivation_state(
"""Return v0, T tuples depending on Motivation. (v0,T)=(1.2,1)."""
v_0 = self.normal_v_0 * self.motivation_strategy.get_value(agent_id=agent_id)
time_gap = self.normal_time_gap
v_0_new = (1 + motivation_i) * v_0
time_gap_new = time_gap / (1 + motivation_i)
v_0_new = 2 * v_0 * motivation_i
time_gap_new = time_gap / (2 * motivation_i)
return v_0_new, time_gap_new

def plot(self) -> Tuple[Figure, Figure]:
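For orientation, the neutral point of calculate_motivation_state above: with motivation_i = sigma(0) = 0.5 the committed formulas return the unmodified defaults. A tiny standalone check (plain floats, no model objects):

v_0, time_gap = 1.2, 1.0
motivation_i = 0.5                             # sigma(0): neutral motivation
v_0_new = 2 * v_0 * motivation_i               # 1.2, unchanged
time_gap_new = time_gap / (2 * motivation_i)   # 1.0, unchanged
print(v_0_new, time_gap_new)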
src/ui.py: 14 changes (8 additions, 6 deletions)
@@ -249,13 +249,15 @@ def ui_value_parameters(data: Dict[str, Any]) -> None:
"**Value high**",
key="value_high",
options=np.arange(
1.0, # float(data["motivation_parameters"]["min_value_high"]),
5.0, # float(data["motivation_parameters"]["max_value_high"]),
1.0,
2.6,
0.1,
),
#
#
value=[
2.0, # float(data["motivation_parameters"]["min_value_high"] + 0.2),
4.0, # float(data["motivation_parameters"]["max_value_high"] - 0.2),
float(data["motivation_parameters"]["max_value_high"]),
float(data["motivation_parameters"]["min_value_high"]),
],
format_func=lambda x: f"{x:.2f}",
help="Upper/Lower limit of high Value people.",
@@ -271,8 +273,8 @@ def ui_value_parameters(data: Dict[str, Any]) -> None:
0.1,
),
value=[
0.2, # float(data["motivation_parameters"]["min_value_low"] + 0.2),
0.8, # float(data["motivation_parameters"]["max_value_low"] - 0.2),
float(data["motivation_parameters"]["min_value_low"]),
float(data["motivation_parameters"]["max_value_low"]),
],
help="Upper/Lower limit of low Value people.",
format_func=lambda x: f"{x:.2f}",
src/utilities.py: 51 changes (37 additions, 14 deletions)
@@ -19,6 +19,7 @@
from shapely import GeometryCollection, Polygon
from shapely.ops import unary_union
from .logger_config import log_error, log_info
from math import sqrt

Point: TypeAlias = Tuple[float, float]

@@ -104,11 +105,11 @@ def init_journey(
wp_ids = []
journey = jps.JourneyDescription()
distance = 1
for way_point in way_points:
# log_info(f"add way_point: {way_point}")
wp_id = simulation.add_waypoint_stage((way_point[0], way_point[1]), distance)
wp_ids.append(wp_id)
journey.add(wp_id)
# for way_point in way_points:
# # log_info(f"add way_point: {way_point}")
# wp_id = simulation.add_waypoint_stage((way_point[0], way_point[1]), distance)
# wp_ids.append(wp_id)
# journey.add(wp_id)

for e in exits:
log_info(f"add {e}")
@@ -129,24 +130,46 @@
return journey_id, exit_ids


def calculate_distance(p1: Point, p2: Point) -> float:
"""Calculate Euclidean distance between two points."""
return sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)


def calculate_centroid(points: List[Point]) -> Point:
"""Calculate the centroid of a polygon (list of points)."""
x_coords = [p[0] for p in points]
y_coords = [p[1] for p in points]
centroid_x = sum(x_coords) / len(points)
centroid_y = sum(y_coords) / len(points)
return (centroid_x, centroid_y)


def distribute_and_add_agents(
simulation: jps.Simulation,
agent_parameters_list: List[jps.CollisionFreeSpeedModelAgentParameters],
agent_parameters_list: List[jps.CollisionFreeSpeedModelV2AgentParameters],
positions: List[Point],
exit_positions: List[List[Point]],
) -> List[int]:
"""Initialize positions of agents and insert them into the simulation.
"""Initialize positions of agents, assign each one to the nearest exit (based on centroid).
:param simulation:
:param agent_parameters:
:param positions:
:returns:
and insert them into the simulation.
:param simulation: The simulation object.
:param agent_parameters_list: List of agent parameters to be used for each agent.
:param positions: List of initial positions for agents.
:param exit_positions: List of positions for each exit (as polygons).
:returns: List of pedestrian IDs after being added to the simulation.
"""
# log_info("Distribute and Add Agent")
ped_ids = []
size = len(agent_parameters_list)
exit_centroids = [calculate_centroid(exit_points) for exit_points in exit_positions]

for i, (pos_x, pos_y) in enumerate(positions):
agent_parameters = agent_parameters_list[i % size]
agent_position = (pos_x, pos_y)
nearest_exit_index = min(
range(len(exit_centroids)),
key=lambda j: calculate_distance(agent_position, exit_centroids[j]),
)
agent_parameters = agent_parameters_list[nearest_exit_index]
agent_parameters.position = (pos_x, pos_y)
ped_id = simulation.add_agent(agent_parameters)
ped_ids.append(ped_id)
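A self-contained sketch of the nearest-exit assignment above, with toy exit polygons (the coordinates are made up; the helpers mirror calculate_distance and calculate_centroid from src/utilities.py):

from math import sqrt
from typing import List, Tuple

Point = Tuple[float, float]

def calculate_distance(p1: Point, p2: Point) -> float:
    return sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)

def calculate_centroid(points: List[Point]) -> Point:
    return (sum(p[0] for p in points) / len(points), sum(p[1] for p in points) / len(points))

# Two rectangular exits; their centroids are (0, 10) and (20, 10).
exit_positions = [
    [(-1.0, 9.0), (1.0, 9.0), (1.0, 11.0), (-1.0, 11.0)],
    [(19.0, 9.0), (21.0, 9.0), (21.0, 11.0), (19.0, 11.0)],
]
exit_centroids = [calculate_centroid(e) for e in exit_positions]
for agent_position in [(3.0, 5.0), (18.0, 2.0)]:
    nearest = min(
        range(len(exit_centroids)),
        key=lambda j: calculate_distance(agent_position, exit_centroids[j]),
    )
    print(agent_position, "-> exit", nearest)  # (3, 5) -> exit 0, (18, 2) -> exit 1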
