Update motivation model to origin
chraibi committed Oct 14, 2024
1 parent 3377221 commit 1971458
Showing 2 changed files with 24 additions and 20 deletions.
6 changes: 3 additions & 3 deletions src/analysis.py
@@ -321,9 +321,9 @@ def handle_distance_to_entrance(traj, measurement_line, motivation_file) -> None
["speed", "time_seconds", "distance"],
]
norm = Normalize(speed.min().speed, speed.max().speed)
st.info(
f"Min: {speed.min().speed}, Max: {speed.max().speed}, first frame: {speed[FRAME_COL].min()}"
)
# st.info(
# f"Min: {speed.min().speed}, Max: {speed.max().speed}, first frame: {speed[FRAME_COL].min()}"
# )

cmap = cm.jet # type: ignore
# ---------------
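For orientation, the Normalize instance kept in this hunk is later paired with the jet colormap (cmap = cm.jet below) to color speed values. A minimal, self-contained sketch of that pattern; the DataFrame here is a made-up stand-in for the sliced trajectory data:

```python
import matplotlib.cm as cm
import pandas as pd
from matplotlib.colors import Normalize

# Toy stand-in for the speed slice; the values are illustrative only.
speed = pd.DataFrame(
    {"speed": [0.4, 0.9, 1.3], "time_seconds": [0.0, 1.0, 2.0], "distance": [5.0, 4.1, 2.8]}
)

norm = Normalize(speed.min().speed, speed.max().speed)  # scales speeds into [0, 1]
colors = cm.jet(norm(speed["speed"].to_numpy()))        # one RGBA color per sample
```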
38 changes: 21 additions & 17 deletions src/motivation_model.py
@@ -77,6 +77,7 @@ class DefaultMotivationStrategy(MotivationStrategy):

    width: float = 1.0
    height: float = 1.0
+   alpha: float = 1.0

@staticmethod
def name() -> str:
@@ -137,6 +138,7 @@ class EVCStrategy(MotivationStrategy):
    number_high_value: int = 10
    nagents: int = 10
    evc: bool = True
+   alpha: float = 1.0

def __post_init__(self) -> None:
"""Initialize array pedestrian_value with random values in min max interval."""
@@ -226,22 +228,22 @@ def motivation(self, params: dict[str, Any]) -> float:
        params["seed"] = None

        value = self.pedestrian_value[agent_id] if self.evc else 1.0
-       M = float(
-           value
-           * EVCStrategy.competition(
-               N=got_reward,
-               c0=self.competition_max,
-               N0=self.competition_decay_reward,
-               percent=self.percent,
-               Nmax=self.max_reward,
-           )
-           * EVCStrategy.expectancy(
-               distance,
-               self.width,
-               self.height,
-           )
-       )
-       return shifted_logistic(M)
+       V_unit = value / self.max_value_high
+       C_unit = EVCStrategy.competition(
+           N=got_reward,
+           c0=self.competition_max,
+           N0=self.competition_decay_reward,
+           percent=self.percent,
+           Nmax=self.max_reward,
+       )
+       E_unit = EVCStrategy.expectancy(
+           distance,
+           self.width,
+           self.height,
+       )
+
+       M = V_unit * E_unit * C_unit
+       return M

    def plot(self) -> List[Figure]:
        """Plot functions for inspection."""
@@ -434,10 +436,12 @@ def calculate_motivation_state(
        self, motivation_i: float, agent_id: int
    ) -> Tuple[float, float]:
        """Return v0, T tuples depending on Motivation. (v0,T)=(1.2,1)."""
+
        v_0 = self.normal_v_0 * self.motivation_strategy.get_value(agent_id=agent_id)
+
        time_gap = self.normal_time_gap
-       v_0_new = 2 * v_0 * motivation_i
-       time_gap_new = time_gap / (2 * motivation_i)
+       v_0_new = v_0 * (1 + self.motivation_strategy.alpha * motivation_i)
+       time_gap_new = time_gap / (1 + self.motivation_strategy.alpha * motivation_i)
        return v_0_new, time_gap_new

    def plot(self) -> Tuple[Figure, Figure]:
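The last hunk changes how motivation modulates the operational parameters. The old rule scaled the desired speed by 2 * motivation and the time gap by its inverse, which drives v_0 to 0 and T to infinity as motivation vanishes; the new rule scales both by 1 + alpha * motivation, so zero motivation leaves the defaults untouched. A standalone re-expression of the committed method (self folded into parameters; the defaults (v0, T) = (1.2, 1) come from the docstring):

```python
def calculate_motivation_state(
    motivation_i: float, v_0: float = 1.2, time_gap: float = 1.0, alpha: float = 1.0
) -> tuple[float, float]:
    # New rule: symmetric scaling by (1 + alpha * motivation).
    # motivation_i = 0 returns (v_0, time_gap) unchanged.
    factor = 1.0 + alpha * motivation_i
    return v_0 * factor, time_gap / factor


# Example: motivation 0.5 with alpha 1.0 gives v_0 = 1.8 and T ≈ 0.667.
print(calculate_motivation_state(0.5))
```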
