46 changes: 43 additions & 3 deletions MCPForUnity/Editor/Services/EditorStateCache.cs
@@ -32,6 +32,15 @@ internal static class EditorStateCache
private const double MinUpdateIntervalSeconds = 0.25;

private static JObject _cached;
private static JObject _cachedClone;
private static long _cachedSequence;

// State tracking to detect when snapshot actually changes
private static string _lastTrackedScenePath;
private static bool? _lastTrackedIsFocused;
private static bool? _lastTrackedIsPlaying;
private static bool? _lastTrackedIsUpdating;
private static string _lastTrackedActivityPhase;

private sealed class EditorStateSnapshot
{
@@ -286,7 +295,6 @@ private static void ForceUpdate(string reason)

private static JObject BuildSnapshot(string reason)
{
_sequence++;
_observedUnixMs = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds();

bool isCompiling = EditorApplication.isCompiling;
Comment on lines -289 to 292
question (bug_risk): Timestamps and other ephemeral fields will now remain constant while state is idle, which may break time-based consumers.

Because _sequence no longer increments on every snapshot, timestamps like _observedUnixMs (and any derived values) will now stay frozen while the logical state is idle. Any consumer relying on these timestamps for heartbeats, timeouts/latency, or "last seen" diagnostics will observe a static time instead of a continuously advancing one. If those use cases matter, consider either a separate monotonic heartbeat field or forcing _sequence to advance (and the snapshot to update) after some time threshold, even when the tracked state is unchanged.
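
A minimal sketch of the heartbeat idea described above (not part of the PR; the class, constant, and field names are illustrative assumptions):

    using UnityEditor;

    // Illustrative only: advance the snapshot sequence after a maximum idle age so that
    // timestamp consumers still see a heartbeat even when the tracked state is unchanged.
    internal static class SnapshotHeartbeat
    {
        private const double MaxIdleSeconds = 5.0;   // assumed threshold
        private static double _lastAdvance;

        public static bool ShouldForce()
        {
            double now = EditorApplication.timeSinceStartup; // monotonic editor clock
            if (now - _lastAdvance >= MaxIdleSeconds)
            {
                _lastAdvance = now;
                return true;
            }
            return false;
        }
    }

    // Hypothetical usage inside BuildSnapshot:
    //   if (hasChanges || SnapshotHeartbeat.ShouldForce()) { _sequence++; }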

@@ -308,6 +316,8 @@ private static JObject BuildSnapshot(string reason)
var testsMode = TestRunStatus.Mode?.ToString();
string currentJobId = TestJobManager.CurrentJobId;
bool isFocused = InternalEditorUtility.isApplicationActive;
bool isPlaying = EditorApplication.isPlaying;
bool isUpdating = EditorApplication.isUpdating;

var activityPhase = "idle";
if (testsRunning)
@@ -322,7 +332,7 @@
{
activityPhase = "domain_reload";
}
else if (EditorApplication.isUpdating)
else if (isUpdating)
{
activityPhase = "asset_import";
}
@@ -331,6 +341,26 @@
activityPhase = "playmode_transition";
}

// Only increment sequence if something actually changed.
// This prevents unnecessary DeepClone() calls in GetSnapshot() when state is idle.
bool hasChanges = _lastTrackedScenePath != scenePath
|| _lastTrackedIsFocused != isFocused
|| _lastTrackedIsPlaying != isPlaying
|| _lastTrackedIsUpdating != isUpdating
|| _lastTrackedActivityPhase != activityPhase;

if (hasChanges)
{
_sequence++;
}
Comment on lines +346 to +355
issue (bug_risk): Sequence increment is now decoupled from actual snapshot changes, which can cause stale snapshots to be served.

_sequence now only changes when the tracked fields change, but BuildSnapshot includes additional fields (e.g., compile state, test status, job IDs, timestamps). Those can change without updating _sequence, so _cachedClone may not be regenerated and callers can get a stale snapshot that doesn’t match _cached or the editor state.

If _sequence is meant to represent “snapshot content changed”, it should either be incremented on every BuildSnapshot call, or hasChanges should be expanded to include all snapshot fields that affect consumers. Otherwise _cached and _cachedClone can diverge in ways that are likely to break callers expecting current state.
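
A sketch of the broader change detection the comment suggests, comparing every input that feeds the snapshot rather than only the five tracked fields (the extra _lastTracked* fields are assumptions; the locals mirror those computed earlier in BuildSnapshot):

    // Illustrative only: include compile state, test mode, and job id in the comparison
    // so the sequence advances whenever any snapshot-visible field changes.
    bool hasChanges = _lastTrackedScenePath != scenePath
        || _lastTrackedIsFocused != isFocused
        || _lastTrackedIsPlaying != isPlaying
        || _lastTrackedIsUpdating != isUpdating
        || _lastTrackedActivityPhase != activityPhase
        || _lastTrackedIsCompiling != isCompiling        // assumed new field
        || _lastTrackedTestsMode != testsMode            // assumed new field
        || _lastTrackedCurrentJobId != currentJobId;     // assumed new field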


// Update tracked state
_lastTrackedScenePath = scenePath;
_lastTrackedIsFocused = isFocused;
_lastTrackedIsPlaying = isPlaying;
_lastTrackedIsUpdating = isUpdating;
_lastTrackedActivityPhase = activityPhase;

var snapshot = new EditorStateSnapshot
{
SchemaVersion = "unity-mcp/editor_state@2",
@@ -424,7 +454,17 @@ public static JObject GetSnapshot()
{
_cached = BuildSnapshot("rebuild");
}
return (JObject)_cached.DeepClone();

// Cache the cloned version to avoid allocating a new JObject on every GetSnapshot call.
// This fixes the GC spikes that occurred when the uvx server polled editor state frequently.
// Only regenerate the clone when _cached changes (detected via sequence number).
if (_cachedClone == null || _cachedSequence != _sequence)
{
_cachedClone = (JObject)_cached.DeepClone();
_cachedSequence = _sequence;
}

return _cachedClone;

issue (bug_risk): Returning the shared cached clone allows external mutation of the cache state.

Previously GetSnapshot() always returned a fresh DeepClone(), so callers effectively saw an immutable snapshot. Reusing a single _cachedClone means any mutation of the returned JObject (intentional or accidental) will corrupt the shared cache for all callers and may desynchronize it from _cached and the editor state.

Unless we can guarantee all consumers treat the snapshot as read-only, this optimization is risky. A safer approach is to keep _cached as the authoritative state and still DeepClone() on each GetSnapshot(), only avoiding rebuilding _cached when _sequence hasn’t changed. If you keep the shared clone, please explicitly document/enforce an immutable-view contract for GetSnapshot() and audit current usages.
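
A sketch of the safer alternative the comment describes, in the context of EditorStateCache (not the PR's code): _cached stays authoritative and every caller receives its own deep copy, so external mutation cannot leak back into the shared cache.

    // Illustrative only: hand out a fresh copy per caller, reuse only the underlying _cached.
    public static JObject GetSnapshot()
    {
        if (_cached == null)
        {
            _cached = BuildSnapshot("rebuild");
        }
        // Always return a private copy; the shared _cached object is never exposed,
        // so the sequence-based clone cache is not needed for correctness.
        return (JObject)_cached.DeepClone();
    }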

}
}
}
31 changes: 21 additions & 10 deletions MCPForUnity/Editor/Services/Transport/TransportCommandDispatcher.cs
@@ -72,13 +72,11 @@ static TransportCommandDispatcher()

EnsureInitialised();

// Always keep the update hook installed so commands arriving from background
// websocket tasks don't depend on a background-thread event subscription.
if (!updateHooked)
{
updateHooked = true;
EditorApplication.update += ProcessQueue;
}
// Note: We no longer permanently hook the update. Commands are woken via:
// 1. RequestMainThreadPump() -> SynchronizationContext.Post() [guaranteed to be called when commands arrive]
// 2. EditorApplication.QueuePlayerLoopUpdate() [nudges Unity to run immediately]
// 3. EditorApplication.update hook [re-added as needed by ProcessQueue]
// This avoids the ~28 ms GC spikes (roughly once per second) caused by allocating List objects every frame while idle.
}

/// <summary>
@@ -211,9 +209,13 @@ private static void HookUpdate()

private static void UnhookUpdateIfIdle()
{
// Intentionally no-op: keep update hook installed so background commands always process.
// This avoids "must focus Unity to re-establish contact" edge cases.
return;
// Only unhook if there are no pending commands. RequestMainThreadPump() guarantees
// re-hooking via SynchronizationContext.Post + QueuePlayerLoopUpdate when commands arrive.
if (Pending.Count == 0 && updateHooked)
{
updateHooked = false;
EditorApplication.update -= ProcessQueue;
}
}
Comment on lines 210 to 219
suggestion (bug_risk): UnhookUpdateIfIdle reads Pending.Count without locking, which may race if called from multiple code paths.

Within ProcessQueue, UnhookUpdateIfIdle() is called while holding PendingLock, so Pending.Count is safe there. The risk is future or alternate call sites that invoke UnhookUpdateIfIdle() without PendingLock, which would make the unsynchronized Pending.Count read race with writers and potentially throw InvalidOperationException.

If this method must always be called under PendingLock, consider either documenting that explicitly (e.g., name/comment) or taking PendingLock inside UnhookUpdateIfIdle() before accessing Pending to prevent incorrect future usage.

Suggested change:
private static void UnhookUpdateIfIdle()
{
// Only unhook if there are no pending commands. RequestMainThreadPump() guarantees
// re-hooking via SynchronizationContext.Post + QueuePlayerLoopUpdate when commands arrive.
// Must hold PendingLock while inspecting Pending to avoid races with writers.
lock (PendingLock)
{
if (Pending.Count == 0 && updateHooked)
{
updateHooked = false;
EditorApplication.update -= ProcessQueue;
}
}
}


private static void ProcessQueue()
@@ -229,6 +231,15 @@ private static void ProcessQueue()

lock (PendingLock)
{
// Early exit: if no pending commands, don't allocate a list. This prevents
// the memory allocations that occur every frame, which trigger GC every ~1 second
// and cause the 28ms stuttering reported in GitHub issue #577.
if (Pending.Count == 0)
{
UnhookUpdateIfIdle();
return;
}

ready = new List<(string, PendingCommand)>(Pending.Count);
foreach (var kvp in Pending)
{
@@ -821,6 +821,14 @@ private static void ProcessCommands()
List<(string id, QueuedCommand command)> work;
lock (lockObj)
{
// Early exit: if no commands queued, don't allocate a list. This prevents
// the memory allocations that occur every frame, which trigger GC every ~1 second
// and cause the 28ms stuttering reported in GitHub issue #577.
if (commandQueue.Count == 0)
{
return;
}

work = new List<(string, QueuedCommand)>(commandQueue.Count);
foreach (var kvp in commandQueue)
{
14 changes: 4 additions & 10 deletions MCPForUnity/Editor/Windows/MCPForUnityEditorWindow.cs
@@ -234,13 +234,15 @@ private void EnsureToolsLoaded()

private void OnEnable()
{
EditorApplication.update += OnEditorUpdate;
// No longer hook EditorApplication.update - it was calling UpdateConnectionStatus() every frame,
// which made expensive network socket checks (IsLocalHttpServerReachable) 60+ times per second.
// Connection status now updates only on explicit events (user actions, bridge state changes).
OpenWindows.Add(this);
}

private void OnDisable()
{
EditorApplication.update -= OnEditorUpdate;
// OnEditorUpdate hook was removed; see OnEnable comments.
OpenWindows.Remove(this);
guiCreated = false;
toolsLoaded = false;
Expand All @@ -255,14 +257,6 @@ private void OnFocus()
RefreshAllData();
}

private void OnEditorUpdate()
{
if (rootVisualElement == null || rootVisualElement.childCount == 0)
return;

connectionSection?.UpdateConnectionStatus();
}

private void RefreshAllData()
{
// Debounce rapid successive calls (e.g., from OnFocus being called multiple times)
@@ -0,0 +1,160 @@
using System;
using System.Threading;
using System.Threading.Tasks;
using NUnit.Framework;
using MCPForUnity.Editor.Services.Transport;

namespace MCPForUnityTests.Editor.Services
{
/// <summary>
/// Tests verifying the optimization fix for GitHub issue #577.
/// Confirms that the early-exit optimization in ProcessQueue prevents
/// excessive memory allocations when the dispatcher is idle.
/// </summary>
public class TransportCommandDispatcherOptimizationTests
{
[Test]
public void ExecuteCommandJsonAsync_WithSimplePingCommand_CompletesSuccessfully()
{
// Arrange
var commandJson = "{\"type\": \"ping\", \"params\": {}}";
var cts = new CancellationTokenSource();
cts.CancelAfter(TimeSpan.FromSeconds(5));

// Act
var task = TransportCommandDispatcher.ExecuteCommandJsonAsync(commandJson, cts.Token);
var completed = task.Wait(TimeSpan.FromSeconds(2));

// Assert
Assert.IsTrue(completed, "Ping command should complete within 2 seconds");
Assert.IsTrue(task.IsCompletedSuccessfully, "Command task should succeed");
Assert.IsNotEmpty(task.Result, "Response should not be empty");
}

[Test]
public void ExecuteCommandJsonAsync_WithCancellation_CancelsProperly()
{
// Arrange
var commandJson = "{\"type\": \"ping\", \"params\": {}}";
var cts = new CancellationTokenSource();

// Act: Create command but cancel before it processes
var task = TransportCommandDispatcher.ExecuteCommandJsonAsync(commandJson, cts.Token);
cts.Cancel();

var completed = task.Wait(TimeSpan.FromSeconds(1));

// Assert
Assert.IsTrue(completed, "Cancelled task should complete");
Assert.IsTrue(task.IsCanceled || task.IsCompletedSuccessfully,
"Task should be either cancelled or completed");
}

[Test]
public void ExecuteCommandJsonAsync_MultipleCommands_ProcessesAllCorrectly()
{
// Arrange
var cts = new CancellationTokenSource();
cts.CancelAfter(TimeSpan.FromSeconds(5));

// Act: Execute multiple commands in rapid succession
var tasks = new Task<string>[3];
for (int i = 0; i < 3; i++)
{
var commandJson = "{\"type\": \"ping\", \"params\": {}}";
tasks[i] = TransportCommandDispatcher.ExecuteCommandJsonAsync(commandJson, cts.Token);
}

var allCompleted = Task.WaitAll(tasks, TimeSpan.FromSeconds(3));

// Assert
Assert.IsTrue(allCompleted, "All commands should complete");
foreach (var task in tasks)
{
Assert.IsTrue(task.IsCompletedSuccessfully, "Each command should complete successfully");
}
}

[Test]
public void ExecuteCommandJsonAsync_WithInvalidJson_ReturnsError()
{
// Arrange
var invalidCommandJson = "this is not json";
var cts = new CancellationTokenSource();
cts.CancelAfter(TimeSpan.FromSeconds(5));

// Act
var task = TransportCommandDispatcher.ExecuteCommandJsonAsync(invalidCommandJson, cts.Token);
var completed = task.Wait(TimeSpan.FromSeconds(2));

// Assert
Assert.IsTrue(completed, "Invalid command should complete with error");
Assert.IsTrue(task.IsCompletedSuccessfully, "Task should complete (with error result)");
StringAssert.Contains("error", task.Result.ToLower(),
"Response should indicate an error");
}

[Test]
public void ExecuteCommandJsonAsync_WithEmptyCommand_ReturnsError()
{
// Arrange
var emptyJson = "";
var cts = new CancellationTokenSource();
cts.CancelAfter(TimeSpan.FromSeconds(5));

// Act
var task = TransportCommandDispatcher.ExecuteCommandJsonAsync(emptyJson, cts.Token);
var completed = task.Wait(TimeSpan.FromSeconds(2));

// Assert
Assert.IsTrue(completed, "Empty command should complete with error");
Assert.IsTrue(task.IsCompletedSuccessfully, "Task should complete");
StringAssert.Contains("error", task.Result.ToLower(),
"Response should indicate an error for empty command");
}

[Test]
public void ExecuteCommandJsonAsync_WithNullThrowsArgumentNullException()
{
// Arrange
string nullJson = null;
var cts = new CancellationTokenSource();

// Act & Assert
Assert.Throws<ArgumentNullException>(() =>
{
TransportCommandDispatcher.ExecuteCommandJsonAsync(nullJson, cts.Token);
});
}

[Test]
public void ExecuteCommandJsonAsync_RapidFireCommands_MaintainsOrdering()
{
// Arrange: A sequence of numbered ping commands
var cts = new CancellationTokenSource();
cts.CancelAfter(TimeSpan.FromSeconds(10));

// Act: Fire multiple pings rapidly
var tasks = new Task<string>[5];
for (int i = 0; i < 5; i++)
{
var cmd = "{\"type\": \"ping\", \"params\": {}}";
tasks[i] = TransportCommandDispatcher.ExecuteCommandJsonAsync(cmd, cts.Token);
}

var allCompleted = Task.WaitAll(tasks, TimeSpan.FromSeconds(5));

// Assert
Assert.IsTrue(allCompleted, "All rapid-fire commands should complete");
int successCount = 0;
foreach (var task in tasks)
{
if (task.IsCompletedSuccessfully && task.Result.Contains("pong"))
{
successCount++;
}
}
Assert.That(successCount, Is.EqualTo(5), "All 5 ping commands should succeed");
}
}
}