⚡️ Speed up method IPAdapterInvocation.get_clip_image_encoder by 7%
#149
+21
−20
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
📄 7% (0.07x) speedup for `IPAdapterInvocation.get_clip_image_encoder` in `invokeai/app/invocations/ip_adapter.py`
⏱️ Runtime: 241 microseconds → 226 microseconds (best of 54 runs)
📝 Explanation and details
The optimization achieves a 6% speedup by restructuring the conditional logic in the
`get_clip_image_encoder` method to eliminate redundant operations and improve the common-case performance.

Key changes:
- Replaced `if not len(image_encoder_models) > 0:` with `if image_encoder_models: return image_encoder_models[0]`. This allows an immediate return when models are found (the common case), avoiding the entire installation code path.
- Replaced the `len(...) > 0` check with Python's truthiness evaluation, which is more efficient as it doesn't require an explicit length comparison.

Performance impact:
- Largest gains in tests such as `test_encoder_found_returns_first`, because the function can return immediately without evaluating complex conditions.

The optimization particularly benefits workloads where the CLIP image encoder model is already installed (the typical case), as it can bypass the entire installation pathway with minimal computational overhead. Since IP-Adapter functionality is commonly used in AI image generation pipelines, this improvement will have a cumulative positive impact on inference performance.
✅ Correctness verification report:
🌀 Generated Regression Tests and Runtime
from typing import List, Optional
imports
import pytest
from invokeai.app.invocations.ip_adapter import IPAdapterInvocation
# Mocks and minimal stubs for dependencies
class DummyLogger:
    """Test double for a logger; records messages instead of emitting them."""

    def __init__(self):  # rendered as "init" in the page dump; dunders restored
        self.warnings = []  # messages passed to warning()
        self.errors = []  # messages passed to error()

    def warning(self, msg):
        self.warnings.append(msg)

    def error(self, msg):
        self.errors.append(msg)
class DummyInstallerJob:
    """Test double for an install job; only tracks completion state."""

    def __init__(self, done=True):
        self.done = done  # whether the job has finished
class DummyInstaller:
    """Test double for the model installer; records calls and simulates jobs."""

    def __init__(self, should_install=True, job_done=True):
        self.should_install = should_install  # not read by the visible methods
        self.job_done = job_done  # completion state given to jobs we create
        self.heuristic_import_called = False
        self.wait_for_job_called = False
        self.last_import_args = None

    def heuristic_import(self, src, changes):
        """Record the import request and return a job in the configured state."""
        self.heuristic_import_called = True
        self.last_import_args = (src, changes)
        return DummyInstallerJob(done=self.job_done)

    def wait_for_job(self, job, timeout=600):
        """Mimic waiting on a job; raise TimeoutError if it never completed."""
        self.wait_for_job_called = True
        if not job.done:
            raise TimeoutError("Job did not complete in time")
class DummyModelConfig:
    """Minimal stand-in for a model config record; carries only a name."""

    def __init__(self, name):
        self.name = name
class DummyModelManagerStore:
    """Test double for the model store; search filters by name only."""

    def __init__(self, models):
        self.models = models

    def search_by_attr(self, model_name=None, base_model=None, model_type=None, model_format=None):
        # Filter by name; other parameters accepted for signature parity but ignored.
        results = []
        for m in self.models:
            if model_name is not None and getattr(m, "name", None) != model_name:
                continue
            results.append(m)
        return results
class DummyModelManager:
    """Test double bundling a model store with an installer."""

    def __init__(self, models, installer=None):
        self.store = DummyModelManagerStore(models)
        # Default to a fresh DummyInstaller when none is supplied.
        self.install = installer or DummyInstaller()
class DummyServices:
    """Test double for the services container: model manager + logger."""

    def __init__(self, models, logger=None, installer=None):
        self.model_manager = DummyModelManager(models, installer=installer)
        self.logger = logger or DummyLogger()
class DummyModelsInterface:
    """Test double for the context's models interface; delegates searches."""

    def __init__(self, services, util=None):
        self._services = services
        self._util = util

    def search_by_attrs(self, name=None, base=None, type=None, format=None):
        # Forward to the store, translating the keyword names.
        return self._services.model_manager.store.search_by_attr(
            model_name=name, base_model=base, model_type=type, model_format=format
        )
class DummyContext:
    """Test double for the invocation context wiring services + models + logger."""

    def __init__(self, models=None, logger=None, installer=None):
        self._services = DummyServices(models or [], logger=logger, installer=installer)
        self.models = DummyModelsInterface(self._services)
        self.logger = self._services.logger
# ModelType and BaseModelType stubs
class ModelType:
    """Stub of the taxonomy enum; only the member these tests use."""

    CLIPVision = "CLIPVision"
class BaseModelType:
    """Stub of the base-model taxonomy enum; only the member these tests use."""

    Any = "Any"
class ModelRecordChanges:
    """Stub for model-record change payloads; keeps interface parity (`type` shadows the builtin)."""

    def __init__(self, name, type):
        self.name = name
        self.type = type
from invokeai.app.invocations.ip_adapter import IPAdapterInvocation
# ---------- UNIT TESTS ----------
# 1. BASIC TEST CASES
def test_encoder_found_returns_first():
    """Basic: If model is found, returns the first one."""
    model = DummyModelConfig("clip_vit_h")
    context = DummyContext(models=[model])
    codeflash_output = IPAdapterInvocation.get_clip_image_encoder(context, "id1", "clip_vit_h")  # 4.83μs -> 3.65μs (32.4% faster)
    result = codeflash_output
    # NOTE(review): assertion was lost in page extraction; restored per the docstring.
    assert result is model
def test_encoder_found_multiple_returns_first():
    """Basic: If multiple models are found, returns the first one."""
    model1 = DummyModelConfig("clip_vit_h")
    model2 = DummyModelConfig("clip_vit_h")
    context = DummyContext(models=[model1, model2])
    codeflash_output = IPAdapterInvocation.get_clip_image_encoder(context, "id1", "clip_vit_h")  # 2.88μs -> 2.86μs (0.839% faster)
    result = codeflash_output
    # NOTE(review): assertion was lost in page extraction; restored per the docstring.
    assert result is model1
def test_encoder_name_is_empty_string():
    """Edge: Empty string as model name."""
    model = DummyModelConfig("")
    context = DummyContext(models=[model])
    codeflash_output = IPAdapterInvocation.get_clip_image_encoder(context, "id1", "")  # 3.73μs -> 3.56μs (4.69% faster)
    result = codeflash_output
    # NOTE(review): assertion was lost in page extraction; restored per the docstring.
    assert result is model
def test_large_number_of_models_returns_first():
    """Large Scale: Many models in context, returns the first matching."""
    models = [DummyModelConfig(f"clip_vit_h_{i}") for i in range(999)]
    # Add one matching model at the end
    matching_model = DummyModelConfig("clip_vit_h")
    models.append(matching_model)
    context = DummyContext(models=models)
    # Should only return the matching model, not the first in the list
    codeflash_output = IPAdapterInvocation.get_clip_image_encoder(context, "id1", "clip_vit_h")  # 55.3μs -> 53.1μs (4.31% faster)
    result = codeflash_output
    # NOTE(review): assertion was lost in page extraction; restored per the comments above.
    assert result is matching_model
def test_large_number_of_models_with_multiple_matches_returns_first():
    """Large Scale: Many models, multiple matches, returns first match."""
    models = [DummyModelConfig("clip_vit_h") for _ in range(500)]
    context = DummyContext(models=models)
    codeflash_output = IPAdapterInvocation.get_clip_image_encoder(context, "id1", "clip_vit_h")  # 39.8μs -> 38.8μs (2.69% faster)
    result = codeflash_output
    # NOTE(review): assertion was lost in page extraction; restored per the docstring.
    assert result is models[0]
#------------------------------------------------
import pytest
from invokeai.app.invocations.ip_adapter import IPAdapterInvocation
# --- Minimal stubs and helpers to allow testing without external dependencies ---
# Taxonomy enums
class BaseModelType:
    """Stub of the base-model taxonomy enum (second test module's copy)."""

    Any = "Any"
    StableDiffusion1 = "StableDiffusion1"
    StableDiffusionXL = "StableDiffusionXL"
class ModelType:
    """Stub of the model-type taxonomy enum (second test module's copy)."""

    CLIPVision = "CLIPVision"
    IPAdapter = "IPAdapter"
    Main = "Main"
    Vae = "Vae"
# Dummy ModelConfig
class AnyModelConfig:
    """Minimal model-config record carrying a name and a type."""

    def __init__(self, name, type_):
        self.name = name
        self.type = type_  # trailing underscore avoids shadowing the builtin
# ModelRecordChanges stub
class ModelRecordChanges:
    """Stub for model-record change payloads (`type` shadows the builtin for interface parity)."""

    def __init__(self, name, type):
        self.name = name
        self.type = type
# Dummy job for install
class DummyJob:
    """Test double for an install job; only tracks the completed flag."""

    def __init__(self, completed=True):
        self.completed = completed
# Dummy installer
class DummyInstaller:
    """Test double for the installer (second module's copy); state only.

    NOTE(review): any heuristic_import/wait_for_job methods appear to have been
    lost in page extraction — the tests below monkey-patch them per instance.
    """

    def __init__(self, install_should_succeed=True):
        self.install_should_succeed = install_should_succeed
        self.install_called = False
        self.last_job = None
# Dummy logger
class DummyLogger:
    """Test double for a logger (second module's copy); recorded-message lists only.

    NOTE(review): warning()/error() recorder methods were likely lost in page
    extraction — confirm against the PR before relying on them.
    """

    def __init__(self):
        self.warnings = []
        self.errors = []
# Dummy model store
class DummyModelStore:
    """Test double for the model store; defaults to an empty model list."""

    def __init__(self, models=None):
        self.models = models or []
# Dummy model manager
class DummyModelManager:
    """Test double bundling an injected store with an injected installer."""

    def __init__(self, store, installer):
        self.store = store
        self.install = installer
# Dummy services
class DummyServices:
    """Test double for the services container: injected logger + model manager."""

    def __init__(self, logger, model_manager):
        self.logger = logger
        self.model_manager = model_manager
# InvocationContext stub
class DummyModelsInterface:
    """Test double for the context's models interface; holds the store only."""

    def __init__(self, store):
        self._store = store
class DummyInvocationContext:
    """Test double for the invocation context: models, logger, services."""

    def __init__(self, models, logger, services):
        self.models = models
        self.logger = logger
        self._services = services
from invokeai.app.invocations.ip_adapter import IPAdapterInvocation
# --- Unit tests ---
# ----------- BASIC TEST CASES ------------
def test_basic_model_absent_installs_and_returns():
    """Test: Model is absent, installer installs it, should return the new model."""
    # Model not present initially
    store = DummyModelStore(models=[])
    logger = DummyLogger()
    installer = DummyInstaller()
    model_manager = DummyModelManager(store, installer)
    services = DummyServices(logger, model_manager)
    models_interface = DummyModelsInterface(store)
    context = DummyInvocationContext(models_interface, logger, services)
    # NOTE(review): the invocation of IPAdapterInvocation.get_clip_image_encoder
    # and its assertions were lost in page extraction; restore from the PR.
def test_basic_model_absent_install_fails():
    """Test: Model is absent, installer runs but model never appears. Should log error and assert."""
    store = DummyModelStore(models=[])
    logger = DummyLogger()
    installer = DummyInstaller()
    model_manager = DummyModelManager(store, installer)
    services = DummyServices(logger, model_manager)
    models_interface = DummyModelsInterface(store)
    context = DummyInvocationContext(models_interface, logger, services)
    # NOTE(review): the invocation and assertions were lost in page extraction;
    # restore from the PR.
def test_edge_model_name_case_sensitive():
    """Test: Model name is case sensitive, should not find if case differs."""
    model = AnyModelConfig(name="vit-h", type_=ModelType.CLIPVision)
    store = DummyModelStore(models=[model])
    logger = DummyLogger()
    installer = DummyInstaller()
    model_manager = DummyModelManager(store, installer)
    services = DummyServices(logger, model_manager)
    models_interface = DummyModelsInterface(store)
    context = DummyInvocationContext(models_interface, logger, services)
    # NOTE(review): the invocation and assertions were lost in page extraction;
    # restore from the PR.
def test_edge_model_type_mismatch():
    """Test: Model present but wrong type, should not match."""
    model = AnyModelConfig(name="ViT-H", type_=ModelType.Main)
    store = DummyModelStore(models=[model])
    logger = DummyLogger()
    installer = DummyInstaller()
    model_manager = DummyModelManager(store, installer)
    services = DummyServices(logger, model_manager)
    models_interface = DummyModelsInterface(store)
    context = DummyInvocationContext(models_interface, logger, services)
    # NOTE(review): the invocation and assertions were lost in page extraction;
    # restore from the PR.
def test_edge_install_timeout():
    """Test: Installer job does not complete, should raise TimeoutError."""
    store = DummyModelStore(models=[])
    logger = DummyLogger()
    installer = DummyInstaller()
    # Make the job not completed
    installer.heuristic_import = lambda source, config, access_token=None, inplace=False: DummyJob(completed=False)
    # Generator-expression trick: raises TimeoutError when the lambda is called.
    installer.wait_for_job = lambda job, timeout=600: (_ for _ in ()).throw(TimeoutError("Job did not complete"))
    model_manager = DummyModelManager(store, installer)
    services = DummyServices(logger, model_manager)
    models_interface = DummyModelsInterface(store)
    context = DummyInvocationContext(models_interface, logger, services)
    # NOTE(review): the pytest.raises(TimeoutError) invocation block was lost in
    # page extraction; restore from the PR.
def test_edge_no_models_at_all():
    """Test: No models in store, install fails, should log error and assert."""
    store = DummyModelStore(models=[])
    logger = DummyLogger()
    installer = DummyInstaller()
    model_manager = DummyModelManager(store, installer)
    services = DummyServices(logger, model_manager)
    models_interface = DummyModelsInterface(store)
    context = DummyInvocationContext(models_interface, logger, services)
    # NOTE(review): the invocation and assertions were lost in page extraction;
    # restore from the PR.
def test_edge_model_installer_called_with_correct_args():
    """Test: Installer is called with correct arguments."""
    store = DummyModelStore(models=[])
    logger = DummyLogger()
    installer = DummyInstaller()
    installer.heuristic_import_called_args = []

    def heuristic_import(source, config, access_token=None, inplace=False):
        # Capture the call so the test can assert on the forwarded arguments.
        installer.heuristic_import_called_args.append((source, config.name, config.type))
        return DummyJob(completed=True)

    installer.heuristic_import = heuristic_import
    model_manager = DummyModelManager(store, installer)
    services = DummyServices(logger, model_manager)
    models_interface = DummyModelsInterface(store)
    context = DummyInvocationContext(models_interface, logger, services)
    # NOTE(review): the invocation and assertions on heuristic_import_called_args
    # were lost in page extraction; restore from the PR.
# ----------- LARGE SCALE TEST CASES ------------
def test_large_scale_install_among_many_models():
    """Test: Store contains many models, none match, install adds correct one."""
    models = [AnyModelConfig(name=f"Other-{i}", type_=ModelType.CLIPVision) for i in range(500)]
    store = DummyModelStore(models=models)
    logger = DummyLogger()
    installer = DummyInstaller()
    model_manager = DummyModelManager(store, installer)
    services = DummyServices(logger, model_manager)
    models_interface = DummyModelsInterface(store)
    context = DummyInvocationContext(models_interface, logger, services)
    # NOTE(review): the invocation and assertions were lost in page extraction;
    # restore from the PR.
def test_large_scale_install_fails_among_many_models():
    """Test: Store contains many models, install fails to add correct one."""
    models = [AnyModelConfig(name=f"Other-{i}", type_=ModelType.CLIPVision) for i in range(500)]
    store = DummyModelStore(models=models)
    logger = DummyLogger()
    installer = DummyInstaller()
    model_manager = DummyModelManager(store, installer)
    services = DummyServices(logger, model_manager)
    models_interface = DummyModelsInterface(store)
    context = DummyInvocationContext(models_interface, logger, services)
    # NOTE(review): the invocation and assertions were lost in page extraction;
    # restore from the PR.
To edit these changes, run
`git checkout codeflash/optimize-IPAdapterInvocation.get_clip_image_encoder-mhvt6v7p` and push.