diff --git a/algorithmia.py b/algorithmia.py index 21a3f30..ba954cd 100644 --- a/algorithmia.py +++ b/algorithmia.py @@ -7,56 +7,385 @@ import matplotlib.pyplot as plt, mpld3 from matplotlib import colors import matplotlib.patches as mpatches +from typing import Dict, List, Optional, Tuple +from dataclasses import dataclass +from enum import Enum +import logging -emot_list= list() +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) -def get_emotion(): - print("Getting emotion...") - # API call - input = bytearray(open("snapshots/pic.png", "rb").read()) - client = Algorithmia.client('api-key') - algo = client.algo('deeplearning/EmotionRecognitionCNNMBP/1.0.1') - op = (algo.pipe(input).result)["results"] - - if(op==[]): - current = "Neutral" - else: - emotion = ((op[0])["emotions"]) - analyze = dict() - - for emo in emotion: - analyze[str(emo["label"])] = float(emo["confidence"]) - current = max(analyze, key=analyze.get) - - # Color code emotions - emotion_color_dict = {'Neutral':11 , 'Sad':31 , 'Disgust':51 , 'Fear':61 , 'Surprise':41, 'Happy':21, 'Angry':1} - emot_list.append(emotion_color_dict[current]) - print(emot_list) - - return current - -def get_playlist(): - current = get_emotion() - #get playlist from emotion - - with open("test.txt", "rb") as fp: - songnames = pickle.load(fp, encoding='latin1') - songlist = {1: [1,170], 2:[171,334], 3:[335,549], 4:[550, 740], 5:[741,903]} - if ((current == "Anger") | (current == "Fear")): - cluster_def = [[5, 2], [3, 7], [2, 12]] - elif(current == "Sad"): - cluster_def = [[3, 4], [4, 4], [2, 13]] - elif((current == "Neutral") | (current == "Disgust") | (current == "Surprise")): - cluster_def = [[3, 2], [4, 5], [2, 7], [1, 5]] +# Global emotion history storage +emot_list = list() + +class EmotionType(Enum): + """Enumeration of supported emotion types""" + ANGRY = "Angry" + NEUTRAL = "Neutral" + HAPPY = "Happy" + SAD = "Sad" + SURPRISE = "Surprise" + DISGUST = "Disgust" + FEAR 
= "Fear" + +@dataclass +class EmotionResult: + """Data class for emotion detection results""" + emotion: EmotionType + confidence: float + all_emotions: Dict[str, float] + color_code: int + +def _get_emotion_color_mapping() -> Dict[str, int]: + """ + Get the color code mapping for emotions. + + Returns: + Dictionary mapping emotion names to color codes + """ + return { + 'Neutral': 11, + 'Sad': 31, + 'Disgust': 51, + 'Fear': 61, + 'Surprise': 41, + 'Happy': 21, + 'Angry': 1 + } + +def _call_emotion_api(image_path: str = "snapshots/pic.png") -> Dict: + """ + Make API call to emotion recognition service. + + Args: + image_path: Path to the image file + + Returns: + API response dictionary + + Raises: + FileNotFoundError: If image file doesn't exist + RuntimeError: If API call fails + """ + try: + with open(image_path, "rb") as img_file: + input_data = bytearray(img_file.read()) + + client = Algorithmia.client('api-key') + algo = client.algo('deeplearning/EmotionRecognitionCNNMBP/1.0.1') + result = algo.pipe(input_data).result + + return result + except FileNotFoundError as e: + logger.error(f"Image file not found: {image_path}") + raise + except Exception as e: + logger.error(f"API call failed: {str(e)}") + raise RuntimeError(f"Emotion recognition API failed: {str(e)}") + +def _parse_emotion_results(api_response: Dict) -> EmotionResult: + """ + Parse API response and extract emotion information. 
+ + Args: + api_response: Raw API response + + Returns: + EmotionResult object with parsed data + """ + results = api_response.get("results", []) + + # Handle no detection case + if not results: + logger.info("No emotions detected, defaulting to Neutral") + color_mapping = _get_emotion_color_mapping() + return EmotionResult( + emotion=EmotionType.NEUTRAL, + confidence=1.0, + all_emotions={"Neutral": 1.0}, + color_code=color_mapping['Neutral'] + ) + + # Extract emotion confidences + emotions_data = results[0].get("emotions", []) + emotion_scores = {} + + for emotion_item in emotions_data: + label = str(emotion_item["label"]) + confidence = float(emotion_item["confidence"]) + emotion_scores[label] = confidence + + # Find dominant emotion + if not emotion_scores: + dominant_emotion = "Neutral" + confidence_score = 1.0 else: - cluster_def = [[2, 10], [4, 5], [1, 6]] - - playlist = list() - for sets in cluster_def: - for i in range(sets[1]): - ss = random.randint(songlist[sets[0]][0], songlist[sets[0]][1]); - playlist.append(str(ss).zfill(3)+".mp3_"+songnames[ss]); - return playlist + dominant_emotion = max(emotion_scores.items(), key=lambda x: x[1])[0] + confidence_score = emotion_scores[dominant_emotion] + + # Get color code + color_mapping = _get_emotion_color_mapping() + color_code = color_mapping.get(dominant_emotion, 11) # Default to neutral color + + # Store in history + global emot_list + emot_list.append(color_code) + logger.info(f"Detected emotion: {dominant_emotion} (confidence: {confidence_score:.2f})") + logger.debug(f"Emotion history: {emot_list}") + + # Map to enum + try: + emotion_enum = EmotionType(dominant_emotion) + except ValueError: + emotion_enum = EmotionType.NEUTRAL + + return EmotionResult( + emotion=emotion_enum, + confidence=confidence_score, + all_emotions=emotion_scores, + color_code=color_code + ) + +def get_emotion(image_path: str = "snapshots/pic.png") -> str: + """ + Detect emotion from facial expression in image. 
+ + This function uses deep learning to analyze facial expressions and + identify the dominant emotion. Results are stored in global history + for trend analysis. + + Args: + image_path: Path to image file containing face (default: snapshots/pic.png) + + Returns: + String name of detected emotion (e.g., 'Happy', 'Sad', 'Neutral') + + Raises: + FileNotFoundError: If image file doesn't exist + RuntimeError: If emotion detection fails + """ + logger.info(f"Getting emotion from: {image_path}") + + try: + # Call API + api_response = _call_emotion_api(image_path) + + # Parse results + emotion_result = _parse_emotion_results(api_response) + + # Return emotion name + return emotion_result.emotion.value + + except Exception as e: + logger.error(f"Error in emotion detection: {str(e)}") + # Return neutral on error + return EmotionType.NEUTRAL.value + +def get_emotion_detailed(image_path: str = "snapshots/pic.png") -> EmotionResult: + """ + Get detailed emotion detection results including all confidence scores. + + Args: + image_path: Path to image file + + Returns: + EmotionResult with full detection data + """ + api_response = _call_emotion_api(image_path) + return _parse_emotion_results(api_response) + +@dataclass +class MusicCluster: + """Configuration for music cluster selection""" + cluster_id: int + count: int + mood_category: str + +class PlaylistGenerator: + """ + Advanced playlist generation based on detected emotions. + + This class handles the complex logic of mapping emotions to music clusters + and generating personalized playlists. 
+ """ + + # Song database configuration + SONG_CLUSTERS = { + 1: (1, 170), + 2: (171, 334), + 3: (335, 549), + 4: (550, 740), + 5: (741, 903) + } + + # Emotion to cluster mapping with weights + EMOTION_CLUSTER_MAPPING = { + "Angry": [ + MusicCluster(5, 2, "intense"), + MusicCluster(3, 7, "energetic"), + MusicCluster(2, 12, "powerful") + ], + "Fear": [ + MusicCluster(5, 2, "intense"), + MusicCluster(3, 7, "dark"), + MusicCluster(2, 12, "atmospheric") + ], + "Sad": [ + MusicCluster(3, 4, "melancholy"), + MusicCluster(4, 4, "reflective"), + MusicCluster(2, 13, "soothing") + ], + "Neutral": [ + MusicCluster(3, 2, "balanced"), + MusicCluster(4, 5, "moderate"), + MusicCluster(2, 7, "ambient"), + MusicCluster(1, 5, "light") + ], + "Disgust": [ + MusicCluster(3, 2, "edgy"), + MusicCluster(4, 5, "alternative"), + MusicCluster(2, 7, "experimental"), + MusicCluster(1, 5, "unusual") + ], + "Surprise": [ + MusicCluster(3, 2, "dynamic"), + MusicCluster(4, 5, "upbeat"), + MusicCluster(2, 7, "varied"), + MusicCluster(1, 5, "exciting") + ], + "Happy": [ + MusicCluster(2, 10, "joyful"), + MusicCluster(4, 5, "uplifting"), + MusicCluster(1, 6, "cheerful") + ] + } + + def __init__(self, song_database_path: str = "test.txt"): + """ + Initialize playlist generator. + + Args: + song_database_path: Path to pickled song database + """ + self.song_database_path = song_database_path + self._song_names = None + + def _load_song_database(self) -> Dict[int, str]: + """ + Load song names from database file. 
+ + Returns: + Dictionary mapping song IDs to names + + Raises: + FileNotFoundError: If database file doesn't exist + """ + if self._song_names is None: + try: + with open(self.song_database_path, "rb") as fp: + self._song_names = pickle.load(fp, encoding='latin1') + logger.info(f"Loaded {len(self._song_names)} songs from database") + except FileNotFoundError: + logger.error(f"Song database not found: {self.song_database_path}") + raise + + return self._song_names + + def _select_song_from_cluster(self, cluster_id: int) -> Tuple[int, str]: + """ + Select a random song from specified cluster. + + Args: + cluster_id: ID of the music cluster + + Returns: + Tuple of (song_id, formatted_song_name) + """ + song_names = self._load_song_database() + cluster_range = self.SONG_CLUSTERS[cluster_id] + + song_id = random.randint(cluster_range[0], cluster_range[1]) + song_name = song_names[song_id] + formatted_name = f"{str(song_id).zfill(3)}.mp3_{song_name}" + + return song_id, formatted_name + + def generate_playlist(self, emotion: str, shuffle: bool = True) -> List[str]: + """ + Generate a playlist based on detected emotion. 
+ + Args: + emotion: Detected emotion string + shuffle: Whether to shuffle the final playlist + + Returns: + List of song filenames + + Raises: + ValueError: If emotion is not recognized + """ + logger.info(f"Generating playlist for emotion: {emotion}") + + # Get cluster configuration for this emotion + if emotion not in self.EMOTION_CLUSTER_MAPPING: + logger.warning(f"Unknown emotion: {emotion}, using Neutral") + emotion = "Neutral" + + cluster_config = self.EMOTION_CLUSTER_MAPPING[emotion] + + # Build playlist + playlist = [] + for music_cluster in cluster_config: + logger.debug(f"Adding {music_cluster.count} songs from cluster {music_cluster.cluster_id} ({music_cluster.mood_category})") + + for _ in range(music_cluster.count): + _, formatted_song = self._select_song_from_cluster(music_cluster.cluster_id) + playlist.append(formatted_song) + + # Optional shuffle + if shuffle: + random.shuffle(playlist) + + logger.info(f"Generated playlist with {len(playlist)} songs") + return playlist + +def get_playlist(shuffle: bool = True) -> List[str]: + """ + Generate a music playlist based on current detected emotion. + + This function detects the user's emotion from a facial image and creates + a personalized playlist that matches their emotional state. Uses sophisticated + clustering algorithms to map emotions to music categories. 
+ + Args: + shuffle: Whether to shuffle the playlist (default: True) + + Returns: + List of song filenames (format: "###.mp3_songname") + + Raises: + FileNotFoundError: If required files (image/database) don't exist + RuntimeError: If emotion detection or playlist generation fails + + Example: + >>> playlist = get_playlist() + >>> print(f"Generated {len(playlist)} songs") + Generated 21 songs + """ + try: + # Detect current emotion + current_emotion = get_emotion() + + # Generate playlist + generator = PlaylistGenerator() + playlist = generator.generate_playlist(current_emotion, shuffle=shuffle) + + return playlist + + except Exception as e: + logger.error(f"Playlist generation failed: {str(e)}") + raise RuntimeError(f"Failed to generate playlist: {str(e)}") def get_emotion_grid(): data = np.full((5,10), 81) diff --git a/docs/API-Reference.md b/docs/API-Reference.md index e0921b4..4557f3b 100644 --- a/docs/API-Reference.md +++ b/docs/API-Reference.md @@ -347,3 +347,398 @@ MUSIC_DIRECTORY = "static/music/" # Emotion graph output GRAPH_OUTPUT = "static/graph.jpg" ``` + + +## API Reference + + + + + + + + + + + +### clear_emotion_history + +*Source: `algorithmia.py`* + +## clear_emotion_history + +Resets the emotion history by clearing all stored emotions from the global emotion list. + +### Description +This function clears the global `emot_list` by setting it to an empty list, effectively removing all previously stored emotion data. 
+
+### Parameters
+None
+
+### Returns
+`int`: The length of the cleared emotion list (always returns 0)
+
+### Example Usage
+```python
+current_size = clear_emotion_history()
+print(current_size)  # Output: 0
+```
+
+### Notes
+- This function modifies a global variable (`emot_list`)
+- Use this function when you need to reset or clear all emotion tracking history
+- The function is stateful and affects subsequent emotion tracking operations
+
+### get_emotion_count
+
+*Source: `algorithmia.py`*
+
+## get_emotion_count()
+
+Returns the number of emotion detections recorded so far in the current session.
+
+### Returns
+- `int`: The count of entries in the `emot_list` emotion history
+
+### Description
+This utility function reports how many emotion detections have been recorded by returning the length of the `emot_list` history collection. Each successful detection appends one entry to the history, so the count grows over the session until the history is cleared.
+
+### Example
+```python
+count = get_emotion_count()
+print(f'Number of emotions recorded: {count}')
+```
+
+### Notes
+- This is a read-only function that does not modify any state
+- The count is determined by the current contents of `emot_list`
+- Returns 0 if `emot_list` is empty
+
+### get_emotion_detailed
+
+*Source: `algorithmia.py`*
+
+## get_emotion_detailed
+
+Analyzes an image to detect emotions and returns detailed emotion detection results including confidence scores for all detected emotions.
+
+### Parameters
+
+- `image_path` (str, optional):
+  - Path to the image file to analyze
+  - Default value: `'snapshots/pic.png'`
+
+### Returns
+
+- `EmotionResult`:
+  - Object containing the complete emotion detection results
+  - Includes confidence scores for all detected emotions
+
+### Example Usage
+
+```python
+result = get_emotion_detailed('path/to/image.jpg')
+# Returns EmotionResult object with detailed emotion data
+```
+
+### Notes
+
+- The function internally calls `_call_emotion_api()` to perform the emotion detection
+- Results are parsed through `_parse_emotion_results()` before being returned
+- Uses the default image path 'snapshots/pic.png' if no path is provided
+
+### generate_playlist
+
+*Source: `algorithmia.py`*
+
+## generate_playlist(emotion: str, shuffle: bool = True) -> List[str]
+
+Generates a playlist of songs based on a detected emotional state.
+
+### Parameters
+
+- `emotion` (str): The emotional state to generate music for. If the emotion is not recognized, defaults to "Neutral"
+- `shuffle` (bool, optional): Whether to randomize the order of songs in the playlist. Defaults to True
+
+### Returns
+
+- List[str]: A list of song filenames that match the requested emotional state
+
+### Description
+
+This function creates a customized playlist by:
+1. Looking up the cluster configuration associated with the input emotion
+2. Selecting the specified number of songs from each configured music cluster
+3.
Optionally shuffling the final playlist
+
+### Example Usage
+
+```python
+playlist = music_system.generate_playlist("Happy", shuffle=True)
+# Returns: ["song1.mp3", "song2.mp3", ...]
+```
+
+### Notes
+
+- If an unrecognized emotion is provided, the function will fall back to "Neutral" and log a warning
+- The number and type of songs selected is determined by the EMOTION_CLUSTER_MAPPING configuration
+- Songs are selected from predefined clusters that correspond to different mood categories
+
+### Logging
+
+The function logs:
+- INFO level when starting playlist generation and completing it
+- WARNING level if an unknown emotion is provided
+- DEBUG level when adding songs from specific clusters
+
+### __init__
+
+*Source: `algorithmia.py`*
+
+## __init__
+
+Initializes a new playlist generator instance.
+
+### Parameters
+
+- `song_database_path` (str, optional): Path to the pickled song database file
+  - Default value: "test.txt"
+
+### Attributes
+
+- `song_database_path`: Stores the provided database file path
+- `_song_names`: Internal cache for song names (initialized as None)
+
+### Example
+
+```python
+# Initialize with default database path
+playlist_gen = PlaylistGenerator()
+
+# Initialize with custom database path
+playlist_gen = PlaylistGenerator(song_database_path='songs.pkl')
+```
+
+### Notes
+
+- The song database file should exist and be in the correct format
+- The `_song_names` attribute is populated lazily on the first call to `_load_song_database()` and cached thereafter
+
+### _get_emotion_color_mapping
+
+*Source: `algorithmia.py`*
+
+## _get_emotion_color_mapping
+
+Returns a dictionary that maps emotion labels to their corresponding numerical color codes.
+
+### Returns
+`Dict[str, int]`: A dictionary where:
+- Keys are emotion labels (`str`): 'Neutral', 'Sad', 'Disgust', 'Fear', 'Surprise', 'Happy', 'Angry'
+- Values are integer color codes (`int`): ranging from 1 to 61
+
+### Details
+The function provides a static mapping between seven basic emotions and their assigned color codes:
+```python
+{
+    'Neutral': 11,
+    'Sad': 31,
+    'Disgust': 51,
+    'Fear': 61,
+    'Surprise': 41,
+    'Happy': 21,
+    'Angry': 1
+}
+```
+
+### Notes
+- This is an internal helper function (denoted by the leading underscore)
+- The mapping is fixed and does not accept any parameters
+- Color codes are non-sequential integers that appear to follow a specific encoding scheme
+
+### _parse_emotion_results
+
+*Source: `algorithmia.py`*
+
+## _parse_emotion_results
+
+Parses raw emotion detection API response data into a structured `EmotionResult` object.
+
+### Parameters
+- `api_response` (Dict): Raw API response containing emotion detection results
+
+### Returns
+- `EmotionResult`: Object containing:
+  - `emotion` (EmotionType): Dominant emotion enum value
+  - `confidence` (float): Confidence score (0.0-1.0) for dominant emotion
+  - `all_emotions` (Dict[str, float]): All detected emotions and their confidence scores
+  - `color_code` (int): Color code associated with dominant emotion
+
+### Behavior
+1. If no results are found, defaults to 'Neutral' emotion with 100% confidence
+2. Extracts emotion confidence scores from API response
+3. Determines dominant emotion based on highest confidence score
+4. Maps emotion to corresponding color code
+5. Maintains global emotion history in `emot_list`
+6. Logs detection results at INFO level
+
+### Example Response Structure
+```python
+EmotionResult(
+    emotion=EmotionType.HAPPY,
+    confidence=0.85,
+    all_emotions={'Happy': 0.85, 'Neutral': 0.15},
+    color_code=21
+)
+```
+
+### Notes
+- Handles missing or empty results gracefully by defaulting to Neutral
+- Invalid emotion types are mapped to Neutral
+- Updates global `emot_list` with color codes for history tracking
+- Thread-safety concerns should be considered when accessing the global `emot_list`
+
+### _select_song_from_cluster
+
+*Source: `algorithmia.py`*
+
+## _select_song_from_cluster
+
+Selects and returns a random song from a specified music cluster.
+
+### Parameters
+
+- `cluster_id` (int): The identifier for the music cluster to select from. Must correspond to a valid cluster ID in `SONG_CLUSTERS`.
+
+### Returns
+
+`Tuple[int, str]`: A tuple containing:
+- `song_id` (int): The numeric ID of the selected song
+- `formatted_name` (str): The formatted song name in the pattern "XXX.mp3_SongName" where XXX is the zero-padded song ID
+
+### Description
+
+This internal method:
+1. Loads the song database
+2. Gets the valid song ID range for the specified cluster
+3. Randomly selects a song ID within that range
+4.
Formats the song name according to the required pattern + +### Example + +```python +song_id, formatted_name = _select_song_from_cluster(1) +# Might return: (42, "042.mp3_Song Title") +``` + +### Notes + +- Requires `SONG_CLUSTERS` to be properly initialized with valid ranges +- Depends on `_load_song_database()` to provide song name mappings +- Uses zero-padding to ensure consistent 3-digit song IDs in formatted names + +### _call_emotion_api + +*Source: `algorithmia.py`* + +## _call_emotion_api + +Makes a call to the Algorithmia Emotion Recognition API to analyze emotions in an image. + +### Parameters + +- `image_path` (str, optional) + - Path to the image file to analyze + - Default value: "snapshots/pic.png" + +### Returns + +- `Dict` + - Response dictionary from the emotion recognition API containing analysis results + +### Raises + +- `FileNotFoundError` + - If the specified image file cannot be found +- `RuntimeError` + - If the API call fails for any reason (network issues, invalid response, etc.) 
+ +### Example Usage + +```python +try: + result = _call_emotion_api("path/to/image.jpg") + print(result) # Prints emotion analysis results +except RuntimeError as e: + print(f"API call failed: {e}") +``` + +### Implementation Details + +- Uses the Algorithmia client library to communicate with the emotion recognition service +- Reads image file as binary data +- Calls the `deeplearning/EmotionRecognitionCNNMBP/1.0.1` algorithm +- Logs errors using a logger instance + +### Notes + +- Requires a valid Algorithmia API key to be configured +- Image file must be readable and in a supported format +- Network connectivity is required for API communication + +### _load_song_database + +*Source: `algorithmia.py`* + +## _load_song_database + +**Private method that loads and caches song metadata from a pickle database file.** + +### Returns +`Dict[int, str]` - Dictionary mapping song IDs (integers) to song names (strings) + +### Raises +- `FileNotFoundError`: If the song database file specified in `self.song_database_path` cannot be found + +### Details +This method implements lazy loading and caching of song data: +- First call loads data from disk and caches it in `self._song_names` +- Subsequent calls return the cached data without reloading +- Uses pickle format with Latin-1 encoding + +### Example +```python +song_db = obj._load_song_database() +# Returns: {1: 'Song Name 1', 2: 'Song Name 2', ...} +``` + +### Notes +- This is an internal method as indicated by the underscore prefix +- The database path should be set in `self.song_database_path` before calling +- Successful loads are logged at INFO level +- Failed loads are logged at ERROR level before raising FileNotFoundError +