diff --git a/.github/workflows/CI.yaml b/.github/workflows/CI.yaml
index 0c80283d..4506ef2a 100644
--- a/.github/workflows/CI.yaml
+++ b/.github/workflows/CI.yaml
@@ -48,7 +48,7 @@ jobs:
run: |
brew install redis
- name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v4
+ uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
@@ -81,7 +81,7 @@ jobs:
with:
fetch-depth: 0
- name: Set up Python 3.10
- uses: actions/setup-python@v4
+ uses: actions/setup-python@v5
with:
python-version: '3.10'
diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml
index 94ba3e74..1d72c671 100644
--- a/.github/workflows/docs.yaml
+++ b/.github/workflows/docs.yaml
@@ -21,7 +21,7 @@ jobs:
# Install dependencies
- name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v4
+ uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
diff --git a/.github/workflows/pypi.yaml b/.github/workflows/pypi.yaml
index 19b92eaf..12296a57 100644
--- a/.github/workflows/pypi.yaml
+++ b/.github/workflows/pypi.yaml
@@ -5,8 +5,6 @@ on:
push:
tags:
- 'v[0-9]+.[0-9]+.[0-9]+'
- branches:
- - 'main'
jobs:
build:
@@ -18,7 +16,7 @@ jobs:
with:
fetch-depth: 0
- name: Set up Python
- uses: actions/setup-python@v4
+ uses: actions/setup-python@v5
with:
python-version: "3.10"
- name: Install pypa/build
diff --git a/.github/workflows/test-pypi.yaml b/.github/workflows/test-pypi.yaml
index e13ab432..0121a1dd 100644
--- a/.github/workflows/test-pypi.yaml
+++ b/.github/workflows/test-pypi.yaml
@@ -18,7 +18,7 @@ jobs:
with:
fetch-depth: 0
- name: Set up Python
- uses: actions/setup-python@v4
+ uses: actions/setup-python@v5
with:
python-version: "3.10"
- name: Install pypa/build
diff --git a/README.md b/README.md
index 2cd4bc14..e5a25164 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,9 @@
-# improv
+
+
+
+Adaptive experiments for neuroscience
+---------
+
[](https://pypi.org/project/improv)
[](https://pypi.org/project/improv)
[](https://project-improv.github.io/)
@@ -7,9 +12,8 @@
[](https://opensource.org/licenses/MIT)
[](https://github.com/psf/black)
-A flexible software platform for real-time and adaptive neuroscience experiments.
-_improv_ is a streaming software platform designed to enable adaptive experiments. By analyzing data, such as 2-photon calcium images, as it comes in, we can obtain information about the current brain state in real time and use it to adaptively modify an experiment as data collection is ongoing.
+_improv_ is a streaming software platform designed to enable adaptive experiments. By analyzing data as they arrive, we can obtain information about the current brain state in real time and use it to adaptively modify an experiment as data collection is ongoing.

@@ -33,7 +37,8 @@ _improv_'s design is based on a streamlined version of the actor model for concu
## Installation
-For installation instructions, please consult the [docs](https://project-improv.github.io/improv/installation.html) on our github.
+For installation instructions, please consult the [documentation](https://project-improv.github.io/improv/installation.html).
+
### Contact
To get in touch, feel free to reach out on Twitter @annedraelos or @jmxpearson.
diff --git a/demos/fastplotlib/actors/lorenz_generator.py b/demos/fastplotlib/actors/lorenz_generator.py
new file mode 100644
index 00000000..ced8bb21
--- /dev/null
+++ b/demos/fastplotlib/actors/lorenz_generator.py
@@ -0,0 +1,91 @@
+from improv.actor import Actor
+import numpy as np
+import logging
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+
+class Generator(Actor):
+ """
+ Generates coordinates for the Lorenz system in real time.
+    Computes the next 25 Lorenz coordinates on each run step.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.data = None
+ self.name = "Lorenz Generator"
+ self.dt = 0.01 # time step for numerical integration
+ self.frame_num = 0
+
+ def __str__(self):
+ return f"Name: {self.name}, Current Coordinates: {self.coordinates[-1] if self.coordinates else None}"
+
+ def setup(self):
+ """Generates an array that serves as an initial source of data.
+
+ Initial data is the starting point (x,y,z) for the lorenz attractor.
+ """
+ logger.info("Beginning setup for Lorenz Generator")
+
+ # initial condition
+ self.data = np.array([1.0, 1.0, 1.0]).reshape(1,3)
+
+ logger.info(f"Initialized Lorenz system with initial coordinates: {self.data}")
+
+ def stop(self):
+ """Trivial stop function."""
+ logger.info("Lorenz Generator stopping")
+ return 0
+
+ def runStep(self):
+ """Generates the next 25 points in the lorenz system.
+
+ Sends the progressively filled data as a flattened array with the frame number appended to the processor.
+ """
+ if self.frame_num >= 150:
+ return
+
+        # Add the next 25 points
+ for _ in range(25):
+ # Compute the next coordinate
+ derivative = lorenz(self.data[-1])
+ next_coordinate = self.data[-1] + derivative * self.dt
+ self.data = np.vstack((self.data, next_coordinate))
+
+ # create flattened array with xyz coordinates along with the current frame number
+ data = np.append(self.data.ravel(), self.frame_num)
+
+ # Send the flattened array with frame_num
+ try:
+ data_id = self.client.put(data)
+ if self.store_loc:
+ self.q_out.put([[data_id, str(self.frame_num)]])
+ else:
+ self.q_out.put(data_id)
+
+ # Increment frame number
+ self.frame_num += 1
+ except Exception as e:
+ logger.error(f"--------------------------------Generator Exception: {e}")
+
+
+def lorenz(xyz, s=10, r=28, b=2.667):
+ """
+ Parameters
+ ----------
+ xyz : array-like, shape (3,)
+ Point of interest in three-dimensional space.
+ s, r, b : float
+ Parameters defining the Lorenz attractor.
+
+ Returns
+ -------
+ xyz_dot : array, shape (3,)
+ Values of the Lorenz attractor's partial derivatives at *xyz*.
+ """
+ x, y, z = xyz
+ x_dot = s * (y - x)
+ y_dot = r * x - y - x * z
+ z_dot = x * y - b * z
+ return np.array([x_dot, y_dot, z_dot])
diff --git a/demos/fastplotlib/actors/lorenz_processor.py b/demos/fastplotlib/actors/lorenz_processor.py
new file mode 100644
index 00000000..31d9e8fc
--- /dev/null
+++ b/demos/fastplotlib/actors/lorenz_processor.py
@@ -0,0 +1,78 @@
+from improv.actor import Actor
+import time
+import logging
+import zmq
+import numpy as np
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+
+
+class Processor(Actor):
+ """
+ Processes Lorenz data by performing custom transformations on the coordinates
+ (e.g., scaling and applying mathematical operations) and sends the processed
+ data through a ZMQ socket for visualization.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ def setup(self):
+ """
+        Sets up the ZMQ socket and initializes class variables.
+ """
+ self.name = "Processor"
+ self.frame = None
+ self.frame_num = None
+
+ # Set up ZMQ PUB socket
+ context = zmq.Context()
+ self.socket = context.socket(zmq.PUB)
+ self.socket.bind("tcp://127.0.0.1:5555")
+
+ logger.info("Processor setup completed. ZMQ PUB socket bound to tcp://127.0.0.1:5555")
+
+ def stop(self):
+ """
+ Stop function. Closes the ZMQ socket connection.
+ """
+ logger.info("Processor stopping")
+ self.socket.close()
+ return 0
+
+ def runStep(self):
+ """
+ Trivial processing step that gets the lorenz data and passes it through the
+ ZMQ socket for visualization.
+ """
+ # Delay for half a second for visualization purposes
+ time.sleep(0.5)
+
+ data_id = None
+ try:
+ data_id = self.q_in.get(timeout=0.05)
+ except Exception:
+ logger.error(f"Could not get frame!")
+ pass
+
+ if data_id is not None:
+ try:
+ if self.store_loc:
+ # Fetch the data from the client using the ObjectID
+ self.frame = self.client.getID(data_id[0][0])
+ else:
+ self.frame = self.client.get(data_id)
+
+
+ # unpack the frame to get the frame number
+ data = np.array(self.frame, dtype=np.float64)
+ self.frame_num = int(data[-1])
+
+
+ # Send the processed data through the ZMQ socket
+ self.socket.send(data)
+ logger.info(f"Frame {self.frame_num}: Sent points with size {data.shape} after processing")
+
+ except Exception as e:
+ logger.error(f"Error processing frame: {e}")
\ No newline at end of file
diff --git a/demos/fastplotlib/actors/visual_processor.py b/demos/fastplotlib/actors/visual_processor.py
new file mode 100644
index 00000000..9dc9ec10
--- /dev/null
+++ b/demos/fastplotlib/actors/visual_processor.py
@@ -0,0 +1,93 @@
+from improv.actor import Actor
+import random
+import logging
+import zmq
+import numpy as np
+import time
+from queue import Empty
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+
+
+class Processor(Actor):
+ """Sample processor used to scale a sine or cosine wave and calculate the amplitude.
+ Intended for use with sample_generator.py.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ def setup(self):
+        """Initializes all class variables. Creates and binds the socket for zmq to send to fastplotlib.ipynb
+ for visualization.
+
+ self.name (string): name of the actor.
+ self.frame (ObjectID): StoreInterface object id referencing data from the store.
+ self.frame_num (int): index of current frame.
+ self.processed_data (np.array): raveled array containing the processed data appended with the current frame number
+ """
+ self.name = "Processor"
+ self.frame = None
+ self.frame_num = 0
+ self.processed_data = None
+
+ context = zmq.Context()
+ self.socket = context.socket(zmq.PUB)
+ self.socket.bind("tcp://127.0.0.1:5555")
+
+ logger.info("Completed setup for Processor")
+
+ def stop(self):
+ """Stop function that closes the zmq socket to visualization notebook."""
+ logger.info("Processor stopping")
+ self.socket.close()
+ return 0
+
+ def runStep(self):
+ """
+ Gets from the input queue, scales the data in the y-dimension by a random number between 1-10 inclusive and then
+ calculates the amplitude of the wave.
+ """
+ # delay frame unpacking for visualization purposes
+ time.sleep(0.5)
+
+ data_id = None
+ try:
+ data_id = self.q_in.get(timeout=0.05)
+ except Empty:
+ pass
+ except Exception as e:
+ logger.error(f"Could not get frame!")
+
+ if data_id is not None:
+ try:
+ if self.store_loc:
+ # Fetch the data from the client using the ObjectID
+ self.frame = self.client.getID(data_id[0][0])
+ else:
+ self.frame = self.client.get(data_id)
+
+
+ # Unpack the frame to get the data and frame number
+ data = np.array(self.frame, dtype=np.float64)
+ self.frame_num = int(data[-1])
+ # reshape the data to 2D array
+ data = data[:-1].reshape(-1, 2)
+
+ # Scale the y-values of the sine or cosine wave by random factor
+ scale_factor = random.randint(1, 10)
+ data[:, 1] *= scale_factor
+
+ # calculate the amplitude
+ amplitude = np.round((data.max(axis=0)[1] - data.min(axis=0)[1]) / 2)
+ logger.info(f"Frame {self.frame_num} has amplitude {amplitude}")
+
+ # Flatten processed values and append frame number
+ self.processed_data = np.append(data.ravel(), self.frame_num)
+
+ # Send the processed data through the ZMQ socket to be visualized
+ self.socket.send(self.processed_data)
+
+ except Exception as e:
+ logger.error(f"Error processing frame: {e}")
diff --git a/demos/fastplotlib/fastplotlib.ipynb b/demos/fastplotlib/fastplotlib.ipynb
new file mode 100644
index 00000000..ba122d59
--- /dev/null
+++ b/demos/fastplotlib/fastplotlib.ipynb
@@ -0,0 +1,206 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "83566503-c31b-4d83-b6f0-173bc9b2102d",
+ "metadata": {},
+ "source": [
+ "## `fastplotlib` demo"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "093c8fd4-bbf8-4374-a661-494a61a73cca",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import zmq\n",
+ "import numpy as np\n",
+ "import fastplotlib as fpl"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "54d4aeca-cc13-4818-92f7-4ab7ea5734f9",
+ "metadata": {},
+ "source": [
+ "## Setup zmq subscriber client"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "24d73c48-c24d-440b-8dfc-9c4a2f476233",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "context = zmq.Context()\n",
+ "sub = context.socket(zmq.SUB)\n",
+ "sub.setsockopt(zmq.SUBSCRIBE, b\"\")\n",
+ "\n",
+ "# keep only the most recent message\n",
+ "sub.setsockopt(zmq.CONFLATE, 1)\n",
+ "\n",
+ "# address must match publisher in Processor actor\n",
+ "sub.connect(\"tcp://127.0.0.1:5555\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8f637c0c-3854-49ce-9d19-c692d2af61e8",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def get_buffer():\n",
+ " \"\"\"Gets the buffer from the publisher.\"\"\"\n",
+ " try:\n",
+ " b = sub.recv(zmq.NOBLOCK)\n",
+ " except zmq.Again:\n",
+ " pass\n",
+ " else:\n",
+ " return b\n",
+ " \n",
+ " return None"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "fc5d4c34-3f4e-4526-b245-1db3fcd84bf5",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Create the figure\n",
+ "figure = fpl.Figure()\n",
+ "\n",
+ "# Add an initial cosine wave\n",
+ "xs = np.linspace(-10, 10, 100)\n",
+ "ys = np.cos(xs)\n",
+ "wave_graphic = figure[0, 0].add_line(data=np.column_stack((xs, ys)), name=\"wave\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "4b08bd74-9c6f-4c0a-981f-009028e62133",
+ "metadata": {},
+ "source": [
+ "## Define function to fetch the buffer, unpack it, and update the plot "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d5240ab4-3bb4-4840-8a30-b0a60abe4aa1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def update_frame(p):\n",
+ " \"\"\"Update the frame using data received from the socket.\"\"\"\n",
+ " buff = get_buffer()\n",
+ " if buff is not None:\n",
+ " # Deserialize the buffer into a NumPy array\n",
+ " data = np.frombuffer(buff, dtype=np.float64)\n",
+ "\n",
+ " print(data.shape)\n",
+ "\n",
+ " # Extract the frame number from the last index\n",
+ " frame_num = int(data[-1]) \n",
+ "\n",
+ " # reshape the data to (100, 2)\n",
+ " data = data[:-1].reshape(100, 2) \n",
+ "\n",
+ " # Update the line plot\n",
+ " p[\"wave\"].data[:,:2] = data\n",
+ "\n",
+ " # Update the plot title with the frame number\n",
+ " p.name = f\"frame: {frame_num}\"\n",
+ "\n",
+ " # data is scaled in the y\n",
+ " p.auto_scale()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a337ec69-1288-4c5c-a55e-c3f6a1b7bcba",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Add the animation update function\n",
+ "figure[0, 0].add_animations(update_frame)\n",
+ "\n",
+ "# show the figure, should initially see a white cosine wave\n",
+ "figure.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9dcb7d8f-3f87-4965-8f66-93326954f257",
+ "metadata": {},
+ "source": [
+ "## `fastplotlib` is non-blocking :D"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "bf3f56cf-2058-4b7e-af20-6561b5459e84",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "wave_graphic.cmap = \"autumn\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "48567be2-94b5-4eab-99ff-9d6fdc7e6038",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "figure.canvas.get_stats()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "811943fd-72e2-4323-be98-4694e86fbd2b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "figure[0,0].auto_scale()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c8c0f7fb-e6df-484f-b7ff-fa402fac373a",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.16"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/demos/fastplotlib/lorenz.ipynb b/demos/fastplotlib/lorenz.ipynb
new file mode 100644
index 00000000..efb2cbe7
--- /dev/null
+++ b/demos/fastplotlib/lorenz.ipynb
@@ -0,0 +1,187 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "df609e65-ba82-41fa-82b2-a20aa81b5803",
+ "metadata": {},
+ "source": [
+ "## `fastplotlib` lorenz attractor demo\n",
+ "\n",
+ "The Lorenz system is a dynamical system known for having chaotic solutions for certain parameter values and initial conditions. See [here](https://en.wikipedia.org/wiki/Lorenz_system) for more details. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "068919b3-0a3a-47ff-836b-221079d6e095",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import zmq\n",
+ "import numpy as np\n",
+ "import fastplotlib as fpl"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "ae2be309-45ad-4e5b-b8a7-1acd4c6aa112",
+ "metadata": {},
+ "source": [
+ "## Setup zmq subscriber client"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c665d7f4-d7eb-4ef7-a9c8-d737b9f0b073",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "context = zmq.Context()\n",
+ "sub = context.socket(zmq.SUB)\n",
+ "sub.setsockopt(zmq.SUBSCRIBE, b\"\")\n",
+ "\n",
+ "# keep only the most recent message\n",
+ "sub.setsockopt(zmq.CONFLATE, 1)\n",
+ "\n",
+ "# address must match publisher in Processor actor\n",
+ "sub.connect(\"tcp://127.0.0.1:5555\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "28f334db-aadb-40fc-b067-c5b4c4a146d1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def get_buffer():\n",
+ " \"\"\"Gets the buffer from the publisher.\"\"\"\n",
+ " try:\n",
+ " b = sub.recv(zmq.NOBLOCK)\n",
+ " except zmq.Again:\n",
+ " pass\n",
+ " else:\n",
+ " return b\n",
+ " \n",
+ " return None"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b4e60560-73f3-41a7-a680-1394109eafcb",
+ "metadata": {},
+ "source": [
+ "## Create a figure"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "147e4be8-fb62-4463-bf6f-0a8e32593c4e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Create the figure\n",
+ "figure = fpl.Figure(\n",
+ " cameras=\"3d\",\n",
+ " controller_types=\"fly\",\n",
+ ")\n",
+ "\n",
+ "# turn off axes\n",
+ "figure[0,0].axes.visible = False"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9c3ac076-d4a8-4877-8843-c5e6cc54a20c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def update_frame(p):\n",
+ " \"\"\"Update the frame using data received from the socket.\"\"\"\n",
+ " buff = get_buffer()\n",
+ " if buff is not None:\n",
+ " # Deserialize the buffer into a NumPy array\n",
+ " data = np.frombuffer(buff, dtype=np.float64)\n",
+ "\n",
+ " # Extract the frame number from the last index\n",
+ " frame_num = int(data[-1]) \n",
+ "\n",
+ " # after the first couple of frames generated, need to auto scale\n",
+ " if frame_num == 3:\n",
+ " p.auto_scale()\n",
+ "\n",
+ " data = data[:-1].reshape(-1, 3)\n",
+ "\n",
+ " # clear the plot to add updated data\n",
+ " p.clear()\n",
+ " \n",
+ " # # add graphic for current data received\n",
+ " p.add_line(data, cmap=\"jet\", thickness=2)\n",
+ "\n",
+ " # Update the plot title with the frame number\n",
+ " p.name = f\"frame: {frame_num}\""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "d72d810d-36df-4b00-8b17-f90bde8cc473",
+ "metadata": {},
+ "source": [
+    "## You can use the `w, a, s, d` keys to \"fly\" around the plot as the data is being generated in 3D"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "439abe77-263c-4ab4-b810-5c7d5e6029e1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Add the animation update function\n",
+ "figure[0, 0].add_animations(update_frame)\n",
+ "\n",
+ "figure.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "cab2edd6-c7d9-4ae2-bb5f-33d54efb7134",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "74b6e08b-83b2-4394-a8a8-4745ca09a88a",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.16"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/demos/fastplotlib/lorenz.yaml b/demos/fastplotlib/lorenz.yaml
new file mode 100644
index 00000000..e4c36e05
--- /dev/null
+++ b/demos/fastplotlib/lorenz.yaml
@@ -0,0 +1,14 @@
+actors:
+ Generator:
+ package: actors.lorenz_generator
+ class: Generator
+
+ Processor:
+ package: actors.lorenz_processor
+ class: Processor
+
+connections:
+ Generator.q_out: [Processor.q_in]
+
+redis_config:
+ port: 6381
\ No newline at end of file
diff --git a/demos/minimal/README.md b/demos/minimal/README.md
new file mode 100644
index 00000000..4c46572d
--- /dev/null
+++ b/demos/minimal/README.md
@@ -0,0 +1,100 @@
+# minimal demo
+
+This folder contains a minimal demo for running improv. In this demo, data generated by the **generator** `actor` is
+stored in a data store and a key to the generated data is sent via a queue to the **processor** `actor` that accesses
+the data in the store and processes it.
+
+### Running the minimal demo
+
+> **_NOTE:_** Make sure the generator and processor specified in the minimal.yaml file
+> are `actors.sample_generator` and `actors.sample_processor` respectively.
+
+Usage:
+
+```bash
+
+# start improv
+improv run ./demos/minimal/minimal.yaml
+
+# call `setup` in the improv TUI
+setup
+
+# call `run` in the improv TUI
+run
+
+# when you are ready to stop the process, call `stop` in the improv TUI
+stop
+```
+
+If you look at the `global.log` file, you should see outputted information about each generated frame.
+
+## Simple Visualization using `fastplotlib`
+
+You can also run the minimal demo and visualize the generated data using `fastplotlib`.
+
+As before, data generated by the **generator** `actor` is stored in a data store and a key to the generated data is
+sent via a queue to the **processor** `actor` that accesses the data in the store and processes it. Additionally, the
+`fastplotlib.ipynb` notebook receives the most recent data from the processor via `zmq` and displays it using
+[`fastplotlib`](https://github.com/fastplotlib/fastplotlib).
+
+### Instructions
+
+1. Update the processor in minimal.yaml to `actors.visual_processor`.
+
+2. Run `pip install -r requirements.txt` in this directory.
+
+Usage:
+
+```bash
+
+# start improv with actor paths to locate the sample_generator and visual_processor actor files
+improv run -a ./demos/fastplotlib/ -a ./demos/minimal/ ./demos/minimal/minimal.yaml
+
+# call `setup` in the improv TUI
+setup
+
+# Run the cells in the jupyter notebook until you receive
+# a plot that has a white cosine wave
+
+# once the plot is ready call `run` in the improv TUI
+run
+
+# You should see the plot updating between a cosine and sine wave depending on
+# whether the frame number is even or odd
+
+# when you are ready to stop the process, call `stop` in the improv TUI
+stop
+```
+
+> **_NOTE:_** The `fastplotlib.ipynb` can only be run in `jupyter lab`
+
+
+## Dynamical System Visualization using `fastplotlib`
+
+The Lorenz system is a dynamical system known for having chaotic solutions for certain parameter values and initial
+conditions. See [here](https://en.wikipedia.org/wiki/Lorenz_system) for more details.
+
+This demo incrementally generates points in a Lorenz system based on pre-defined parameters and an initial condition.
+The data sent from the processor via `zmq` to the `lorenz.ipynb` updates the visualization, adding new points as they arrive.
+Over time, you should see the build-up of a Lorenz attractor.
+
+### Instructions
+
+```bash
+improv run ./demos/fastplotlib/lorenz.yaml
+
+# call `setup` in the improv TUI
+setup
+
+# Run the cells in the jupyter notebook until you get an empty plot
+
+# once the plot is ready call `run` in the improv TUI
+run
+
+# You should see the plot updating with a Lorenz attractor
+# curve that changes colors according to the z values.
+
+# when you are ready to stop the process, call `stop` in the improv TUI
+stop
+```
+
diff --git a/demos/minimal/actors/sample_generator.py b/demos/minimal/actors/sample_generator.py
index 31335833..370ca6b4 100644
--- a/demos/minimal/actors/sample_generator.py
+++ b/demos/minimal/actors/sample_generator.py
@@ -1,21 +1,22 @@
from improv.actor import Actor
import numpy as np
import logging
+import time
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class Generator(Actor):
- """Sample actor to generate data to pass into a sample processor.
+ """Sample actor to generate a sine/cosine wave based on frame number to pass into a sample processor.
Intended for use along with sample_processor.py.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
- self.data = None
self.name = "Generator"
+ self.data = None
self.frame_num = 0
def __str__(self):
@@ -24,17 +25,20 @@ def __str__(self):
def setup(self):
"""Generates an array that serves as an initial source of data.
- Initial array is a 100 row, 5 column numpy matrix that contains
- integers from 1-99, inclusive.
+ Initial data is a 2D cosine wave consisting of 100 evenly spaced xy points ranging from -10 to 10 inclusive.
"""
-
logger.info("Beginning setup for Generator")
- self.data = np.asmatrix(np.random.randint(100, size=(100, 5)))
+
+ # generate 100 evenly spaced values from -10 to 10
+ xs = np.linspace(-10, 10, 100)
+ ys = np.cos(xs)
+ # stack xs and ys to create a (100, 2) array of xy points
+ self.data = np.column_stack([xs, ys])
+
logger.info("Completed setup for Generator")
def stop(self):
- """Save current randint vector to a file."""
-
+ """Save current wave vector to file."""
logger.info("Generator stopping")
np.save("sample_generator_data.npy", self.data)
return 0
@@ -42,33 +46,38 @@ def stop(self):
def runStep(self):
"""Generates additional data after initial setup data is exhausted.
- Data is of a different form as the setup data in that although it is
- the same size (5x1 vector), it is uniformly distributed in [1, 10]
- instead of in [1, 100]. Therefore, the average over time should
- converge to 5.5.
+ If the frame number is odd, the data is a sine wave. If the frame number is even, the data is a cosine wave.
"""
+ # set a max number of frames to generate
+ if self.frame_num > 1000:
+ return
+
+ xs = np.linspace(-10, 10, 100)
+
+ # Generate sine or cosine values based on frame number
+ if self.frame_num % 2 == 1:
+ ys = np.sin(xs)
+ else:
+ ys = np.cos(xs)
+
+ # update data
+ self.data = np.column_stack([xs, ys])
- if self.frame_num < np.shape(self.data)[0]:
+ # create flattened array with x and y coordinates along with the current frame number
+ data = np.append(self.data.ravel(), self.frame_num) # Shape (201,)
+
+ # Send the flattened array with frame_num
+ try:
+ data_id = self.client.put(data)
if self.store_loc:
- data_id = self.client.put(
- self.data[self.frame_num], str(f"Gen_raw: {self.frame_num}")
- )
+ self.q_out.put([[data_id, str(self.frame_num)]])
else:
- data_id = self.client.put(self.data[self.frame_num])
- # logger.info('Put data in store')
- try:
- if self.store_loc:
- self.q_out.put([[data_id, str(self.frame_num)]])
- else:
- self.q_out.put(data_id)
- # logger.info("Sent message on")
-
- self.frame_num += 1
- except Exception as e:
- logger.error(
- f"--------------------------------Generator Exception: {e}"
- )
- else:
- self.data = np.concatenate(
- (self.data, np.asmatrix(np.random.randint(10, size=(1, 5)))), axis=0
- )
+ self.q_out.put(data_id)
+
+ # Increment frame number
+ self.frame_num += 1
+ except Exception as e:
+ logger.error(f"--------------------------------Generator Exception: {e}")
+
+
+
diff --git a/demos/minimal/actors/sample_processor.py b/demos/minimal/actors/sample_processor.py
index 40fbf4d5..f24fb12f 100644
--- a/demos/minimal/actors/sample_processor.py
+++ b/demos/minimal/actors/sample_processor.py
@@ -1,14 +1,16 @@
from improv.actor import Actor
-import numpy as np
+from queue import Empty
import logging
+import zmq
+import numpy as np
+import random
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class Processor(Actor):
- """Sample processor used to calculate the average of an array of integers.
-
+ """Sample processor used to scale a sine or cosine wave and calculate the amplitude.
Intended for use with sample_generator.py.
"""
@@ -20,48 +22,54 @@ def setup(self):
self.name (string): name of the actor.
self.frame (ObjectID): StoreInterface object id referencing data from the store.
- self.avg_list (list): list that contains averages of individual vectors.
self.frame_num (int): index of current frame.
"""
-
self.name = "Processor"
self.frame = None
- self.avg_list = []
- self.frame_num = 1
+ self.frame_num = None
+
logger.info("Completed setup for Processor")
def stop(self):
"""Trivial stop function for testing purposes."""
-
logger.info("Processor stopping")
+ return 0
def runStep(self):
- """Gets from the input queue and calculates the average.
-
- Receives an ObjectID, references data in the store using that
- ObjectID, calculates the average of that data, and finally prints
- to stdout.
"""
+ Gets from the input queue, scales the data in the y-dimension by a random number between 1-10 inclusive and then
+ calculates the amplitude of the wave.
- frame = None
+ """
+ data_id = None
try:
- frame = self.q_in.get(timeout=0.05)
-
+ data_id = self.q_in.get(timeout=0.05)
except Exception:
- logger.error("Could not get frame!")
+ logger.error(f"Could not get frame!")
pass
- if frame is not None and self.frame_num is not None:
- self.done = False
- if self.store_loc:
- self.frame = self.client.getID(frame[0][0])
- else:
- self.frame = self.client.get(frame)
- avg = np.mean(self.frame[0])
+ if data_id is not None:
+ try:
+ if self.store_loc:
+ # Fetch the data from the client using the ObjectID
+ self.frame = self.client.getID(data_id[0][0])
+ else:
+ self.frame = self.client.get(data_id)
+
+ # Unpack the frame to get the data and frame number
+ data = np.array(self.frame, dtype=np.float64)
+ self.frame_num = int(data[-1])
+ # reshape the data to 2D array
+ data = data[:-1].reshape(-1, 2)
+
+ # Scale the y-values of the sine or cosine wave by random factor
+ scale_factor = random.randint(1, 10)
+ data[:, 1] *= scale_factor
+
+                # calculate the amplitude
+ amplitude = np.round((data.max(axis=0)[1] - data.min(axis=0)[1]) / 2)
+ logger.info(f"Frame {self.frame_num} has amplitude {amplitude}")
- logger.info(f"Average: {avg}")
- self.avg_list.append(avg)
- logger.info(f"Overall Average: {np.mean(self.avg_list)}")
- logger.info(f"Frame number: {self.frame_num}")
+ except Exception as e:
+ logger.error(f"Error processing frame: {e}")
- self.frame_num += 1
diff --git a/demos/minimal/requirements.txt b/demos/minimal/requirements.txt
new file mode 100644
index 00000000..384e4951
--- /dev/null
+++ b/demos/minimal/requirements.txt
@@ -0,0 +1,2 @@
+simplejpeg
+fastplotlib[notebook]
\ No newline at end of file
diff --git a/docs/_config.yml b/docs/_config.yml
index e3881a7a..6398470a 100644
--- a/docs/_config.yml
+++ b/docs/_config.yml
@@ -8,7 +8,7 @@
title : improv documentation # The title of the book. Will be placed in the left navbar.
author : improv team # The author of the book
copyright : "2024" # Copyright year to be placed in the footer
-logo : logo.png # A path to the book logo
+logo : improv_logo_vertical.svg # A path to the book logo
# Force re-execution of notebooks on each build.
# See https://jupyterbook.org/content/execute.html
diff --git a/docs/_toc.yml b/docs/_toc.yml
index 426f566d..400c2b81 100644
--- a/docs/_toc.yml
+++ b/docs/_toc.yml
@@ -5,6 +5,7 @@ format: jb-book
root: intro
chapters:
- file: installation
+- file: configuration
- file: running
- file: demos
- file: design
diff --git a/docs/configuration.md b/docs/configuration.md
new file mode 100644
index 00000000..eab0f073
--- /dev/null
+++ b/docs/configuration.md
@@ -0,0 +1,72 @@
+(page:configuration)=
+# Configuring _improv_
+
+## Redis Configuration Options
+
+Although _improv_ works with Redis out of the box, there are a handful of optional configuration options that can be used to change the behavior of _improv_'s data store.
+Redis configuration is specified in the configuration file under the `redis_config` key.
+
+(#persistence)=
+### Persistence
+_improv_ can configure the data store to write its contents to a set of append-only log files on disk.
+Append-only files, or AOFs, can be reused between instances of _improv_, which will allow successive runs of _improv_ to access previously stored data.
+By default, this capability is disabled, but is controlled by the following settings.
+
+#### enable_saving
+This field enables saving of data store contents to AOFs on disk. The default mode of operation uses the default Redis AOF directory, `appendonlydir`, which will be automatically created if it does not exist.
+- Type: boolean (True/False)
+- Example: True
+
+#### aof_dirname
+This setting controls the name of the directory in which the store saves the append-only logs.
+Persistence happens automatically at scheduled intervals throughout the operation of the data store, as well as during shutdown of the data store.
+
+- Type: string
+- Example: `custom_aof_directory`
+
+#### generate_ephemeral_aof_dirname
+This field indicates that _improv_ should generate a unique AOF directory name to use each time it is run.
+This ensures each run of _improv_ starts from a clean data store, without needing to manually set a unique directory name before each run.
+
+- Type: boolean (True/False)
+- Example: True
+
+#### fsync_frequency
+Although the append-only log files, when enabled, are updated every time the state of the data store is changed, the contents of the file are not always immediately flushed to the system's disk.
+This field controls how often the data store should request that the operating system flush the contents of the append-only log files to the hard disk.
+
+- Type: string
+- Values:
+ - `every_write` - sync the AOF to disk on every data store modification.
+ - Highest fault tolerance
+ - Highest background disk and data store load. The data store can spend a lot of time doing processing unrelated to running the experiment.
+ - `every_second` - sync the AOF to disk every second.
+ - Moderate fault tolerance - can only lose up to roughly 1 second of data in a disaster.
+ - Better performance.
+  - `no_schedule` - sync the AOF according to the default system policy.
+ - Lowest fault tolerance - can go from 1 to 30 seconds, occasionally more, between syncs.
+ - Highest performance.
+- Default: `no_schedule`
+- Example: `every_second`
+
+### Connectivity
+#### port
+This field controls the port on which the data store runs. If left unspecified, _improv_ will attempt to start the data store on the default port number,
+looking for alternatives if tried port numbers are busy. If specified, then _improv_ will only try to start the data store on the specified port number.
+
+- Type: integer
+- Default: 6379
+- Example: 16300
+
+### Example
+An _improv_ configuration file which sets the data store port to 6385, enables persistence to disk, and uses a unique directory for each run, would look like:
+```
+actors: ...
+
+connections: ...
+
+redis_config:
+ enable_saving: True
+ port: 6385
+ generate_ephemeral_aof_dirname: True
+```
\ No newline at end of file
diff --git a/docs/design.md b/docs/design.md
index 9bd6b188..c67b3d3a 100644
--- a/docs/design.md
+++ b/docs/design.md
@@ -98,4 +98,4 @@ For examples and further documentation, see [](page:actors).
## Logging and persistence
Finally, _improv_ handles centralized logging via the [`logging`](https://docs.python.org/3/library/logging.html) module, which listens for messages on a global logging port. These messages are written to the experimental log file.
-Data from the server are persisted to disk using [LMDB](http://www.lmdb.tech/doc/) (if `settings: use_hdd` is set to `true` in the configuration file).
\ No newline at end of file
+Data from the server are persisted to disk using [Redis Append-Only Log Files](https://redis.io/docs/latest/operate/oss_and_stack/management/persistence/) (if `redis_config: enable_saving` is set to `True` in the configuration file). See the [persistence section](configuration.md#persistence) of the [configuration guide](configuration.md) for more information.
\ No newline at end of file
diff --git a/docs/improv_logo_horizontal.svg b/docs/improv_logo_horizontal.svg
new file mode 100644
index 00000000..57f190b2
--- /dev/null
+++ b/docs/improv_logo_horizontal.svg
@@ -0,0 +1,21 @@
+
diff --git a/docs/improv_logo_vertical.svg b/docs/improv_logo_vertical.svg
new file mode 100644
index 00000000..9d17a96a
--- /dev/null
+++ b/docs/improv_logo_vertical.svg
@@ -0,0 +1,21 @@
+
diff --git a/docs/installation.md b/docs/installation.md
index 6fa470dd..84d20ac5 100644
--- a/docs/installation.md
+++ b/docs/installation.md
@@ -1,18 +1,43 @@
(page:installation)=
# Installation and building
+## Installation Time
+All installation processes described here should take less than 5 minutes to complete on a standard workstation.
+
## Simple installation
-The simplest way to install _improv_ is with pip:
+Once you have the [required dependencies](#required-dependencies), the simplest way to install _improv_ is with pip:
```
pip install improv
```
````{warning}
-Due to [this pyzmq issue](https://github.com/zeromq/libzmq/issues/3313), if you're running on Ubuntu, you need to specify
+Due to [this pyzmq issue](https://github.com/zeromq/libzmq/issues/3313), if you're running on Ubuntu, you _may_ need to specify
```
pip install improv --no-binary pyzmq
```
-to build `pyzmq` from source.
+to build `pyzmq` from source if you're running into ZMQ errors.
````
+(#required-dependencies)=
+## Required dependencies
+
+### Redis
+
+_improv_ uses Redis, an in-memory datastore, to hold data to be communicated between actors.
+_improv_ works automatically with Redis, but additional options for controlling the behavior of the data store are listed in
+the [configuration guide](configuration.md).
+
+_improv_ has been tested with Redis server version 7.2.4. Please refer to the instructions below for your operating system:
+
+#### macOS
+A compatible version of Redis can be installed via [Homebrew](https://brew.sh):
+```
+brew install redis
+```
+
+#### Linux
+A compatible version of Redis can be installed for most standard Linux distributions (e.g. Ubuntu) by following Redis' short [Linux installation guide](https://redis.io/docs/latest/operate/oss_and_stack/install/install-redis/install-redis-on-linux/).
+
+#### Windows (WSL2)
+Redis can also be installed on Windows in WSL2. The [WSL2 installation guide](https://redis.io/docs/latest/operate/oss_and_stack/install/install-redis/install-redis-on-windows/) details the process for both the Windows and Linux portions of WSL2.
## Required dependencies
@@ -66,13 +91,10 @@ Currently, we build using [Setuptools](https://setuptools.pypa.io/en/latest/inde
For now, the package can be built by running
```
-python -m build
-```
-from within the project directory. After that
-```
pip install --editable .
```
-will install the package in editable mode, which means that changes in the project directory will affect the code that is run (i.e., the installation will not copy over the code to `site-packages` but simply link the project directory).
+from within the project directory.
+This will install the package in editable mode, which means that changes in the project directory will affect the code that is run (i.e., the installation will not copy over the code to `site-packages` but simply link the project directory).
When uninstalling, be sure to do so _from outside the project directory_, since otherwise, `pip` only appears to find the command line script, not the full package.
@@ -93,20 +115,3 @@ Then simply run
jupyter-book build docs
```
and open `docs/_build/html/index.html` in your browser.
-
-## Getting around certificate issues
-
-On some systems, building (or installing from `pip`) can run into [this error](https://stackoverflow.com/questions/25981703/pip-install-fails-with-connection-error-ssl-certificate-verify-failed-certi) related to SSL certificates. For `pip`, the solution is given in the linked StackOverflow question (add `--trusted-host` to the command line), but for `build`, we run into the issue that the `trusted-host` flag will not be passed through to `pip`, and `pip` will build inside an isolated venv, meaning it won't read a file-based configuration option like the one given in the answer, either.
-
-The (inelegant) solution that will work is to set the `pip.conf` file with
-```
-[global]
-trusted-host = pypi.python.org
- pypi.org
- files.pythonhosted.org
-```
-and then run
-```
-python -m build --no-isolation
-```
-which will allow `pip` to correctly read the configuration.
\ No newline at end of file
diff --git a/docs/logo.png b/docs/logo.png
deleted file mode 100644
index 06d56f40..00000000
Binary files a/docs/logo.png and /dev/null differ
diff --git a/pyproject.toml b/pyproject.toml
index bf5a42b7..6f094fe4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -35,8 +35,6 @@ classifiers = ['Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
- 'Programming Language :: Python :: 3.11',
- 'Programming Language :: Python :: 3.12',
'Programming Language :: Python :: Implementation :: CPython'
]
dynamic = ["version"]
diff --git a/test/test_store_with_errors.py b/test/test_store_with_errors.py
index b95781a0..8402e794 100644
--- a/test/test_store_with_errors.py
+++ b/test/test_store_with_errors.py
@@ -29,7 +29,7 @@ def test_connect(setup_store, server_port_num):
store = StoreInterface(server_port_num=server_port_num)
assert isinstance(store.client, redis.Redis)
-
+
def test_plasma_connect(setup_plasma_store, set_store_loc):
store = PlasmaStoreInterface(store_loc=set_store_loc)
assert isinstance(store.client, plasma.PlasmaClient)