
Webcam

Download the Jupyter notebook: Webcam.ipynb

The script examples/image/Webcam.py applies a red filter to the input from the webcam and isolates one mode of activity using a dynamical neural field.

Most of the concepts are similar to the Image Processing example. The VideoPopulation object additionally requires the Python bindings for OpenCV.
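
You can verify that the OpenCV Python bindings are importable before building the network (a minimal check, independent of ANNarchy):

# Check that the OpenCV Python bindings are available
try:
    import cv2
    print('OpenCV version:', cv2.__version__)
except ImportError:
    print('The OpenCV Python bindings (cv2) are not installed.')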

from ANNarchy import *
from ANNarchy.extensions.image import *
from ANNarchy.extensions.convolution import Convolution, Pooling
clear()
ANNarchy 4.7 (4.7.3) on darwin (posix).

# Definition of the neurons
LinearNeuron = Neuron(equations="r=sum(exc): min=0.0")
DNF = Neuron(parameters="tau=10.0", equations="tau*dr/dt + r = sum(exc) + sum(inh): min=0.0, max=1.0")
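
The DNF neuron follows the first-order ODE tau*dr/dt + r = sum(exc) + sum(inh), with the rate clipped to [0, 1]: at equilibrium, r settles to the (clipped) sum of its inputs. As a rough illustration of these dynamics, an explicit Euler step would look like this (a standalone numpy sketch, not what ANNarchy generates internally):

import numpy as np

def euler_step(r, inputs, tau=10.0, dt=1.0):
    # tau*dr/dt + r = inputs  =>  r += dt/tau * (inputs - r)
    r = r + dt / tau * (inputs - r)
    return np.clip(r, 0.0, 1.0)  # enforce the min=0.0, max=1.0 bounds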

# Population getting the video stream   
width = 640
height = 480
video = VideoPopulation(geometry=(height, width, 3))

# Subsampling population
pooled = Population(geometry=(48, 64, 3), neuron = LinearNeuron)

# Mean-pooling projection
pool_proj = Pooling(pre=video, post=pooled, target='exc', operation='mean')
pool_proj.connect_pooling()

# Define a red filter with no spatial extent
red_filter = [[ [2.0, -1.0, -1.0] ]]

# Create a population of DNF neurons downscaling the image with a factor 10 
dnf = Population(geometry=(48, 64), neuron = DNF)

# Create the convolution using the red filter
ff = Convolution(pre=pooled, post=dnf, target='exc')
ff.connect_filter(weights=red_filter)

# Create difference of Gaussians lateral connections for denoising/competition
lat = Projection(pre=dnf, post=dnf, target='inh')
lat.connect_dog(amp_pos=0.2, sigma_pos=0.1, amp_neg=0.1, sigma_neg=0.7)
<ANNarchy.core.Projection.Projection at 0x1339c83d0>
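
The difference-of-Gaussians (DoG) weights combine short-range excitation (amp_pos, sigma_pos) with long-range inhibition (amp_neg, sigma_neg), creating the competition that lets a single bump of activity win. Assuming the usual DoG form over the normalized distance d between neurons, the profile looks like this (a numpy sketch for illustration only):

import numpy as np

def dog(d, amp_pos=0.2, sigma_pos=0.1, amp_neg=0.1, sigma_neg=0.7):
    # Narrow positive Gaussian minus broad negative Gaussian
    return (amp_pos * np.exp(-d**2 / (2.0 * sigma_pos**2))
            - amp_neg * np.exp(-d**2 / (2.0 * sigma_neg**2)))

print(dog(np.linspace(0.0, 1.0, 5)))  # positive near d=0, negative further away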

The VideoPopulation acquires images from the webcam; here, the webcam should be able to deliver 640x480 color images.
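
If you are not sure what your camera delivers, you can query (and request) the resolution directly with OpenCV before building the network. A hypothetical check for device 0; release the device afterwards so ANNarchy can open it:

import cv2

cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)   # request 640x480
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
print(cap.get(cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
cap.release()  # free the device before start_camera() is called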

The corresponding population is then subsampled by a factor of 10 (from 640x480 down to 64x48), and a red filter is applied to it. This feeds a DNF (see the Neural Field example) which selects the region with the highest red density.
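
To see why the filter isolates red: per pixel it computes 2R - G - B, so a saturated red pixel responds with 2.0 while white, gray and black pixels respond with 0.0 (a quick numpy check):

import numpy as np

red_filter = np.array([2.0, -1.0, -1.0])
for name, rgb in [('red', [1., 0., 0.]), ('white', [1., 1., 1.]), ('green', [0., 1., 0.])]:
    print(name, red_filter @ np.array(rgb))
# red -> 2.0, white -> 0.0, green -> -1.0 (cut off by the DNF's min=0.0 bound)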

compile()

We can now start camera 0 (/dev/video0; adapt the index to your machine):

video.start_camera(0)
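
Before launching the GUI, it is worth checking that frames arrive and that the DNF responds. A minimal headless test using the populations defined above (50 ms is an arbitrary settling time):

video.grab_image()   # acquire a single frame into the input population
simulate(50.0)       # let the DNF converge on the red region
# Coordinates (row, column) of the strongest DNF response
print(np.unravel_index(np.argmax(dnf.r), dnf.geometry))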

A simple GUI based on PyQtGraph displays the input and output of the network:

try:
    from pyqtgraph.Qt import QtGui, QtCore
    import pyqtgraph as pg
except ImportError:
    print('PyQtGraph is not installed, cannot visualize the network.')
    exit(0)

# Wrapping class    
class Viewer(object):
    " Class to visualize the network activity using PyQtGraph."

    def __init__(self, video, result):
        self.video = video
        self.result = result
        app = pg.mkQApp()
        self.win = pg.GraphicsWindow(title="Live webcam")
        self.win.resize(640,480)   

        box = self.win.addViewBox(lockAspect=True)
        box.invertY()
        self.vis = pg.ImageItem()
        box.addItem(self.vis)  

        box = self.win.addViewBox(lockAspect=True)
        box.invertY()
        self.res = pg.ImageItem()
        box.addItem(self.res)  

        self.win.show()

        self.lastUpdate = pg.ptime.time()
        self.avgFps = 0.0


    def update(self):
        # Set the input
        self.video.grab_image()
        # Simulate for 5 ms with the new input
        simulate(5.0)
        # Refresh the GUI
        self.vis.setImage(np.swapaxes(self.video.r,0,1))
        self.res.setImage(np.swapaxes(self.result.r,0,1))
        # Listen to mouse/keyboard events
        QtGui.QApplication.processEvents()
        # FPS
        now = pg.ptime.time()
        fps = 1.0 / (now - self.lastUpdate)
        self.lastUpdate = now
        self.avgFps = self.avgFps * 0.8 + fps * 0.2
        # print(self.avgFps)

    def run(self):

        timer = QtCore.QTimer()
        timer.timeout.connect(self.update)
        timer.start(0)  
        QtGui.QApplication.instance().exec_() 
        timer.stop()
# Start the GUI
view = Viewer(video, dnf)
view.run() 
video.release()