diff --git a/navipy/comparing/__init__.py b/navipy/comparing/__init__.py
index ee04adc1d00ab1e198ba5e8d695815900d9d9bbd..9dbf9ae7558d0602edae313e6b49239d5b4410d9 100644
--- a/navipy/comparing/__init__.py
+++ b/navipy/comparing/__init__.py
@@ -87,7 +87,8 @@ the current and memorised place code.
     # (M)
     ridf = np.zeros((current.shape[1], current.shape[2]))
     for azimuth_i in range(0, current.shape[1]):
-        rot_im = np.roll(current, azimuth_i, axis=1)
+        # Perform a counter-clockwise rotation
+        rot_im = np.roll(current, -azimuth_i, axis=1)
         ridf[azimuth_i, :] = np.squeeze(imagediff(rot_im, memory))  # rot_im
     return ridf
 
diff --git a/navipy/moving/agent.py b/navipy/moving/agent.py
index 0c4775cf34c9da016efae9c6f380156ed5f65675..3be7d48aa1d66d8a060ed0e19e269507d3f6be9d 100644
--- a/navipy/moving/agent.py
+++ b/navipy/moving/agent.py
@@ -49,27 +49,35 @@ class DefaultBrain():
         raise NameError('No Callback')
 
 
+def posorient_columns(convention):
+    return [('location', 'x'),
+            ('location', 'y'),
+            ('location', 'z'),
+            (convention, 'alpha_0'),
+            (convention, 'alpha_1'),
+            (convention, 'alpha_2')]
+
+
+def velocities_columns(convention):
+    return [('location', 'dx'),
+            ('location', 'dy'),
+            ('location', 'dz'),
+            (convention, 'dalpha_0'),
+            (convention, 'dalpha_1'),
+            (convention, 'dalpha_2')]
+
+
 class AbstractAgent():
     def __init__(self, convention='rzyx'):
         self._brain = DefaultBrain()
         self._alter_posorientvel = defaultcallback
-        tuples = [('location', 'x'),
-                  ('location', 'y'),
-                  ('location', 'z'),
-                  (convention, 'alpha_0'),
-                  (convention, 'alpha_1'),
-                  (convention, 'alpha_2')]
+        tuples = posorient_columns(convention)
         index = pd.MultiIndex.from_tuples(tuples,
                                           names=['position',
                                                  'orientation'])
         self._posorient_col = index
 
-        tuples_vel = [('location', 'dx'),
-                      ('location', 'dy'),
-                      ('location', 'dz'),
-                      (convention, 'dalpha_0'),
-                      (convention, 'dalpha_1'),
-                      (convention, 'dalpha_2')]
+        tuples_vel = velocities_columns(convention)
         index_vel = pd.MultiIndex.from_tuples(tuples_vel,
                                               names=['position',
                                                      'orientation'])
@@ -159,22 +167,58 @@ class AbstractAgent():
             return None
 
 
-class CyberBeeAgent(AbstractAgent):
+class CyberBeeAgent(AbstractAgent, Process):
     """
     A CyberBeeAgent uses the rendering method of cyberbee. \
 CyberBeeAgent is a close loop agent and need to be run within blender \
 (see :doc:`rendering`).
 
+    Single process
+    Here comes an example of how to use it
+
+    Multi process
+    CyberBeeAgent inherits from the Process \
+    class of the multiprocessing module of the standard python \
+    library. Thus, several CyberBeeAgents can safely be run in parallel.
+
     """
 
-    def __init__(self, brain, convention):
+    def __init__(self, brain, convention,
+                 posorients_queue=None,
+                 results_queue=None):
         if convention is None:
             raise Exception("a convention must be specified")
+        if (posorients_queue is not None) and (results_queue is not None):
+            multiprocessing.Process.__init__(self)
         AbstractAgent.__init__(self, convention)
         AbstractAgent._alter_posorientvel = \
             lambda motion_vec: navimomath.next_pos(motion_vec,
                                                    move_mode='free_run')
         self.brain = brain
+        self._posorients_queue = posorients_queue
+        self._results_queue = results_queue
+
+    def run(self):
+        """ Only supported when multiprocess"""
+        if self._posorients_queue is None or self._results_queue is None:
+            raise NameError('Single agent class has not been initialised '
+                            + 'with multiprocessing support')
+        proc_name = self.name
+        print('Process {} started'.format(proc_name))
+        while True:
+            start_posorient = self._posorients_queue.get(timeout=1)
+            if start_posorient is None:
+                # Poison pill means shutdown
+                break
+            common_id = list(set(start_posorient.index).intersection(
+                self._posorient_vel.index))
+            self._posorient_vel.loc[common_id] = start_posorient.loc[common_id]
+            self.move()
+            posorient_vel = self._posorient_vel
+            self._posorients_queue.task_done()
+            self._results_queue.put(posorient_vel)
+        self._posorients_queue.task_done()
+        print('Process {} done'.format(proc_name))
 
 
 class GridAgent(AbstractAgent, Process):
@@ -329,6 +373,49 @@ the agent motion, or
         self._graph = graph.copy()
         self.check_graph()
 
+    def compute_velocities(self,
+                           ncpu=5,
+                           timeout=1):
+        # Create a dataframe to store the velocities
+        convention = self._brain.renderer.rotation_convention
+        tuples_posvel = posorient_columns(convention)
+        tuples_posvel.extend(velocities_columns(convention))
+        index_posvel = pd.MultiIndex.from_tuples(tuples_posvel,
+                                                 names=['position',
+                                                        'orientation'])
+        self.velocities = pd.DataFrame(columns=index_posvel,
+                                       index=list(self._graph.nodes()))
+        # Build a list of nodes
+        posorients_queue = JoinableQueue()
+        results_queue = Queue()
+        if version < 2:
+            graph_nodes = list(self._graph.nodes())
+        else:
+            graph_nodes = list(self._graph.nodes)
+        for node in graph_nodes:
+            posorients_queue.put(self._graph.nodes[node]['posorient'])
+
+        # Start ndatabase loader
+        num_agents = ncpu
+        agents = [CyberBeeAgent(copy.copy(self._brain),
+                                convention=convention,
+                                posorients_queue=posorients_queue,
+                                results_queue=results_queue)
+                  for _ in range(num_agents)]
+        for w in agents:
+            w.start()
+
+        # Add a poison pill for each agent
+        for _ in range(num_agents):
+            posorients_queue.put(None)
+
+        # Wait for all of the tasks to finish
+        # posorients_queue.join()
+        for _ in range(nx.number_of_nodes(self._graph)):
+            res = results_queue.get(timeout=timeout)
+            self.velocities.loc[res.name, res.index] = res
+        return self.velocities.copy()
+
     def build_graph(self,
                     ncpu=5,
                     timeout=1):
diff --git a/navipy/scripts/blend_overlaytraj.py b/navipy/scripts/blend_overlaytraj.py
index a4a75ef6d0366e488528ffd07126abfdf63f364f..5b7728e1b6e3a2dbd992acbe145e26e279e873d7 100644
--- a/navipy/scripts/blend_overlaytraj.py
+++ b/navipy/scripts/blend_overlaytraj.py
@@ -106,6 +106,9 @@ def run(trajfile):
             break
     if not camera_found:
         raise NameError('The blender file does not contain a camera')
+    # Clear any existing animation so that the camera can be moved
+    # and new keyframes inserted
+    camera.animation_data_clear()
     # set first and last frame
     context = bpy.context
     context.scene.frame_start = trajectory.index.min()
diff --git a/navipy/sensors/renderer.py b/navipy/sensors/renderer.py
index d351859878d73af098736b923e116da5887ca9b8..b537ade72de9c12ad069414becbe96efbf685de1 100644
--- a/navipy/sensors/renderer.py
+++ b/navipy/sensors/renderer.py
@@ -232,6 +232,9 @@ class BlenderRender(AbstractRender):
                 camera_found = True
                 break
         assert camera_found, 'The blender file does not contain a camera'
+        # Clear any existing animation so that the camera can be moved
+        # and new keyframes inserted
+        self.camera.animation_data_clear()
         # The bee eye is panoramic, and with equirectangular projection
         self.camera.data.type = 'PANO'
         self.camera.data.cycles.panorama_type = 'EQUIRECTANGULAR'
@@ -479,8 +482,10 @@ class BlenderRender(AbstractRender):
         """
         # save image as a temporary file, and then loaded
         # sadly the rendered image pixels can not directly be access
+        keyframe = bpy.context.scene.frame_current
         filename = os.path.join(self.tmp_fileoutput['Folder'],
-                                self.tmp_fileoutput['Image'] + '0001' +
+                                self.tmp_fileoutput['Image'] +
+                                '{:04d}'.format(keyframe) +
                                 self.tmp_fileoutput['ext'])
         im_width, im_height = self.camera_resolution
         im = bpy.data.images.load(filename)
@@ -505,8 +510,10 @@ class BlenderRender(AbstractRender):
         """
         # save image as a temporary file, and then loaded
         # sadly the rendered image pixels can not directly be access
+        keyframe = bpy.context.scene.frame_current
         filename = os.path.join(self.tmp_fileoutput['Folder'],
-                                self.tmp_fileoutput['Depth'] + '0001' +
+                                self.tmp_fileoutput['Depth'] +
+                                '{:04d}'.format(keyframe) +
                                 self.tmp_fileoutput['ext'])
         im_width, im_height = self.camera_resolution
         im = bpy.data.images.load(filename)