diff --git a/doc/source/example/processing/apcv.py b/doc/source/example/processing/apcv.py
index 494936458450c16355a08e2be8026b5cc3abce33..2d5433d9d820e50694363b5da85b0efa9604b127 100644
--- a/doc/source/example/processing/apcv.py
+++ b/doc/source/example/processing/apcv.py
@@ -1,20 +1,20 @@
 import matplotlib.pyplot as plt
 from navipy.database import DataBaseLoad
 import navipy.processing as processing
-from navipy.sensors import Senses
+from navipy import Brain
 import pkg_resources
 
 # 1) Connect to the database
 mydb_filename = pkg_resources.resource_filename(
     'navipy', 'resources/database.db')
 mydb = DataBaseLoad(mydb_filename)
-mysenses = Senses(renderer=mydb)
+mybrain = Brain(renderer=mydb)
 # 2) Define the position-orinetation at which
 # we want the image
 posorient = mydb.posorients.loc[12, :]
-mysenses.update(posorient)
-my_apcv = processing.pcode.apcv(mysenses.vision.scene,
-                                mysenses.vision.viewing_directions)
+mybrain.update(posorient)
+my_apcv = processing.pcode.apcv(mybrain.vision.scene,
+                                mybrain.vision.viewing_directions)
 
 my_apcv_sph = processing.tools.cartesian_to_spherical(x=my_apcv[..., 0],
                                                       y=my_apcv[..., 1],
diff --git a/doc/source/example/processing/contrast_weighted_nearness.py b/doc/source/example/processing/contrast_weighted_nearness.py
index 614d6675c9473386666ec6653174c3742b2e7548..283580c4345a0c5be2f7442e6340dcedc2c59b7c 100644
--- a/doc/source/example/processing/contrast_weighted_nearness.py
+++ b/doc/source/example/processing/contrast_weighted_nearness.py
@@ -1,7 +1,7 @@
 import matplotlib.pyplot as plt
 from navipy.database import DataBaseLoad
 import navipy.processing as processing
-from navipy.sensors import Senses
+from navipy import Brain
 import pkg_resources
 
 
@@ -9,13 +9,13 @@ import pkg_resources
 mydb_filename = pkg_resources.resource_filename(
     'navipy', 'resources/database.db')
 mydb = DataBaseLoad(mydb_filename)
-mysenses = Senses(renderer=mydb)
+mybrain = Brain(renderer=mydb)
 # 2) Define the position-orinetation at which
 # we want the image
 posorient = mydb.posorients.loc[12, :]
-mysenses.update(posorient)
+mybrain.update(posorient)
 my_contrast = processing.pcode.contrast_weighted_nearness(
-    mysenses.vision.scene)
+    mybrain.vision.scene)
 
 f, axarr = plt.subplots(2, 2, figsize=(15, 8))
 axarr = axarr.flatten()
diff --git a/doc/source/example/processing/michelson_contrast.py b/doc/source/example/processing/michelson_contrast.py
index 9d6e12c0b2dcea90e0a6738934de42724047f767..c5681b904ef0f5cfaba14c9e71cdedb50e46baf4 100644
--- a/doc/source/example/processing/michelson_contrast.py
+++ b/doc/source/example/processing/michelson_contrast.py
@@ -1,19 +1,19 @@
 import matplotlib.pyplot as plt
 from navipy.database import DataBaseLoad
 import navipy.processing as processing
-from navipy.sensors import Senses
+from navipy import Brain
 import pkg_resources
 
 # 1) Connect to the database
 mydb_filename = pkg_resources.resource_filename(
     'navipy', 'resources/database.db')
 mydb = DataBaseLoad(mydb_filename)
-mysenses = Senses(renderer=mydb)
+mybrain = Brain(renderer=mydb)
 # 2) Define the position-orinetation at which
 # we want the image
 posorient = mydb.posorients.loc[12, :]
-mysenses.update(posorient)
-my_contrast = processing.pcode.michelson_contrast(mysenses.vision.scene)
+mybrain.update(posorient)
+my_contrast = processing.pcode.michelson_contrast(mybrain.vision.scene)
 
 f, axarr = plt.subplots(2, 2, figsize=(15, 8))
 axarr = axarr.flatten()
diff --git a/doc/source/example/processing/pcv.py b/doc/source/example/processing/pcv.py
index 325d61952fe5357578362b837b6477505d9e9c5c..19d517fd4e63415ad4f35b32cde3a179db414ddc 100644
--- a/doc/source/example/processing/pcv.py
+++ b/doc/source/example/processing/pcv.py
@@ -1,17 +1,17 @@
 # import matplotlib.pyplot as plt
 from navipy.database import DataBaseLoad
 import navipy.processing as processing
-from navipy.sensors import Senses
+from navipy import Brain
 import pkg_resources
 
 # 1) Connect to the database
 mydb_filename = pkg_resources.resource_filename(
     'navipy', 'resources/database.db')
 mydb = DataBaseLoad(mydb_filename)
-mysenses = Senses(renderer=mydb)
+mybrain = Brain(renderer=mydb)
 # 2) Define the position-orinetation at which
 # we want the image
 posorient = mydb.posorients.loc[12, :]
-mysenses.update(posorient)
-my_pcv = processing.pcode.pcv(mysenses.vision.scene,
-                              mysenses.vision.viewing_directions)
+mybrain.update(posorient)
+my_pcv = processing.pcode.pcv(mybrain.vision.scene,
+                              mybrain.vision.viewing_directions)
diff --git a/doc/source/example/processing/skyline.py b/doc/source/example/processing/skyline.py
index 8846f93e37ef579bcb7a58bb712b24fc25874985..d8b2440714fefcd121c9fa6c8c57da05bbf0ca74 100644
--- a/doc/source/example/processing/skyline.py
+++ b/doc/source/example/processing/skyline.py
@@ -1,19 +1,19 @@
 import matplotlib.pyplot as plt
 from navipy.database import DataBaseLoad
 import navipy.processing as processing
-from navipy.sensors import Senses
+from navipy import Brain
 import pkg_resources
 
 # 1) Connect to the database
 mydb_filename = pkg_resources.resource_filename(
     'navipy', 'resources/database.db')
 mydb = DataBaseLoad(mydb_filename)
-mysenses = Senses(renderer=mydb)
+mybrain = Brain(renderer=mydb)
 # 2) Define the position-orinetation at which
 # we want the image
 posorient = mydb.posorients.loc[12, :]
-mysenses.update(posorient)
-my_skyline = processing.pcode.skyline(mysenses.vision.scene)
+mybrain.update(posorient)
+my_skyline = processing.pcode.skyline(mybrain.vision.scene)
 
 f, axarr = plt.subplots(1, 2, figsize=(15, 4))
 for chan_i, chan_n in enumerate(mydb.channels):
diff --git a/doc/source/example/tutorials/asv_homing_grid.py b/doc/source/example/tutorials/asv_homing_grid.py
index 8d3124c5b6bd524409373ecdad206a3a3e312b3c..5f75e8346f17be0729c350521a1b534ac2756e02 100644
--- a/doc/source/example/tutorials/asv_homing_grid.py
+++ b/doc/source/example/tutorials/asv_homing_grid.py
@@ -5,27 +5,41 @@ import pkg_resources
 from navipy.database import DataBaseLoad
 from navipy.processing.pcode import apcv
 from navipy.moving.agent import GridAgent
-from navipy.sensors import Senses
-
+from navipy import Brain
+
+
+# 0) Define a class inheriting from Brain
+class ASVBrain(Brain):
+    def __init__(self, renderer=None):
+        Brain.__init__(self, renderer=renderer)
+        # Init memory: the apcv seen at the goal position
+        posorient = renderer.posorients.loc[12, :]
+        self.update(posorient)
+        self.memory = apcv(self.vision.scene,
+                           self.vision.viewing_directions)
+
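+    # Homing: the velocity is the difference between the memorised apcv
+    # (taken at the goal) and the apcv at the current location, applied
+    # as a translation (dx, dy, dz); the orientation is left unchanged.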
+    def velocity(self):
+        asv = apcv(self.vision.scene,
+                   self.vision.viewing_directions)
+        homing_vector = self.memory - asv
+        homing_vector = np.squeeze(homing_vector[..., 0, :])
+        velocity = pd.Series(data=0,
+                             index=['dx', 'dy', 'dz',
+                                    'dalpha_0', 'dalpha_1', 'dalpha_2'])
+        velocity[['dx', 'dy', 'dz']] = homing_vector
+        return velocity
+
 # 1) Connect to the database
 mydb_filename = pkg_resources.resource_filename(
     'navipy', 'resources/database.db')
 mydb = DataBaseLoad(mydb_filename)
-mysenses = Senses(renderer=mydb)
-# 2) Define the position-orinetation at which
-# we want the image
-posorient = mydb.posorients.loc[12, :]
-mysenses.update(posorient)
-memory = apcv(mysenses.vision.scene,
-              mysenses.vision.viewing_directions)
-
-
+mybrain = ASVBrain(renderer=mydb)
 # Create a grid agent
-my_agent = GridAgent(mydb_filename)
+my_agent = GridAgent(mybrain)
 
 # init position
 rowid = 1
-initpos = mydb.posorients.loc[rowid]
+initpos = mybrain.posorients.loc[rowid]
 my_agent.posorient = initpos
 
 # Mode of motion
@@ -35,23 +49,6 @@ mode_of_motion['param'] = dict()
 mode_of_motion['param']['grid_spacing'] = 1
 my_agent.mode_of_motion = mode_of_motion
 
-# Define a motion function
-
-
-def asv_homing(posorient_vel, senses, memory):
-    asv = apcv(senses.vision.scene,
-               senses.vision.viewing_directions)
-    homing_vector = memory - asv
-    homing_vector = np.squeeze(homing_vector[..., 0, :])
-    velocity = pd.Series(data=0,
-                         index=['dx', 'dy', 'dz',
-                                'dalpha_0', 'dalpha_1', 'dalpha_2'])
-    velocity[['dx', 'dy', 'dz']] = homing_vector
-    return velocity
-
-
-my_agent.motion = asv_homing
-my_agent.motion_param = {'memory': memory}
 # Run
 max_nstep = 100
 trajectory = my_agent.fly(max_nstep, return_tra=True)
diff --git a/navipy/__init__.py b/navipy/__init__.py
index 99e6fe839c8d6ded6fd96f768b1e3f07f7394df3..8c0854c94e0d2d77b520b36f7b33a3e41af889d5 100644
--- a/navipy/__init__.py
+++ b/navipy/__init__.py
@@ -20,12 +20,8 @@ Then the senses can be updated at a new position orientation:
 .. literalinclude:: example/processing/apcv.py
    :lines: 15
 
-Renderer
---------
-.. automodule:: navipy.sensors.renderer
-
 """
-from database import DataBaseLoad
+from navipy.database import DataBaseLoad
 
 class Bunch:
     def __init__(self, **kwds):
@@ -48,6 +44,7 @@ class Brain():
         raise NotImplementedError("Subclasses should implement this")
 
     def update(self, posorient):
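+        # remember the last position-orientation the brain was updated with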
+        self.posorient = posorient
         if self.renderer is not None:
             self.vision.scene = self.renderer.scene(posorient)
 
diff --git a/navipy/moving/agent.py b/navipy/moving/agent.py
index 6a8acb54d9706912c62ae1ab78a46749264be93b..53dae2fc93a40d09092d5c3fb4b54f14a0cfe27d 100644
--- a/navipy/moving/agent.py
+++ b/navipy/moving/agent.py
@@ -11,13 +11,14 @@
 """
 import numpy as np
 import pandas as pd
+import copy
 import networkx as nx
 import multiprocessing
 from multiprocessing import Queue, JoinableQueue, Process
 import inspect
 from navipy.database import DataBaseLoad
 import navipy.moving.maths as navimomath
-from navipy.sensors import Senses
+from navipy import Brain
 
 version = float(nx.__version__)
 
@@ -27,18 +28,20 @@ def defaultcallback(*args, **kwargs):
     raise NameError('No Callback')
 
 
-class DefaultSensors():
+class DefaultBrain():
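+    # Placeholder brain used until the agent is given a real one:
+    # a usable brain must implement update(posorient) and velocity().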
     def __init__(self):
         pass
 
     def update(self, posorient):
         raise NameError('No Callback')
 
+    def velocity(self):
+        raise NameError('No Callback')
+
 
 class AbstractAgent():
     def __init__(self):
-        self._brian = DefaultSensors()
-        self._motion_param = None
+        self._brain = DefaultBrain()
         self._alter_posorientvel = defaultcallback
         self._posorient_col = ['x', 'y', 'z',
                                'alpha_0', 'alpha_1', 'alpha_2']
@@ -93,7 +96,7 @@ class AbstractAgent():
 
     def move(self):
         self._brain.update(self.posorient)
-        self.velocity = self._brian.velocity()
+        self.velocity = self._brain.velocity()
         alteredpos = self._alter_posorientvel(self._posorient_vel)
         self.posorient = alteredpos
         self.velocity = alteredpos
@@ -193,7 +196,7 @@ GridAgent is a close loop agent here its position is snap to a grid.
             move_mode=self._mode_move['mode'],
             move_param=self._mode_move['param'])
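+        # snap the moved position onto the closest position-orientation
+        # known to the brain (the grid of pre-rendered positions)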
         tmp = navimomath.closest_pos(
-            posorient_vel, self._posorients)
+            posorient_vel, self._brain.posorients)
         posorient_vel.loc[self._posorient_col] = \
             tmp.loc[self._posorient_col]
         posorient_vel.name = tmp.name
@@ -248,8 +251,8 @@ the agent motion, or
 2. pre-computed agent-motion
     """
 
-    def __init__(self, brian):
-        self._brain = copy.deepcopy(brain)
+    def __init__(self, brain):
+        self._brain = copy.copy(brain)
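+        # copy.copy is a shallow copy: the copied brain shares its
+        # renderer / database handle with the original brain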
         # Init the graph
         self._graph = nx.DiGraph()
         for row_id, posor in self._brain.posorients.iterrows():
@@ -284,7 +287,7 @@ the agent motion, or
 
         # Start ndatabase loader
         num_agents = ncpu
-        agents = [GridAgent(copy.deepcopy(self._brain),
+        agents = [GridAgent(copy.copy(self._brain),
                             posorients_queue=posorients_queue,
                             results_queue=results_queue)
                   for _ in range(num_agents)]
diff --git a/navipy/moving/test_agent.py b/navipy/moving/test_agent.py
index 0247b407359794581950d17b79712943844394a7..ace9b780245bfab309908e4a5f3f0fedd92b1cab 100644
--- a/navipy/moving/test_agent.py
+++ b/navipy/moving/test_agent.py
@@ -6,6 +6,7 @@ import pandas as pd
 import networkx as nx
 import navipy.moving.agent as naviagent
 import navipy.database as navidb
+from navipy import Brain
 import pkg_resources
 
 import unittest
@@ -13,18 +14,29 @@ import unittest
 version = float(nx.__version__)
 
 
-class TestNavipyMovingAgent(unittest.TestCase):
+class BrainTest(Brain):
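+    # Minimal Brain subclass for the tests: velocity() returns a zero
+    # step along every position and orientation component.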
+    def __init__(self, renderer=None):
+        Brain.__init__(self, renderer=renderer)
+        self.__posorient_col = ['x', 'y', 'z',
+                                'alpha_0', 'alpha_1', 'alpha_2']
+        self.__velocity_col = ['d' + col for col in self.__posorient_col]
+        self.__posorient_vel_col = list(self.__posorient_col)
+        self.__posorient_vel_col.extend(self.__velocity_col)
+
+    def velocity(self):
+        return pd.Series(data=0, index=self.__posorient_vel_col)
 
+class TestNavipyMovingAgent(unittest.TestCase):
     def setUp(self):
         self.mydb_filename = pkg_resources.resource_filename(
             'navipy', 'resources/database.db')
         self.mydb = navidb.DataBaseLoad(self.mydb_filename)
+        self.brain = BrainTest(self.mydb)
         self.__posorient_col = ['x', 'y', 'z',
                                 'alpha_0', 'alpha_1', 'alpha_2']
         self.__velocity_col = ['d' + col for col in self.__posorient_col]
         self.__posorient_vel_col = self.__posorient_col
         self.__posorient_vel_col.extend(self.__velocity_col)
-
     #
     # AbstractAgent
     #
@@ -42,15 +54,12 @@ class TestNavipyMovingAgent(unittest.TestCase):
     # GridAgent
     #
     def test_move_gridagent(self):
-        agent = naviagent.GridAgent(self.mydb_filename)
-        initposorient = agent.db.posorients.loc[13, :]
+        agent = naviagent.GridAgent(self.brain)
+        initposorient = self.brain.posorients.loc[13, :]
         initposovel = pd.Series(data=0,
                                 index=self.__posorient_vel_col)
         initposovel.loc[initposorient.index] = initposorient
         agent.posorient = initposovel
-        agent.motion = lambda posorient, scene:\
-            pd.Series(data=0,
-                      index=self.__posorient_vel_col)
         with self.assertRaises(AttributeError):
             agent.move()
         mode_move = {'mode': 'on_cubic_grid',
@@ -64,15 +73,12 @@ class TestNavipyMovingAgent(unittest.TestCase):
             obtained, initposorient.loc[obtained.index]))
 
     def test_fly_gridagent(self):
-        agent = naviagent.GridAgent(self.mydb_filename)
-        initposorient = agent.db.posorients.loc[13, :]
+        agent = naviagent.GridAgent(self.brain)
+        initposorient = self.brain.posorients.loc[13, :]
         initposovel = pd.Series(data=0,
                                 index=self.__posorient_vel_col)
         initposovel.loc[initposorient.index] = initposorient
         agent.posorient = initposovel
-        agent.motion = lambda posorient, scene:\
-            pd.Series(data=0,
-                      index=self.__posorient_vel_col)
         with self.assertRaises(AttributeError):
             agent.fly(max_nstep=10)
         mode_move = {'mode': 'on_cubic_grid',
@@ -90,7 +96,7 @@ class TestNavipyMovingAgent(unittest.TestCase):
     #
 
     def test_init_graphagent(self):
-        agent = naviagent.GraphAgent(self.mydb_filename)
+        agent = naviagent.GraphAgent(self.brain)
         if version < 2:
             graph_nodes = list(agent.graph.nodes())
         else:
@@ -100,7 +106,7 @@ class TestNavipyMovingAgent(unittest.TestCase):
                          'Init of graph failed. Node missmatch')
 
     def test_graph_setter(self):
-        agent = naviagent.GraphAgent(self.mydb_filename)
+        agent = naviagent.GraphAgent(self.brain)
         if version < 2:
             graph_nodes = list(agent.graph.nodes())
         else:
@@ -128,7 +134,7 @@ class TestNavipyMovingAgent(unittest.TestCase):
         3 Two loops attractors
         """
         # Test all node to first
-        agent = naviagent.GraphAgent(self.mydb_filename)
+        agent = naviagent.GraphAgent(self.brain)
 
         if version < 2:
             graph_nodes = list(agent.graph.nodes())
@@ -193,7 +199,7 @@ class TestNavipyMovingAgent(unittest.TestCase):
         2. Saddle points
         3. Local minima
         """
-        agent = naviagent.GraphAgent(self.mydb_filename)
+        agent = naviagent.GraphAgent(self.brain)
         # Local maxima
         if version < 2:
             graph_nodes = list(agent.graph.nodes())
diff --git a/navipy/sensors/__init__.py b/navipy/sensors/__init__.py
index 3928b27c6bdf132d56924146838d7cfd9e88e83e..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644
--- a/navipy/sensors/__init__.py
+++ b/navipy/sensors/__init__.py
@@ -1,47 +0,0 @@
-"""
-Every agent comes with a battery of senses (biological agent) \
-or sensors (technical agent). The senses of agents in navipy are limited
-to:
-
-* 4d vision (brighness + depth)
-
-The 4d vision sense is controlled by rendering module, either an \
-online rendering or loaded from a database containing pre-rendered images.
-
-For example to use pre-rendered images from a database:
-
-.. literalinclude:: example/processing/apcv.py
-   :lines: 10-11
-
-Then the senses can be updated at a new position orientation:
-
-.. literalinclude:: example/processing/apcv.py
-   :lines: 15
-
-Renderer
---------
-.. automodule:: navipy.sensors.renderer
-
-"""
-
-
-class Bunch:
-    def __init__(self, **kwds):
-        self.__dict__.update(kwds)
-
-
-class Senses():
-    def __init__(self,
-                 renderer=None):
-        self.vision = Bunch(scene=None,
-                            viewing_directions=None,
-                            channels=None)
-        self.renderer = renderer
-        if self.renderer is not None:
-            self.vision.scene = None
-            self.vision.viewing_directions = renderer.viewing_directions
-            self.vision.channels = renderer.channels
-
-    def update(self, posorient):
-        if self.renderer is not None:
-            self.vision.scene = self.renderer.scene(posorient)