From 2f0ca967f03450d7680ef35e3b26c2ba95148913 Mon Sep 17 00:00:00 2001
From: "Olivier J.N. Bertrand" <olivier.bertrand@uni-bielefeld.de>
Date: Thu, 8 Feb 2018 07:24:43 +0100
Subject: [PATCH] Restructure doc as overview, tutorials, references

---
 doc/source/brain.rst                          |  4 -
 doc/source/comparing.rst                      | 27 ------
 doc/source/database.rst                       |  7 --
 doc/source/example/moving/multi_agent.py      |  3 -
 doc/source/index.rst                          | 10 +--
 doc/source/moving.rst                         | 28 ------
 doc/source/overview/brain.rst                 | 41 +++++++++
 doc/source/overview/comparing.rst             | 11 +++
 doc/source/overview/database.rst              | 64 ++++++++++++++
 .../examples}/Cam_blender.svg                 |  0
 .../examples}/Python_blender.svg              |  0
 .../processing => overview/examples}/apcv.py  | 18 ++--
 .../brain => overview/examples}/asv_brain.py  |  0
 .../examples}/blenddemo_beesampling.py        |  0
 .../examples}/blenddemo_cyberbee.py           |  0
 .../examples}/contrast_weighted_nearness.py   |  0
 .../examples}/get_posorients.py               |  0
 .../examples}/load_image_posorient.py         |  0
 .../examples}/load_image_rowid.py             |  0
 .../examples}/michelson_contrast.py           |  0
 .../processing => overview/examples}/pcv.py   |  6 +-
 .../comparing => overview/examples}/ridf.py   |  0
 .../database => overview/examples}/scene.py   |  0
 .../examples}/skyline.py                      |  4 +-
 .../examples}/static_brain.py                 |  0
 doc/source/overview/index.rst                 | 13 +++
 doc/source/overview/moving.rst                | 10 +++
 doc/source/overview/processing.rst            | 87 +++++++++++++++++++
 doc/source/overview/rendering.rst             | 81 +++++++++++++++++
 doc/source/processing.rst                     |  4 -
 doc/source/references/brain.rst               |  4 +
 doc/source/references/comparing.rst           |  5 ++
 doc/source/references/database.rst            |  5 ++
 doc/source/references/index.rst               | 12 +++
 doc/source/references/moving.rst              | 10 +++
 doc/source/references/processing.rst          | 10 +++
 doc/source/references/sensors.rst             | 10 +++
 doc/source/rendering.rst                      |  3 -
 .../examples}/asv_homing_graph.py             |  0
 .../examples}/asv_homing_grid.py              |  0
 .../{tutorials.rst => tutorials/index.rst}    | 26 +++---
 navipy/comparing/__init__.py                  |  7 +-
 navipy/database/__init__.py                   | 61 -------------
 navipy/moving/__init__.py                     |  2 +-
 navipy/moving/agent.py                        | 30 ++++---
 navipy/processing/__init__.py                 | 48 ----------
 navipy/processing/pcode.py                    | 25 ------
 navipy/sensors/bee_sampling.py                | 26 +-----
 navipy/sensors/renderer.py                    | 63 +------------
 49 files changed, 418 insertions(+), 347 deletions(-)
 delete mode 100644 doc/source/brain.rst
 delete mode 100644 doc/source/comparing.rst
 delete mode 100644 doc/source/database.rst
 delete mode 100644 doc/source/example/moving/multi_agent.py
 delete mode 100644 doc/source/moving.rst
 create mode 100644 doc/source/overview/brain.rst
 create mode 100644 doc/source/overview/comparing.rst
 create mode 100644 doc/source/overview/database.rst
 rename doc/source/{example/blender => overview/examples}/Cam_blender.svg (100%)
 rename doc/source/{example/blender => overview/examples}/Python_blender.svg (100%)
 rename doc/source/{example/processing => overview/examples}/apcv.py (55%)
 rename doc/source/{example/brain => overview/examples}/asv_brain.py (100%)
 rename doc/source/{example/rendering => overview/examples}/blenddemo_beesampling.py (100%)
 rename doc/source/{example/rendering => overview/examples}/blenddemo_cyberbee.py (100%)
 rename doc/source/{example/processing => overview/examples}/contrast_weighted_nearness.py (100%)
 rename doc/source/{example/database => overview/examples}/get_posorients.py (100%)
 rename doc/source/{example/database => overview/examples}/load_image_posorient.py (100%)
 rename doc/source/{example/database => overview/examples}/load_image_rowid.py (100%)
 rename doc/source/{example/processing => overview/examples}/michelson_contrast.py (100%)
 rename doc/source/{example/processing => overview/examples}/pcv.py (73%)
 rename doc/source/{example/comparing => overview/examples}/ridf.py (100%)
 rename doc/source/{example/database => overview/examples}/scene.py (100%)
 rename doc/source/{example/processing => overview/examples}/skyline.py (87%)
 rename doc/source/{example/brain => overview/examples}/static_brain.py (100%)
 create mode 100644 doc/source/overview/index.rst
 create mode 100644 doc/source/overview/moving.rst
 create mode 100644 doc/source/overview/processing.rst
 create mode 100644 doc/source/overview/rendering.rst
 delete mode 100644 doc/source/processing.rst
 create mode 100644 doc/source/references/brain.rst
 create mode 100644 doc/source/references/comparing.rst
 create mode 100644 doc/source/references/database.rst
 create mode 100644 doc/source/references/index.rst
 create mode 100644 doc/source/references/moving.rst
 create mode 100644 doc/source/references/processing.rst
 create mode 100644 doc/source/references/sensors.rst
 delete mode 100644 doc/source/rendering.rst
 rename doc/source/{example/tutorials => tutorials/examples}/asv_homing_graph.py (100%)
 rename doc/source/{example/tutorials => tutorials/examples}/asv_homing_grid.py (100%)
 rename doc/source/{tutorials.rst => tutorials/index.rst} (83%)

diff --git a/doc/source/brain.rst b/doc/source/brain.rst
deleted file mode 100644
index 222f808..0000000
--- a/doc/source/brain.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Brain
-=====
-
-.. automodule:: navipy
diff --git a/doc/source/comparing.rst b/doc/source/comparing.rst
deleted file mode 100644
index 7d9a4ad..0000000
--- a/doc/source/comparing.rst
+++ /dev/null
@@ -1,27 +0,0 @@
-Comparing
-=========
-
-.. automodule:: navipy.comparing
-
-Place code
-----------
-
-Image diff
-~~~~~~~~~~
-.. autofunction:: navipy.comparing.simple_imagediff
-
-Euclidian image diff
-~~~~~~~~~~~~~~~~~~~~
-.. autofunction:: navipy.comparing.imagediff
-
-Rotational image difference function
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. autofunction:: navipy.comparing.rot_imagediff
-
-Differential optic flow
-~~~~~~~~~~~~~~~~~~~~~~~
-.. autofunction:: navipy.comparing.diff_optic_flow
-
-
-Memory in networks
-------------------
diff --git a/doc/source/database.rst b/doc/source/database.rst
deleted file mode 100644
index 7b6604c..0000000
--- a/doc/source/database.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-Database
-========
-
-.. automodule:: navipy.database
-
-.. autoclass:: navipy.database.DataBaseLoad
-   :members:
diff --git a/doc/source/example/moving/multi_agent.py b/doc/source/example/moving/multi_agent.py
deleted file mode 100644
index 771404d..0000000
--- a/doc/source/example/moving/multi_agent.py
+++ /dev/null
@@ -1,3 +0,0 @@
-"""
-An example of how to use Multi agent
-"""
diff --git a/doc/source/index.rst b/doc/source/index.rst
index aeda5a5..d87a51c 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -23,13 +23,9 @@ Content
    :maxdepth: 1
 
    gettingstarted
-   brain
-   rendering
-   processing
-   comparing
-   moving
-   database
-   tutorials
+   overview/index
+   tutorials/index
+   references/index
 
 
 Indices and tables
diff --git a/doc/source/moving.rst b/doc/source/moving.rst
deleted file mode 100644
index 4156fd2..0000000
--- a/doc/source/moving.rst
+++ /dev/null
@@ -1,28 +0,0 @@
-Moving
-######
-
-Overview
-********
-.. automodule:: navipy.moving
-
-Close-loop agent
-****************
-
-Online rendering
-================
-.. autoclass:: navipy.moving.agent.CyberBeeAgent
-   :members:
-
-Pre-rendered
-============
-.. autoclass:: navipy.moving.agent.GridAgent
-   :members:
-
-Graph agent
-***********
-.. autoclass:: navipy.moving.agent.GraphAgent
-   :members:
-
-Summary
-*******
-.. automodule:: navipy.moving.agent
diff --git a/doc/source/overview/brain.rst b/doc/source/overview/brain.rst
new file mode 100644
index 0000000..3439dda
--- /dev/null
+++ b/doc/source/overview/brain.rst
@@ -0,0 +1,41 @@
+Brain
+=====
+
+Every agent comes with a brain processing the output of its \
+senses or sensors, for biological and technical agents respectively.
+
+The senses of agents in navipy are limited
+to:
+
+* 4d vision (brightness + depth)
+
+The 4d vision sense is controlled by the rendering module: scenes are \
+either rendered online or loaded from a database of pre-rendered images.
+
+For example, to use pre-rendered images from a database:
+
+.. literalinclude:: examples/apcv.py
+   :lines: 10-11
+
+Then the brain can be updated at a new position-orientation:
+
+.. literalinclude:: examples/apcv.py
+   :lines: 15
+
+Building your own brain
+-----------------------
+
+The Brain class is abstract, and therefore cannot control an agent by \
+itself. To control an agent, a Brain must implement a function called velocity.
+
+For example, a stationary agent should always return a null velocity:
+
+.. literalinclude:: examples/static_brain.py
+   :lines: 3,9-17
+
+An agent using an average skyline homing vector could be built as follows:
+
+.. literalinclude:: examples/asv_brain.py
+   :lines: 4-5,11-36
+
+
diff --git a/doc/source/overview/comparing.rst b/doc/source/overview/comparing.rst
new file mode 100644
index 0000000..cb3d6a1
--- /dev/null
+++ b/doc/source/overview/comparing.rst
@@ -0,0 +1,11 @@
+Comparing
+=========
+
+
+Rotational image difference function
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+  .. literalinclude:: examples/ridf.py
+     :lines: 4,20
+
+  .. plot:: overview/examples/ridf.py
diff --git a/doc/source/overview/database.rst b/doc/source/overview/database.rst
new file mode 100644
index 0000000..26694db
--- /dev/null
+++ b/doc/source/overview/database.rst
@@ -0,0 +1,64 @@
+Database
+========
+
+Databases are generated by the rendering module, and contain all \
+images and their corresponding position-orientations.
+
+* position_orientation: contains every position and orientation at which \
+an image was rendered. The position-orientation is described by \
+['x','y','z','alpha_0','alpha_1','alpha_2']
+* image: contains all rendered images. Each channel of each image \
+is normalised so as to use the full coding range.
+* normalisation: the normalisation constants
+
+
+How to load a database
+----------------------
+
+.. literalinclude:: examples/get_posorients.py
+   :lines: 8
+
+How to load all position-orientations
+--------------------------------------
+
+The database contains all position-orientations \
+at which an image has been rendered. In certain \
+situations, it may be useful to know all \
+position-orientations in the database; more technically \
+speaking, to load the full table of position-orientations.
+
+.. literalinclude:: examples/get_posorients.py
+   :lines: 9-10
+
+.. plot:: overview/examples/get_posorients.py
+
+How to load an image
+--------------------
+
+The database contains images, which can be processed differently \
+depending on the navigation strategy being used.
+
+Images are stored at given position-orientations. To load an image, \
+the position-orientation can be given.
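+For instance, a minimal sketch of loading one rendered image; the
+database filename and the ``scene`` accessor used here are illustrative
+assumptions, not fixed by this patch::
+
+    from navipy.database import DataBaseLoad
+
+    mydb = DataBaseLoad('database.db')  # hypothetical database file
+    # pick one stored position-orientation by its row number
+    posorient = mydb.posorients.loc[12, :]
+    my_scene = mydb.scene(posorient)  # assumed accessor returning the image
+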
+The DataBaseLoad class will \
+check whether this position-orientation has been rendered. If it is \
+the case, the image will be returned.
+
+.. literalinclude:: examples/load_image_posorient.py
+   :lines: 14-23
+
+.. plot:: overview/examples/load_image_posorient.py
+
+However, checking in the database whether an image has already been \
+rendered at a given position-orientation can be costly. To speed up \
+certain calculations, images can instead be accessed by row number. \
+Indeed, each position-orientation can be identified by a unique row \
+number, which is consistent throughout the entire database. Thus, \
+an image can be loaded by providing its row number.
+
+.. literalinclude:: examples/load_image_rowid.py
+   :lines: 13-15
+
+.. plot:: overview/examples/load_image_rowid.py
+
+
+.. todo: channels as part of database
diff --git a/doc/source/example/blender/Cam_blender.svg b/doc/source/overview/examples/Cam_blender.svg
similarity index 100%
rename from doc/source/example/blender/Cam_blender.svg
rename to doc/source/overview/examples/Cam_blender.svg
diff --git a/doc/source/example/blender/Python_blender.svg b/doc/source/overview/examples/Python_blender.svg
similarity index 100%
rename from doc/source/example/blender/Python_blender.svg
rename to doc/source/overview/examples/Python_blender.svg
diff --git a/doc/source/example/processing/apcv.py b/doc/source/overview/examples/apcv.py
similarity index 55%
rename from doc/source/example/processing/apcv.py
rename to doc/source/overview/examples/apcv.py
index 2d5433d..2af5b6d 100644
--- a/doc/source/example/processing/apcv.py
+++ b/doc/source/overview/examples/apcv.py
@@ -1,6 +1,8 @@
 import matplotlib.pyplot as plt
 from navipy.database import DataBaseLoad
-import navipy.processing as processing
+from navipy.processing import pcode
+from navipy.processing import tools
+from navipy.processing import constants
 from navipy import Brain
 import pkg_resources
 
@@ -13,17 +15,17 @@ mybrain = Brain(renderer=mydb)
 # we want the image
 posorient = mydb.posorients.loc[12, :]
 mybrain.update(posorient)
-my_apcv = processing.pcode.apcv(mybrain.vision.scene,
-                                mybrain.vision.viewing_directions)
+my_apcv = pcode.apcv(mybrain.vision.scene,
+                     mybrain.vision.viewing_directions)
 
-my_apcv_sph = processing.tools.cartesian_to_spherical(x=my_apcv[..., 0],
-                                                      y=my_apcv[..., 1],
-                                                      z=my_apcv[..., 2])
+my_apcv_sph = tools.cartesian_to_spherical(x=my_apcv[..., 0],
+                                           y=my_apcv[..., 1],
+                                           z=my_apcv[..., 2])
 
 elevation = mydb.viewing_directions[...,
-                                    processing.constants.__spherical_indeces__[
+                                    constants.__spherical_indeces__[
                                         'elevation']]
 azimuth = mydb.viewing_directions[...,
-                                  processing.constants.__spherical_indeces__[
+                                  constants.__spherical_indeces__[
                                       'azimuth']]
diff --git a/doc/source/example/brain/asv_brain.py b/doc/source/overview/examples/asv_brain.py
similarity index 100%
rename from doc/source/example/brain/asv_brain.py
rename to doc/source/overview/examples/asv_brain.py
diff --git a/doc/source/example/rendering/blenddemo_beesampling.py b/doc/source/overview/examples/blenddemo_beesampling.py
similarity index 100%
rename from doc/source/example/rendering/blenddemo_beesampling.py
rename to doc/source/overview/examples/blenddemo_beesampling.py
diff --git a/doc/source/example/rendering/blenddemo_cyberbee.py b/doc/source/overview/examples/blenddemo_cyberbee.py
similarity index 100%
rename from doc/source/example/rendering/blenddemo_cyberbee.py
rename to doc/source/overview/examples/blenddemo_cyberbee.py
diff --git a/doc/source/example/processing/contrast_weighted_nearness.py b/doc/source/overview/examples/contrast_weighted_nearness.py
similarity index 100%
rename from doc/source/example/processing/contrast_weighted_nearness.py
rename to doc/source/overview/examples/contrast_weighted_nearness.py
diff --git a/doc/source/example/database/get_posorients.py b/doc/source/overview/examples/get_posorients.py
similarity index 100%
rename from doc/source/example/database/get_posorients.py
rename to doc/source/overview/examples/get_posorients.py
diff --git a/doc/source/example/database/load_image_posorient.py b/doc/source/overview/examples/load_image_posorient.py
similarity index 100%
rename from doc/source/example/database/load_image_posorient.py
rename to doc/source/overview/examples/load_image_posorient.py
diff --git a/doc/source/example/database/load_image_rowid.py b/doc/source/overview/examples/load_image_rowid.py
similarity index 100%
rename from doc/source/example/database/load_image_rowid.py
rename to doc/source/overview/examples/load_image_rowid.py
diff --git a/doc/source/example/processing/michelson_contrast.py b/doc/source/overview/examples/michelson_contrast.py
similarity index 100%
rename from doc/source/example/processing/michelson_contrast.py
rename to doc/source/overview/examples/michelson_contrast.py
diff --git a/doc/source/example/processing/pcv.py b/doc/source/overview/examples/pcv.py
similarity index 73%
rename from doc/source/example/processing/pcv.py
rename to doc/source/overview/examples/pcv.py
index 19d517f..5f0b107 100644
--- a/doc/source/example/processing/pcv.py
+++ b/doc/source/overview/examples/pcv.py
@@ -1,6 +1,6 @@
 # import matplotlib.pyplot as plt
 from navipy.database import DataBaseLoad
-import navipy.processing as processing
+from navipy.processing import pcode
 from navipy import Brain
 import pkg_resources
 
@@ -13,5 +13,5 @@ mybrain = Brain(renderer=mydb)
 # we want the image
 posorient = mydb.posorients.loc[12, :]
 mybrain.update(posorient)
-my_pcv = processing.pcode.pcv(mybrain.vision.scene,
-                              mybrain.vision.viewing_directions)
+my_pcv = pcode.pcv(mybrain.vision.scene,
+                   mybrain.vision.viewing_directions)
diff --git a/doc/source/example/comparing/ridf.py b/doc/source/overview/examples/ridf.py
similarity index 100%
rename from doc/source/example/comparing/ridf.py
rename to doc/source/overview/examples/ridf.py
diff --git a/doc/source/example/database/scene.py b/doc/source/overview/examples/scene.py
similarity index 100%
rename from doc/source/example/database/scene.py
rename to doc/source/overview/examples/scene.py
diff --git a/doc/source/example/processing/skyline.py b/doc/source/overview/examples/skyline.py
similarity index 87%
rename from doc/source/example/processing/skyline.py
rename to doc/source/overview/examples/skyline.py
index d8b2440..1932fb7 100644
--- a/doc/source/example/processing/skyline.py
+++ b/doc/source/overview/examples/skyline.py
@@ -1,6 +1,6 @@
 import matplotlib.pyplot as plt
 from navipy.database import DataBaseLoad
-import navipy.processing as processing
+from navipy.processing import pcode as processing
 from navipy import Brain
 import pkg_resources
 
@@ -13,7 +13,7 @@ mybrain = Brain(renderer=mydb)
 # we want the image
 posorient = mydb.posorients.loc[12, :]
 mybrain.update(posorient)
-my_skyline = processing.pcode.skyline(mybrain.vision.scene)
+my_skyline = processing.skyline(mybrain.vision.scene)
 
 f, axarr = plt.subplots(1, 2, figsize=(15, 4))
 for chan_i, chan_n in enumerate(mydb.channels):
diff --git a/doc/source/example/brain/static_brain.py b/doc/source/overview/examples/static_brain.py
similarity index 100%
rename from doc/source/example/brain/static_brain.py
rename to doc/source/overview/examples/static_brain.py
diff --git a/doc/source/overview/index.rst b/doc/source/overview/index.rst
new file mode 100644
index 0000000..2fb66a4
--- /dev/null
+++ b/doc/source/overview/index.rst
@@ -0,0 +1,13 @@
+
+Overview
+========
+
+.. toctree::
+   :maxdepth: 2
+
+   brain
+   rendering
+   processing
+   comparing
+   moving
+   database
diff --git a/doc/source/overview/moving.rst b/doc/source/overview/moving.rst
new file mode 100644
index 0000000..9737f5c
--- /dev/null
+++ b/doc/source/overview/moving.rst
@@ -0,0 +1,10 @@
+Moving
+######
+
+Overview
+********
+.. automodule:: navipy.moving
+
+Summary
+*******
+.. automodule:: navipy.moving.agent
diff --git a/doc/source/overview/processing.rst b/doc/source/overview/processing.rst
new file mode 100644
index 0000000..02359bf
--- /dev/null
+++ b/doc/source/overview/processing.rst
@@ -0,0 +1,87 @@
+Processing a scene
+==================
+
+An agent comes equipped with a battery of sensors, such as a camera, \
+depth estimation sensor, compass, and odometer. Here, we focus on \
+the processing of the retino-topic information provided by a camera and a \
+depth estimation sensor. This retino-topic information is referred to as a scene.
+
+image based scene (IBS)
+    A classical image. Each pixel is viewed in a direction
+    (elevation,azimuth) in a regular manner.
+    In that case the scene is a 4d numpy array
+    [elevation-index,azimuth-index,channel-index,1].
+
+Ommatidium based scene (OBS)
+    In an ommatidia based scene, the viewing directions
+    do not need to be regularly spaced.
+    In that case the scene is a 3d numpy array
+    [ommatidia-index, channel-index,1].
+
+Place code
+----------
+Processing a scene yields a certain encoding of the information at the location \
+where the scene was acquired, i.e. rendered or seen by the agent.
+
+By extension, a place-code is either image based or ommatidium based.
+The number of dimensions of an ib-place-code is always 4, and of an
+ob-place-code always 3.
+
+image based place-code (IBPC)
+    A place code derived from an IBS. Each pixel is viewed in a direction
+    (elevation,azimuth) in a regular manner.
+    In that case the place-code is a 4d numpy array
+    [elevation-index,azimuth-index,channel-index,component-index].
+
+Ommatidium based place-code (OBPC)
+    A place code derived from an OBS; the viewing directions
+    do not need to be regularly spaced.
+    In that case the place-code is a 3d numpy array
+    [ommatidia-index, channel-index,component-index].
+
+
+Abusing the terminology slightly, a scene can itself be treated as a place-code.
+Therefore an IBS and an OBS have 4 and 3 dimensions, respectively.
+
+Skyline
+~~~~~~~
+
+  .. literalinclude:: examples/skyline.py
+     :lines: 16
+
+  .. plot:: overview/examples/skyline.py
+
+
+Michelson-contrast
+~~~~~~~~~~~~~~~~~~
+
+
+  .. literalinclude:: examples/michelson_contrast.py
+     :lines: 16
+
+  .. plot:: overview/examples/michelson_contrast.py
+
+
+Contrast weighted nearness
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+  .. literalinclude:: examples/contrast_weighted_nearness.py
+     :lines: 17-18
+
+  .. plot:: overview/examples/contrast_weighted_nearness.py
+
+Place code vectors
+~~~~~~~~~~~~~~~~~~
+
+  .. literalinclude:: examples/pcv.py
+     :lines: 16-17
+
+  .. plot:: overview/examples/pcv.py
+
+Average place code vectors
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+  .. literalinclude:: examples/apcv.py
+     :lines: 16-17
+
+  .. plot:: overview/examples/apcv.py
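+
+The average place-code vector can then be read out as a direction, for
+example by converting it to spherical coordinates; a minimal sketch
+reusing the calls shown in the apcv.py example above::
+
+    from navipy.processing import tools
+
+    # my_apcv as returned by pcode.apcv in the example above
+    my_apcv_sph = tools.cartesian_to_spherical(x=my_apcv[..., 0],
+                                               y=my_apcv[..., 1],
+                                               z=my_apcv[..., 2])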
diff --git a/doc/source/overview/rendering.rst b/doc/source/overview/rendering.rst
new file mode 100644
index 0000000..58a546b
--- /dev/null
+++ b/doc/source/overview/rendering.rst
@@ -0,0 +1,81 @@
+Rendering
+=========
+
+Navipy & blender
+----------------
+What is blender?
+~~~~~~~~~~~~~~~~
+Explain blender
+
+Create a world
+~~~~~~~~~~~~~~
+Explain How to create env for navipy
+
+Using navipy in blender
+~~~~~~~~~~~~~~~~~~~~~~~
+Blender comes with its own python installation. Thus, we need to \
+tell blender to use our virtualenv, where the navigation toolbox \
+is installed. To do so, we need to import the os module:
+
+.. literalinclude:: ../blender_run.py
+   :lines: 6 - 7
+
+then activate the environment by using the following function:
+
+.. literalinclude:: ../blender_run.py
+   :lines: 13 - 18
+
+Here venv_path is the path to the virtual environment within which \
+navipy has been installed.
+
+Now, blender can import all modules used by the navigation toolbox.
+
+How to run python code with blender:
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+>>> blender path/to/world.blend --background --python path/to/code.py
+
+How to generate a database using blender
+----------------------------------------
+
+.. literalinclude:: examples/blenddemo_beesampling.py
+   :lines: 6
+
+With the toolbox at our disposal, we just need to configure the \
+BeeSampling to render images on a regular 3D grid.
+
+.. literalinclude:: examples/blenddemo_beesampling.py
+   :lines: 9
+
+.. literalinclude:: examples/blenddemo_beesampling.py
+   :lines: 12-19
+
+If we want to use the distance to objects, we need to tell the \
+BeeSampling the maximum distance to objects in the environment. \
+Otherwise the distance can extend to infinity, and since the images are \
+compressed in the database, all distances to objects would be equal to \
+zero:
+
+.. literalinclude:: examples/blenddemo_beesampling.py
+   :lines: 23-24
+
+Finally, we can generate the database.
+
+.. literalinclude:: examples/blenddemo_beesampling.py
+   :lines: 28-29
+
+Custom sampling
+---------------
+
+  .. literalinclude:: examples/blenddemo_cyberbee.py
+     :lines: 5
+
+  With the toolbox at our disposal, we just need to configure the \
+  Cyberbee to render images at desired positions.
+
+  .. literalinclude:: examples/blenddemo_cyberbee.py
+     :lines: 8-13
+
+  To render a scene at given positions, we just have to do:
+
+  .. literalinclude:: examples/blenddemo_cyberbee.py
+     :lines: 14-22
diff --git a/doc/source/processing.rst b/doc/source/processing.rst
deleted file mode 100644
index 6322afd..0000000
--- a/doc/source/processing.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Processing a scene
-==================
-
-.. automodule:: navipy.processing
diff --git a/doc/source/references/brain.rst b/doc/source/references/brain.rst
new file mode 100644
index 0000000..71c5a93
--- /dev/null
+++ b/doc/source/references/brain.rst
@@ -0,0 +1,4 @@
+Brain
+-----
+
+.. autoclass:: navipy.Brain
diff --git a/doc/source/references/comparing.rst b/doc/source/references/comparing.rst
new file mode 100644
index 0000000..d3bd13f
--- /dev/null
+++ b/doc/source/references/comparing.rst
@@ -0,0 +1,5 @@
+Comparing
+---------
+
+.. automodule:: navipy.comparing
+   :members:
diff --git a/doc/source/references/database.rst b/doc/source/references/database.rst
new file mode 100644
index 0000000..4b7cef6
--- /dev/null
+++ b/doc/source/references/database.rst
@@ -0,0 +1,5 @@
+Database
+--------
+
+.. automodule:: navipy.database
+   :members:
diff --git a/doc/source/references/index.rst b/doc/source/references/index.rst
new file mode 100644
index 0000000..b3fbe6c
--- /dev/null
+++ b/doc/source/references/index.rst
@@ -0,0 +1,12 @@
+References
+==========
+
+.. toctree::
+   :maxdepth: 2
+
+   brain
+   sensors
+   processing
+   comparing
+   moving
+   database
diff --git a/doc/source/references/moving.rst b/doc/source/references/moving.rst
new file mode 100644
index 0000000..d484c30
--- /dev/null
+++ b/doc/source/references/moving.rst
@@ -0,0 +1,10 @@
+Moving
+------
+
+.. automodule:: navipy.moving
+
+Agents
+~~~~~~
+
+.. automodule:: navipy.moving.agent
+   :members:
diff --git a/doc/source/references/processing.rst b/doc/source/references/processing.rst
new file mode 100644
index 0000000..6f3291b
--- /dev/null
+++ b/doc/source/references/processing.rst
@@ -0,0 +1,10 @@
+Processing
+----------
+
+.. automodule:: navipy.processing
+
+Place code
+~~~~~~~~~~
+
+.. automodule:: navipy.processing.pcode
+   :members:
diff --git a/doc/source/references/sensors.rst b/doc/source/references/sensors.rst
new file mode 100644
index 0000000..08c72fd
--- /dev/null
+++ b/doc/source/references/sensors.rst
@@ -0,0 +1,10 @@
+Sensors
+-------
+
+.. automodule:: navipy.sensors
+
+Renderer
+~~~~~~~~
+
+.. automodule:: navipy.sensors.renderer
+   :members:
diff --git a/doc/source/rendering.rst b/doc/source/rendering.rst
deleted file mode 100644
index 2d70aad..0000000
--- a/doc/source/rendering.rst
+++ /dev/null
@@ -1,3 +0,0 @@
-Rendering
-=========
-.. automodule:: navipy.rendering
diff --git a/doc/source/example/tutorials/asv_homing_graph.py b/doc/source/tutorials/examples/asv_homing_graph.py
similarity index 100%
rename from doc/source/example/tutorials/asv_homing_graph.py
rename to doc/source/tutorials/examples/asv_homing_graph.py
diff --git a/doc/source/example/tutorials/asv_homing_grid.py b/doc/source/tutorials/examples/asv_homing_grid.py
similarity index 100%
rename from doc/source/example/tutorials/asv_homing_grid.py
rename to doc/source/tutorials/examples/asv_homing_grid.py
diff --git a/doc/source/tutorials.rst b/doc/source/tutorials/index.rst
similarity index 83%
rename from doc/source/tutorials.rst
rename to doc/source/tutorials/index.rst
index 78c50b6..2d4fe5e 100644
--- a/doc/source/tutorials.rst
+++ b/doc/source/tutorials/index.rst
@@ -24,7 +24,7 @@ the velocity of the agent.
 Our agent needs to have a function to convert its current state to a motion. \
 This function, velocity, can be added as follow:
 
-.. literalinclude:: example/tutorials/asv_homing_grid.py
+.. literalinclude:: examples/asv_homing_grid.py
    :lines: 12-30
 
@@ -33,33 +33,33 @@ On a grid
 ~~~~~~~~~
 
 By restricting the agent motion on a grid, we can used a database containing \
 images rendered at pre defined location (the grid nodes).
 
-.. literalinclude:: example/tutorials/asv_homing_grid.py
-   :lines: 35
+.. literalinclude:: examples/asv_homing_grid.py
+   :lines: 36
 
 And initialise the senses of our virtual agent
 
-.. literalinclude:: example/tutorials/asv_homing_grid.py
-   :lines: 36
+.. literalinclude:: examples/asv_homing_grid.py
+   :lines: 37
 
 Now we have to initialise an agent moving on a grid (i.e. a GridAgent)
 
-.. literalinclude:: example/tutorials/asv_homing_grid.py
-   :lines: 38
+.. literalinclude:: examples/asv_homing_grid.py
+   :lines: 39
 
 at an initial position
 
-.. literalinclude:: example/tutorials/asv_homing_grid.py
-   :lines: 40-43
+.. literalinclude:: examples/asv_homing_grid.py
+   :lines: 42-44
 
 a mode of motion corresponding to the grid used in the database
 
-.. literalinclude:: example/tutorials/asv_homing_grid.py
-   :lines: 36-50
+.. literalinclude:: examples/asv_homing_grid.py
+   :lines: 47-51
 
 Finally our agent is ready to fly for a number of step or until its
 velocity is null.
 
-.. literalinclude:: example/tutorials/asv_homing_grid.py
-   :lines: 53-54
+.. literalinclude:: examples/asv_homing_grid.py
+   :lines: 54-55
 
 In close loop
 ~~~~~~~~~~~~~
diff --git a/navipy/comparing/__init__.py b/navipy/comparing/__init__.py
index e33ae90..934ef27 100644
--- a/navipy/comparing/__init__.py
+++ b/navipy/comparing/__init__.py
@@ -1,7 +1,5 @@
 """
-The Place comparator list different methods to
-compare a current place to a memorised place or
-memorised places.
+Comparing
 """
 import numpy as np
 from navipy.processing.tools import is_ibpc, is_obpc
@@ -73,10 +71,7 @@ the current and memorised place code.
 ..note: assume that the image is periodic along the x axis
        (the left-right axis)
 
-    .. literalinclude:: example/comparing/ridf.py
-       :lines: 4,20
 
-    .. plot:: example/comparing/ridf.py
     """
     if not is_ibpc(current):  # and not is_obpc(current):
         raise TypeError('The current and memory place code\
diff --git a/navipy/database/__init__.py b/navipy/database/__init__.py
index a31f25f..1a3d7b6 100644
--- a/navipy/database/__init__.py
+++ b/navipy/database/__init__.py
@@ -1,65 +1,4 @@
 """
-Database are generated by the rendering module, and contains all \
-images and there corresponding position-orientations.
-
-* position_orientation: containing all position and orientation of where \
-images were rendered. The position-orientation is described by \
-['x','y','z','alpha_0','alpha_1','alpha_2']
-* image: containing all images ever rendered. Each channel of each image \
-are normalised, so to use the full coding range.
-* normalisation: the normalisation constantes
-
-
-How to load a database
-----------------------
-
-.. literalinclude:: example/database/get_posorients.py
-   :lines: 8
-
-How to load all position-orientation
-------------------------------------
-
-The database contains all position-orientation \
-at which an image as been rendered. In certain \
-situation, it may be usefull to know all \
-position-orientation in the database. More technically \
-speaking, loading the full table of position-orientaiton.
-
-.. literalinclude:: example/database/get_posorients.py
-   :lines: 9-10
-
-.. plot:: example/database/get_posorients.py
-
-How to load an image
---------------------
-
-The database contains images which can be processed differently \
-depending on the navigation strategy beeing used.
-
-Images are at given position-orientations. To load an image \
-the position-orientation can be given. The DataBaseLoader will \
-look if this position-orientation has been rendered. If it is \
-the case, the image will be returned.
-
-.. literalinclude:: example/database/load_image_posorient.py
-   :lines: 14-23
-
-.. plot:: example/database/load_image_posorient.py
-
-However, looking in the database if an image has already been \
-rendered at a given position-orientation can cost time. To speed up \
-certain calculation, image can instead be access by row number. \
-Indeed each position-orientation can be identified by a unique row \
-number. This number is consistant through the entire database. Thus, \
-an image can be loaded by providing the row number.
-
-.. literalinclude:: example/database/load_image_rowid.py
-   :lines: 13-15
-
-.. plot:: example/database/load_image_rowid.py
-
-
-.. todo: channels as part of database
 """
diff --git a/navipy/moving/__init__.py b/navipy/moving/__init__.py
index 9fddbbf..7a199eb 100644
--- a/navipy/moving/__init__.py
+++ b/navipy/moving/__init__.py
@@ -5,7 +5,7 @@ A standard method to move an agent is to update:
 
 1. update the sensory information at the current agent location :math:`x`
 2. deduce the agent motion :math:`vdt` from this information
-3. displace the agent by motion ( :math:`x \rightarrow x + vdt`)
+3. displace the agent by motion ( :math:`x\\rightarrow x + vdt`)
 
 
 The use of a close loop model including visual rendering is \
diff --git a/navipy/moving/agent.py b/navipy/moving/agent.py
index c03eb97..9036c1e 100644
--- a/navipy/moving/agent.py
+++ b/navipy/moving/agent.py
@@ -1,13 +1,24 @@
 """
-+----------------+--------------+-------------+
-|Agent class     |Type of agent | Rendering   +
-+================+==============+=============+
-|:CyberBeeAgent: |Close loop    |Online       |
-+----------------+              |-------------+
-|:GraphAgent:    |              |Pre-rendered |
-+----------------+--------------+             |
-|:GridAgent:     +Open loop     |             |
-+----------------+--------------+-------------+
++--------------------------------------------+\
+--------------+-------------+
+|Agent class                                 |\
+Type of agent | Rendering   |
++============================================+\
+==============+=============+
+|:class:`navipy.moving.agent.CyberBeeAgent`  |\
+Close loop    |Online       |
++--------------------------------------------+\
+              +-------------+
+|:class:`navipy.moving.agent.GraphAgent`     |\
+              |Pre-rendered |
++--------------------------------------------+\
+--------------+             +
+|:class:`navipy.moving.agent.GridAgent`      |\
+Open loop     |             |
++--------------------------------------------+\
+--------------+-------------+
+
+
 """
 import numpy as np
 import pandas as pd
@@ -122,7 +133,6 @@ class CyberBeeAgent(AbstractAgent):
 
     CyberBeeAgent is a close loop agent and need to be run within blender \
     (see :doc:`rendering`).
-    bla
 
     """
 
     def __init__(self, brain):
diff --git a/navipy/processing/__init__.py b/navipy/processing/__init__.py
index 4729dfb..6af00b0 100644
--- a/navipy/processing/__init__.py
+++ b/navipy/processing/__init__.py
@@ -1,46 +1,4 @@
 """
-An agent comes equipped with a battery of sensors, such as a camera \
-depth estimation sensors, compass, and odometer. Here, we focus on the \
-the processing of retino-topic information provided by a camera and a \
-depth estimation sensor. This retino-topic information is refer as a scene.
-
-image based scene (IBS)
-    A classical image. Each pixel is viewed in a direction
-    (elevation,azimuth) in a regular manner.
-    In that case the scene is a 4d numpy array
-    [elevation-index,azimuth-index,channel-index,1].
-
-Omatidium based scene (OBS)
-    In an ommatidia based scene, the viewing direction
-    do not need to be regularally spaced.
-    In that case the scene is a 3d numpy array
-    [ommatidia-index, channel-index,1].
-
-Place code
-----------
-Processing a scene yield to a certain encoding of information at the location \
-where the scene was acquired, rendered, seen by the agent.
-
-By extension a place-code is either image based or ommatidium based.
-The number of dimension of an ib-place-code is always 4, and of an
-ob-place-code always 3.
-
-image based place-code (IBPC)
-    A place code derived from IBS. Each pixel is viewed in a direction
-    (elevation,azimuth) in a regular manner.
-    In that case the scene is a 4d numpy array
-    [elevation-index,azimuth-index,channel-index,component-index].
-
-Omatidium based place-code (OBPC)
-    A place code derived from OBS, the viewing direction
-    do not need to be regularally spaced.
-    In that case the scene is a 3d numpy array
-    [ommatidia-index, channel-index,component-index].
-
-
-Abusing the terminology of a place-code, a scene can be a place-code.
-Therefore ibs and obs have 4 and 3 dimension, respectively.
-
 Skyline
 ~~~~~~~
 .. autofunction:: navipy.processing.pcode.skyline
@@ -61,10 +19,4 @@ Average place-code vector
 ~~~~~~~~~~~~~~~~~~~~~~~~~
 .. autofunction:: navipy.processing.pcode.apcv
 
-Motion code
------------
-Optic flow
-~~~~~~~~~~
-.. autofunction:: navipy.processing.mcode.optic_flow
-
 """
diff --git a/navipy/processing/pcode.py b/navipy/processing/pcode.py
index b637f19..8a6d038 100644
--- a/navipy/processing/pcode.py
+++ b/navipy/processing/pcode.py
@@ -20,11 +20,6 @@ def skyline(scene):
     :returns: the skyline [1,azimuth,channel,1]
     :rtype: np.ndarray
 
-    .. literalinclude:: example/processing/skyline.py
-       :lines: 16
-
-    .. plot:: example/processing/skyline.py
-
     """
     if not is_ibpc(scene):
         raise TypeError('scene should be image based to compute a skyline')
@@ -49,11 +44,6 @@ and minimum of the local image intensity
     :returns: the michelson-contrast
     :rtype: np.ndarray
 
-    .. literalinclude:: example/processing/michelson_contrast.py
-       :lines: 16
-
-    .. plot:: example/processing/michelson_contrast.py
-
     """
     check_scene(scene)
     if not is_ibpc(scene):
@@ -88,11 +78,6 @@ def contrast_weighted_nearness(scene, contrast_size=3, distance_channel=3):
        and minimum of the local image intensity in the michelson-contrast.
     :param distance_channel: the index of the distance-channel.
 
-    .. literalinclude:: example/processing/contrast_weighted_nearness.py
-       :lines: 17-18
-
-    .. plot:: example/processing/contrast_weighted_nearness.py
-
     """
     check_scene(scene)
     if not isinstance(contrast_size, int):
@@ -121,11 +106,6 @@ def pcv(place_code, viewing_directions):
     :returns: the place code vectors in cartesian coordinates
     :rtype: (np.ndarray)
 
-    .. literalinclude:: example/processing/pcv.py
-       :lines: 16-17
-
-    .. plot:: example/processing/pcv.py
-
     """
     # print("place code shape",place_code.shape)
     if is_ibpc(place_code):
@@ -171,11 +151,6 @@ def apcv(place_code, viewing_directions):
     :returns: the average place-code vector
     :rtype: (np.ndarray)
 
-    .. literalinclude:: example/processing/apcv.py
-       :lines: 16-17
-
-    .. plot:: example/processing/apcv.py
-
     """
     check_scene(place_code)
     check_viewing_direction(viewing_directions)
diff --git a/navipy/sensors/bee_sampling.py b/navipy/sensors/bee_sampling.py
index 8a9d5f1..3c1472b 100644
--- a/navipy/sensors/bee_sampling.py
+++ b/navipy/sensors/bee_sampling.py
@@ -1,29 +1,5 @@
 """
-.. literalinclude:: example/rendering/blenddemo_beesampling.py
-   :lines: 6
-
-With the toolbox at disposition we just need to configure the \
-BeeSampling to render images on a regular 3D grid.
-
-.. literalinclude:: example/rendering/blenddemo_beesampling.py
-   :lines: 9
-
-.. literalinclude:: example/rendering/blenddemo_beesampling.py
-   :lines: 12-19
-
-If we want to use the distance to objects, we need to tell the \
-BeeSampling what is the maximum distance to objects in the environment.\
- Otherwise the distance can go until infinity, and since the image are \
-compressed in the database, all distance to object will be equal to \
-zero:
-
-.. literalinclude:: example/rendering/blenddemo_beesampling.py
-   :lines: 23-24
-
-Finally we can generate the database.
-
-.. literalinclude:: example/rendering/blenddemo_beesampling.py
-   :lines: 28-29
+Bee sampler / database creator
 """
 import warnings
 try:
diff --git a/navipy/sensors/renderer.py b/navipy/sensors/renderer.py
index df6bafe..af791f7 100644
--- a/navipy/sensors/renderer.py
+++ b/navipy/sensors/renderer.py
@@ -1,52 +1,5 @@
 """
-Navipy & blender
-----------------
-What is blender?
-~~~~~~~~~~~~~~~~
-Explain blender
-
-Create a world
-~~~~~~~~~~~~~~
-Explain How to create env for navipy
-
-Using navipy in blender
-~~~~~~~~~~~~~~~~~~~~~~~
-Blender comes with its own python installation. Thus, we need to \
-tell blender to use our virtualenv where the navigation toolbox \
-is installed. To do we need to import the os module
-
-.. literalinclude:: blender_run.py
-   :lines: 6 - 7
-
-then activate the environment by using the following function:
-
-.. literalinclude:: blender_run.py
-   :lines: 13 - 18
-
-here venv_path is the path to the virtual environment within which \
-navipy has been installed.
-
-Now, blender can import all modules used by the navigation toolbox.
-
-How to run python code with blender:
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
->>> blender path/to/world.blend --background --python path/to/code.py
-
-How to generate a database using blender
-----------------------------------------
-.. automodule:: navipy.sensors.bee_sampling
-
-Custom sampling
----------------
-.. autoclass:: navipy.sensors.renderer.BlenderRender
-
-Rendering classes
------------------
-.. autoclass:: navipy.sensors.bee_sampling.BeeSampling
-   :members:
-
-.. autoclass:: navipy.sensors.renderer.BlenderRender
-   :members:
+Renderer
 """
 import warnings
 try:
@@ -70,20 +23,6 @@ class BlenderRender():
     The Bee eye is a panoramic camera with equirectangular projection
     The light rays attaining the eyes are filtered with a gaussian.
 
-    .. literalinclude:: example/rendering/blenddemo_cyberbee.py
-       :lines: 5
-
-    With the toolbox at disposition we just need to configure the \
-    Cyberbee to render images at desired positions.
-
-    .. literalinclude:: example/rendering/blenddemo_cyberbee.py
-       :lines: 8-13
-
-    To render a scene at a given positions we just have to do:
-
-    .. literalinclude:: example/rendering/blenddemo_cyberbee.py
-       :lines: 14-22
-
     """
 
     def __init__(self):
-- 
GitLab