Commit 687fb470 authored by Olivier Bertrand's avatar Olivier Bertrand

Merge branch 'master' into OpticFlow

parents db486708 623a57ec
Showing with 352 additions and 76 deletions
"""
Run a script within blender
"""
import os
import sys
import shutil
import inspect
import tempfile
import argparse


def activate_virtualenv(venv_path):
    """ activate venv

    Blender comes with its own python installation. Thus, we need to \
    tell blender to use our virtualenv where the navigation toolbox \
    is installed.
    """
    if venv_path is None:
        raise NameError('Python is not running within a virtualenv')
    filepath = os.path.join(venv_path, 'bin', 'activate_this.py')
    with open(filepath, 'r') as f:
        exec(f.read(), dict(__file__=filepath))


def blender_version(pyversion):
    """ check version

    Blender comes with its own version of python, which should
    match the one used by navipy
    """
    blendpyversion = sys.version_info[:3]
    if blendpyversion != pyversion:
        errormsg = 'Blender comes with its own version of python'
        errormsg += ' (here: {}). To avoid hard to debug issues,'
        errormsg += ' the python version used by navipy (here: {})'
        errormsg += ' and the python version used by blender should match'
        errormsg = errormsg.format(blendpyversion, pyversion)
        raise NameError(errormsg)


def parser_blendnavipy():
    # Create command line options
    parser = argparse.ArgumentParser()
    arghelp = 'Path to the environment (.blend) in which your agent lives'
    parser.add_argument('--blender-world',
                        type=str,
                        default=None,
                        help=arghelp)
    arghelp = 'Path to your python script to be run in blender'
    parser.add_argument('--python-script',
                        type=str,
                        default=None,
                        help=arghelp)
    arghelp = 'Command to run blender\n'
    arghelp += 'If not provided, the script will try to find the command'
    arghelp += " by using: shutil.which('blender')"
    parser.add_argument('--blender-command',
                        type=str,
                        default=None,
                        help=arghelp)
    arghelp = 'To display some stuff \n'
    arghelp += ' * -v print command \n'
    arghelp += ' * -vv print also script'
    parser.add_argument('-v', '--verbose',
                        action='count',
                        default=0,
                        help=arghelp)
    return parser


def main():
    # Find the name of the virtualenv, so that we can activate
    # it in blender
    venv_path = sys.base_prefix
    # Find python version to be checked against blender python version
    pyver = sys.version_info[:3]
    # encoding for temporary file
    encoding = 'utf-8'
    args = parser_blendnavipy().parse_args()
    if args.blender_command is None:
        # Find blender command to do a system call
        args.blender_command = shutil.which('blender')
    python_script = args.python_script
    header = '""" Script generated by {}"""\n'.format(sys.argv[0])
    with tempfile.NamedTemporaryFile() as tfile:
        # Start of file
        tfile.write(header.encode(encoding))
        tfile.write('# check blender version\n'.encode(encoding))
        tfile.write('import sys \n'.encode(encoding))
        for line in inspect.getsourcelines(blender_version)[0]:
            tfile.write(line.encode(encoding))
        line = 'blender_version({})\n'.format(pyver)
        tfile.write(line.encode(encoding))
        tfile.write('# activate virtualenv within blender\n'.encode(encoding))
        tfile.write('import os \n'.encode(encoding))
        for line in inspect.getsourcelines(activate_virtualenv)[0]:
            tfile.write(line.encode(encoding))
        line = 'activate_virtualenv("{}")\n'.format(venv_path)
        tfile.write(line.encode(encoding))
        tfile.write('# run simulation\n'.encode(encoding))
        with open(python_script) as infile:
            for line in infile:
                tfile.write(line.encode(encoding))
        tfile.write('print("I am done")\n'.encode(encoding))
        tfile.seek(0)
        # End of file
        if args.verbose > 1:
            print('Script to be run:')
            print('=================')
            print(tfile.read().decode(encoding))
            tfile.seek(0)
        command = '{} {} --background --python {}'.format(
            args.blender_command,
            args.blender_world,
            tfile.name)
        if args.verbose > 0:
            print('Run blender with the following command')
            print('======================================')
            print('>>> ' + command)
        os.system(command)


if __name__ == "__main__":
    # execute only if run as a script
    main()
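
Assuming this file is saved as blendnavipy.py (a hypothetical name taken
from parser_blendnavipy), a typical invocation could look like:

    python blendnavipy.py --blender-world world.blend \
        --python-script my_simulation.py -vv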
Comparing
=========
.. automodule:: navipy.comparing
Place code
----------
Image diff
~~~~~~~~~~
.. autofunction:: navipy.comparing.simple_imagediff
Euclidean image diff
~~~~~~~~~~~~~~~~~~~~
.. autofunction:: navipy.comparing.imagediff
Rotational image difference function
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autofunction:: navipy.comparing.rot_imagediff
Differential optic flow
~~~~~~~~~~~~~~~~~~~~~~~
.. autofunction:: navipy.comparing.diff_optic_flow
Memory in networks
------------------
......@@ -41,7 +41,8 @@ extensions = ['matplotlib.sphinxext.only_directives',
              'sphinx.ext.todo',
              'sphinx.ext.coverage',
              'sphinx.ext.mathjax',
              'sphinx.ext.viewcode',
              'sphinxarg.ext']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
......
"""
An example of how to use Multi agent
"""
......@@ -23,12 +23,9 @@ Content
   :maxdepth: 1

   gettingstarted
   overview/index
   tutorials/index
   references/index

Indices and tables
......
Moving
######
Overview
********
.. automodule:: navipy.moving
Close-loop agent
****************
Online rendering
================
.. autoclass:: navipy.moving.agent.CyberBeeAgent
:members:
Pre-rendered
============
.. autoclass:: navipy.moving.agent.GridAgent
:members:
Graph agent
***********
.. autoclass:: navipy.moving.agent.GraphAgent
:members:
Summary
*******
.. automodule:: navipy.moving.agent
Brain
=====
Every agent comes with a brain processing the output of its senses or
sensors, for biological or technical agents respectively.
The senses of agents in navipy are limited to:

* 4d vision (brightness + depth)

The 4d vision sense is controlled by the rendering module; scenes are either
rendered online or loaded from a database containing pre-rendered images.
For example, to use pre-rendered images from a database:
.. literalinclude:: examples/apcv.py
:lines: 10-11
Then the brain can be updated at a new position-orientation:
.. literalinclude:: examples/apcv.py
:lines: 15
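
Putting both steps together, a minimal sketch following the apcv.py example
included in this commit (using the database bundled with navipy):

.. code-block:: python

   from navipy import Brain
   from navipy.database import DataBaseLoad
   import pkg_resources

   # connect to a database of pre-rendered images
   mydb_filename = pkg_resources.resource_filename(
       'navipy', 'resources/database.db')
   mydb = DataBaseLoad(mydb_filename)
   # the database acts as the renderer of the brain
   mybrain = Brain(renderer=mydb)
   # update the brain at a new position-orientation
   posorient = mydb.posorients.loc[12, :]
   mybrain.update(posorient)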
Building your own brain
-----------------------
The Brain class is an abstract Brain: on its own it cannot control an agent.
To control an agent, a Brain must provide a function called velocity.
For example, a stationary agent should always return a null velocity.
.. literalinclude:: examples/static_brain.py
:lines: 3,9-17
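
The static_brain.py file itself is not part of this diff; a minimal sketch
of such a stationary brain, assuming the velocity convention used by
asv_brain.py below, could look like:

.. code-block:: python

   import pandas as pd
   from navipy import Brain


   class StaticBrain(Brain):
       """A brain whose agent never moves."""

       def velocity(self):
           # a stationary agent: null translation and null rotation
           return pd.Series(data=0,
                            index=['dx', 'dy', 'dz',
                                   'dalpha_0', 'dalpha_1', 'dalpha_2'])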
An agent using an average skyline homing vector could be built as follows:
.. literalinclude:: examples/asv_brain.py
:lines: 4-5,11-36
Comparing
=========
Rotational image difference function
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. literalinclude:: examples/ridf.py
:lines: 4,20
.. plot:: overview/examples/ridf.py
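
The ridf.py example is only partially included above; a hedged sketch of the
idea, assuming rot_imagediff compares a current scene against a memorised
one (the exact signature is an assumption based on the function name):

.. code-block:: python

   from navipy.comparing import rot_imagediff

   # current_scene and memorised_scene come from the brain's vision,
   # e.g. mybrain.vision.scene at two position-orientations
   ridf = rot_imagediff(current_scene, memorised_scene)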
Database
========
Databases are generated by the rendering module and contain all \
images and their corresponding position-orientations:

* position_orientation: contains the position and orientation at which \
each image was rendered. A position-orientation is described by \
['x','y','z','alpha_0','alpha_1','alpha_2']
* image: contains all rendered images. Each channel of each image \
is normalised, so as to use the full coding range.
* normalisation: the normalisation constants
How to load a database
----------------------
.. literalinclude:: examples/get_posorients.py
:lines: 8
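
The included line corresponds to connecting to a database file, as done in
the examples of this commit:

.. code-block:: python

   import pkg_resources
   from navipy.database import DataBaseLoad

   mydb_filename = pkg_resources.resource_filename(
       'navipy', 'resources/database.db')
   mydb = DataBaseLoad(mydb_filename)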
How to load all position-orientation
------------------------------------
The database contains every position-orientation \
at which an image has been rendered. In certain \
situations, it may be useful to know all \
position-orientations in the database; more technically \
speaking, to load the full table of position-orientations.
.. literalinclude:: examples/get_posorients.py
:lines: 9-10
.. ipython:: examples/get_posorients.py
:verbatim:
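
Following the examples of this commit, the full table is available as a
pandas DataFrame:

.. code-block:: python

   posorients = mydb.posorients
   # columns: ['x', 'y', 'z', 'alpha_0', 'alpha_1', 'alpha_2']
   print(posorients.head())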
How to load an image
--------------------
The database contains images which can be processed differently \
depending on the navigation strategy being used.
Images are stored at given position-orientations. To load an image, \
the position-orientation can be given. The DataBaseLoad will \
check whether this position-orientation has been rendered; if it is \
the case, the image will be returned.
.. literalinclude:: examples/load_image_posorient.py
:lines: 14-23
.. plot:: overview/examples/load_image_posorient.py
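
A hedged sketch of this lookup (the scene method on DataBaseLoad is an
assumption, mirroring the renderer interface used elsewhere in this commit):

.. code-block:: python

   import pandas as pd

   # the position-orientation at which an image was rendered
   posorient = pd.Series(index=['x', 'y', 'z',
                                'alpha_0', 'alpha_1', 'alpha_2'])
   posorient.x = 0
   posorient.y = 0
   posorient.z = 1
   posorient.alpha_0 = 0
   posorient.alpha_1 = 0
   posorient.alpha_2 = 0
   scene = mydb.scene(posorient)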
However, checking in the database whether an image has already been \
rendered at a given position-orientation can cost time. To speed up \
certain calculations, an image can instead be accessed by row number. \
Indeed, each position-orientation is identified by a unique row \
number, which is consistent throughout the entire database. Thus, \
an image can be loaded by providing its row number.
.. literalinclude:: examples/load_image_rowid.py
:lines: 13-15
.. plot:: overview/examples/load_image_rowid.py
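
A similarly hedged sketch for row-based access (the keyword name rowid is an
assumption):

.. code-block:: python

   scene = mydb.scene(rowid=12)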
.. todo:: channels as part of database
import matplotlib.pyplot as plt
from navipy.database import DataBaseLoad
from navipy.processing import pcode
from navipy.processing import tools
from navipy.processing import constants
from navipy import Brain
import pkg_resources

# 1) Connect to the database
mydb_filename = pkg_resources.resource_filename(
    'navipy', 'resources/database.db')
mydb = DataBaseLoad(mydb_filename)
mybrain = Brain(renderer=mydb)
# 2) Define the position-orientation at which
# we want the image
posorient = mydb.posorients.loc[12, :]
mybrain.update(posorient)
my_apcv = pcode.apcv(mybrain.vision.scene,
                     mybrain.vision.viewing_directions)
my_apcv_sph = tools.cartesian_to_spherical(x=my_apcv[..., 0],
                                           y=my_apcv[..., 1],
                                           z=my_apcv[..., 2])
elevation = mydb.viewing_directions[
    ..., constants.__spherical_indeces__['elevation']]
azimuth = mydb.viewing_directions[
    ..., constants.__spherical_indeces__['azimuth']]
......
import numpy as np
import pandas as pd
from navipy.database import DataBaseLoad
from navipy.processing.pcode import apcv, skyline
from navipy import Brain
import pkg_resources


# 0) Define a class inheriting from Brain
class ASVBrain(Brain):
    def __init__(self, renderer=None,
                 channel=0, goalpos=[0, 0]):
        Brain.__init__(self, renderer=renderer)
        # Init memory
        locid = self.posorients[(self.posorients.x == goalpos[0])
                                & (self.posorients.y == goalpos[1])].index[0]
        posorient = self.posorients.loc[locid, :]
        self.update(posorient)
        self.channel = channel
        self.memory = self.asv()

    def asv(self):
        skyl = skyline(self.vision.scene)
        vector = apcv(skyl,
                      self.vision.viewing_directions)
        return vector[..., self.channel, :]

    def velocity(self):
        homing_vector = self.memory - self.asv()
        homing_vector = np.squeeze(homing_vector)
        velocity = pd.Series(data=0,
                             index=['dx', 'dy', 'dz',
                                    'dalpha_0', 'dalpha_1', 'dalpha_2'])
        velocity[['dx', 'dy', 'dz']] = homing_vector
        return velocity


# 1) Connect to the database
mydb_filename = pkg_resources.resource_filename(
    'navipy', 'resources/database.db')
mydb = DataBaseLoad(mydb_filename)
mybrain = ASVBrain(renderer=mydb)
# 2) Define the position-orientation at which
# we want the image
posorient = mydb.posorients.loc[12, :]
mybrain.update(posorient)
mybrain.velocity()
......@@ -3,6 +3,7 @@ Example on how to use the rendering module
"""
import tempfile
import numpy as np
import os
from navipy.sensors.bee_sampling import BeeSampling
from navipy.sensors.renderer import BlenderRender
......@@ -25,6 +26,7 @@ bee_samp.create_sampling_grid(
# will be set to 0
world_dim = 50 * np.sqrt(2)
bee_samp.world_dim = world_dim
# Rendering in a temporary folder.
with tempfile.TemporaryDirectory() as folder:
    filename_db = os.path.join(folder, 'database.db')
    bee_samp.render(filename_db)
......@@ -4,13 +4,15 @@ from matplotlib.colors import hsv_to_rgb, rgb_to_hsv
import matplotlib.pyplot as plt
from navipy.sensors.renderer import BlenderRender

# Configure the rendering module
cyberbee = BlenderRender()
cyberbee.cycle_samples = 50
cyberbee.camera_rotation_mode = 'XYZ'
cyberbee.camera_fov = [[-90, 90], [-180, 180]]
cyberbee.gaussian_width = 1.5
cyberbee.camera_resolution = [360, 180]
# Set the position at which to render an image/distance
posorient = pd.Series(index=['x', 'y', 'z',
                             'alpha_0', 'alpha_1', 'alpha_2'])
posorient.x = 0
......@@ -21,7 +23,7 @@ posorient.alpha_1 = 0
posorient.alpha_2 = 0
scene = cyberbee.scene(posorient)
# plot
f, axarr = plt.subplots(2, 1, sharex=True)
to_plot_im = scene[:, :, :3]
......
import matplotlib.pyplot as plt
from navipy.database import DataBaseLoad
import navipy.processing as processing
from navipy import Brain
import pkg_resources
......@@ -9,13 +9,13 @@ import pkg_resources
mydb_filename = pkg_resources.resource_filename(
    'navipy', 'resources/database.db')
mydb = DataBaseLoad(mydb_filename)
mybrain = Brain(renderer=mydb)
# 2) Define the position-orientation at which
# we want the image
posorient = mydb.posorients.loc[12, :]
mybrain.update(posorient)
my_contrast = processing.pcode.contrast_weighted_nearness(
    mybrain.vision.scene)
f, axarr = plt.subplots(2, 2, figsize=(15, 8))
axarr = axarr.flatten()
......
import matplotlib.pyplot as plt
from navipy.database import DataBaseLoad
import navipy.processing as processing
from navipy import Brain
import pkg_resources

# 1) Connect to the database
mydb_filename = pkg_resources.resource_filename(
    'navipy', 'resources/database.db')
mydb = DataBaseLoad(mydb_filename)
mybrain = Brain(renderer=mydb)
# 2) Define the position-orientation at which
# we want the image
posorient = mydb.posorients.loc[12, :]
mybrain.update(posorient)
my_contrast = processing.pcode.michelson_contrast(mybrain.vision.scene)
f, axarr = plt.subplots(2, 2, figsize=(15, 8))
axarr = axarr.flatten()
......