Commit 87ee8685 authored by Olivier Bertrand

Move 4snapshots code into processing and comparing and adapt it to toolbox

parent 6961ea51
@@ -2,7 +2,9 @@
Comparing
"""
import numpy as np
import pandas as pd
from navipy.scene import is_ibpc, is_obpc, check_scene
from navipy.scene import __spherical_indeces__
def simple_imagediff(current, memory):
@@ -145,3 +147,51 @@ The input parameters are the following:
def gradient(current, memory):
return 0
def weighted_irdf(current,
mem_scenes,
viewing_directions):
"""Weighted image rotational difference
Return an homing vector direction based on an \
Image rotational difference weighted between \
some reference snapshots
:param current: actual scene, np.array
:param mem_scenes: list of memorised of views
:returns: dx, dy, dz, dyaw, dpitch, droll.
:rtypes: pd.Series
"""
    if not isinstance(mem_scenes, (list, tuple)):
        msg = 'mem_scenes should be of type '
        msg += 'list or tuple and not {}'
        msg = msg.format(type(mem_scenes))
        raise TypeError(msg)
for scene in mem_scenes:
check_scene(scene)
check_scene(current)
# A dataframe to store
# the minimum of the irdf and the angle
# at which the minimum takes place
df_svp = pd.DataFrame(index=range(0, len(mem_scenes)),
columns=['irdf', 'angle'])
for i, scene in enumerate(mem_scenes):
irdf = rot_imagediff(current, scene)
idx = np.argmin(irdf[..., 0])
value = np.min(irdf[..., 0])
df_svp.loc[i, 'angle'] = \
viewing_directions[idx,
__spherical_indeces__['azimuth']]
df_svp.loc[i, 'irdf'] = value
min_irdf = df_svp.irdf.min()
    # Take the best (smallest) svp irdf and divide it
    # by each irdf to obtain the weights
    w_svp = min_irdf / df_svp.irdf
# Weighting of the vector direction based on circular statistics
j = complex(0, 1)
H = w_svp * np.exp(df_svp.angle * j)
return np.sum(H)
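The complex number returned by weighted_irdf encodes the homing direction in its angle. A minimal sketch of how it could be turned into a planar movement vector, mirroring the move_vector construction of the removed weighted_ird further down in this commit (the heading value here is a placeholder):

import numpy as np
import pandas as pd

# Placeholder heading; in practice it would come from weighted_irdf(...)
heading = 0.8 * np.exp(1j * np.deg2rad(30))
hdeg = np.angle(heading, deg=True)  # homing direction in degrees
move_vector = pd.Series(index=['dx', 'dy', 'dz',
                               'dyaw', 'dpitch', 'droll'], dtype=float)
move_vector['dx'] = np.cos(np.deg2rad(hdeg))
move_vector['dy'] = np.sin(np.deg2rad(hdeg))
move_vector['dz'] = 0.0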
@@ -2,182 +2,70 @@ import numpy as np
import pandas as pd
def ird(ref_svp,scene):
    """
    compute the rotational image difference between a reference memorized snapshot (SVP) and actual position
    : scene
    : ref_svp: snapshot memorized reference (denormalized image)
    : return: a Serie with angle and image rotation difference value
    : typer:pd.Series
    """
    rms=pd.Series(index=['x','y','z','irdf','angle',])
    angles=range(0,ref_svp.shape[1]+1,1)
    # for this image do the rotation and found the minimum
    image_rot=pd.Series(index=angles)
    image_rot.name='irdf'
    for ang in angles:
        # rotation of the image for one degree angle
        out = np.roll(scene,ang,1)
        # difference between view position and reference snapshot
        diff_rot=out-ref_svp
        # calculate Root Mean Square
        diff_val=np.sqrt(np.mean(diff_rot[:,:,:3]**2))
        image_rot[ang]=diff_val
    rms['angle']=image_rot.argmin()
    rms['irdf']=image_rot.min()
    rms['x']=rms.irdf*np.cos(rms.angle)
    rms['y']=rms.irdf*np.sin(rms.angle)
    rms['z']=np.nan
    return rms


def weighted_ird(scene,mem_scenes):
    '''Return an homing vector direction based on an Image rotational difference weighted between some reference snapshots
    : param scene: actual scene , np.array
    : param mem_scenes: list of set of views
    : returns: dx,dy,dz,dyaw,dpitch,droll.
    : rtypes : pd.Series
    '''
    df_svp= pd.DataFrame(index=range(0,len(mem_scenes)),columns=['irdf','angle'])
    for i in range(0,len(mem_scenes)):
        df_svp.loc[i]=ird(mem_scenes[i],scene)
    best_svp=pd.Series(index=['svp_nb','irdf'])
    best_svp.loc['svp_nb']=np.argmin(df_svp.irdf)
    best_svp['irdf']=min(df_svp.irdf)
    # Take the best svp irdf and make the ratio for each others that gives the weighted irdf
    w_svp=pd.DataFrame(index=df_svp.index,columns=['w'])
    for i in df_svp.index :
        w=best_svp['irdf']/df_svp.irdf[i]
        w_svp.loc[i,['w']]=w
    # Weighting of the vector direction based on circular statistics
    Heading=pd.Series(data=complex())
    j=complex(0,1)
    H=list()
    for i in df_svp.index:
        t=w_svp.loc[i]*np.exp(np.deg2rad(df_svp.loc[i,['angle']][0])*j)
        H.append(t)
    Heading=np.sum(H)
    Hdeg=np.angle(Heading, deg=True)
    move_vector=pd.Series(index=['dx','dy','dz','dyaw','dpitch','droll'])
    move_vector['dx']=np.cos((Hdeg)*np.pi/180)
    move_vector['dy']=np.sin((Hdeg)*np.pi/180)
    move_vector['dz']=0
    move_vector['dyaw','dpitch','droll']=[np.nan,np.nan,np.nan]
    return move_vector


def nposorient_around_ref(mydb,position_df,ref_pos,nb_snapshot,radius,blender_view):
    '''Return set of views around and oriented towards memorized location
    :param mydb: Database environment
    :param position_df: dataframe with the positions of the grid
    :param ref_pos: df with x, y and z position of the reference snapshot
    :param nb_snapshot: number of wanted set of views (multiple of 2)
    :param radius: distance from memorized location to take snapshots
    :param blender_view: viewing axis camera id y=90, if x=0
    :returns: list of reoriented image array, snaphots positions
    :rtypes: array of np.array, pd.DataFrame
    '''
    svp_all=pd.DataFrame(columns=['x','y','z','frame'])
    nsvp_image=[]
    # angle of rotation
    ang_deg=360/nb_snapshot
    ang=np.deg2rad(ang_deg)
    # make the different view
    for i in range(0,nb_snapshot):
        x=ref_pos.x+radius*np.cos(ang*i)
        y=ref_pos.y+radius*np.sin(ang*i)
        z=ref_pos.z
        svp_frame=pd.Series(index=['frame','x','y','z'])
        distance_arr=pd.Series(index=position_df.index)
        for index, pos in position_df.dropna().iterrows():
            distance = (pos.x-x)**2
            distance += (pos.y-y)**2
            distance += (pos.z-z)**2
            distance_arr[index]=distance
        svp_frame.frame=int(distance_arr.dropna().argmin())
        svp_frame.x=position_df.x[svp_frame.frame]
        svp_frame.y=position_df.y[svp_frame.frame]
        svp_frame.z=position_df.z[svp_frame.frame]
        svp_all.loc[i]=[svp_frame.x,svp_frame.y,svp_frame.z,int(svp_frame.frame)]
    reoriented_mem=[]
    rot=list()
    for i,j in svp_all.iterrows():
        ide=int(j.frame)
        image=mydb.scene(rowid=ide)
        alpha=np.floor(np.rad2deg(np.arctan2((ref_pos.y-j.y),(ref_pos.x-j.x))))
        alpha=alpha+blender_view##because y view on blender
        rot.append(alpha)
        alpha=int(alpha)
        svp_reorient=np.roll(image,alpha,1)
        reoriented_mem.append(svp_reorient)
    return reoriented_mem,svp_all


def nposorient_around_ref(mydb, position_df, ref_pos, nb_snapshot, radius, blender_view):
    '''Return set of views around and oriented towards memorized location
    :param mydb: Database environment
    :param position_df: dataframe with the positions of the grid
    :param ref_pos: df with x, y and z position of the reference snapshot
    :param nb_snapshot: number of views wanted (multiple of 2)
    :param radius: distance from memorized location to take snapshots
    :param blender_view: offset of the camera viewing axis in degrees
        (90 if the camera looks along y, 0 if along x)
    :returns: list of reoriented image arrays, snapshot positions
    :rtype: list of np.array, pd.DataFrame
    '''
    svp_all = pd.DataFrame(columns=['x', 'y', 'z', 'frame'])
    nsvp_image = []
    # angle of rotation
    ang_deg = 360 / nb_snapshot
    ang = np.deg2rad(ang_deg)
    # make the different view
    for i in range(0, nb_snapshot):
        x = ref_pos.x + radius * np.cos(ang * i)
        y = ref_pos.y + radius * np.sin(ang * i)
        z = ref_pos.z
        svp_frame = pd.Series(index=['frame', 'x', 'y', 'z'])
        distance_arr = pd.Series(index=position_df.index)
        for index, pos in position_df.dropna().iterrows():
            distance = (pos.x - x)**2
            distance += (pos.y - y)**2
            distance += (pos.z - z)**2
            distance_arr[index] = distance
        svp_frame.frame = int(distance_arr.dropna().argmin())
        svp_frame.x = position_df.x[svp_frame.frame]
        svp_frame.y = position_df.y[svp_frame.frame]
        svp_frame.z = position_df.z[svp_frame.frame]
        svp_all.loc[i] = [svp_frame.x, svp_frame.y,
                          svp_frame.z, int(svp_frame.frame)]
    reoriented_mem = []
    rot = list()
    for i, j in svp_all.iterrows():
        ide = int(j.frame)
        image = mydb.scene(rowid=ide)
        alpha = np.floor(np.rad2deg(np.arctan2(
            (ref_pos.y - j.y), (ref_pos.x - j.x))))
        alpha = alpha + blender_view  # because y view on blender
        rot.append(alpha)
        alpha = int(alpha)
        svp_reorient = np.roll(image, alpha, 1)
        reoriented_mem.append(svp_reorient)
    return reoriented_mem, svp_all
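For illustration, a small self-contained check of the snapshot-placement geometry used by nposorient_around_ref, with arbitrary values: nb_snapshot=4 and radius=1.0 place the snapshots on the four cardinal points around ref_pos.

import numpy as np

nb_snapshot, radius = 4, 1.0           # arbitrary illustrative values
ang = np.deg2rad(360 / nb_snapshot)    # 90 degrees between snapshots
offsets = [(radius * np.cos(ang * i), radius * np.sin(ang * i))
           for i in range(nb_snapshot)]
print(np.round(offsets, 6))            # ~ [[1, 0], [0, 1], [-1, 0], [0, -1]]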
@@ -2,6 +2,7 @@
place code derived from scene
"""
import numpy as np
import pandas as pd
from scipy.ndimage import maximum_filter, minimum_filter
from navipy.scene import __spherical_indeces__
from navipy.scene import __cartesian_indeces__
@@ -133,7 +134,7 @@ def pcv(place_code, viewing_directions):
should be 1'.format(place_code.shape[component_dim]))
elevation = viewing_directions[..., __spherical_indeces__['elevation']]
azimuth = viewing_directions[..., __spherical_indeces__['azimuth']]
if (np.any(elevation < -np.pi/2) or np.any(elevation > np.pi/2)):
if (np.any(elevation < -np.pi / 2) or np.any(elevation > np.pi / 2)):
# if (np.any(elevation < -2*np.pi) or np.any(elevation > 2*np.pi)):
        raise ValueError("Elevation must be radians in range [-pi/2;pi/2]")
if (np.max(elevation) - np.min(elevation) > 2 * np.pi):
@@ -184,3 +185,58 @@ def apcv(place_code, viewing_directions):
return (scaled_lv.sum(axis=0))[np.newaxis, ...]
else:
raise TypeError('place code is neither an ibpc nor obpc')
def nposorient_around_ref(mydb,
position_df,
ref_pos,
nb_snapshot,
radius,
blender_view):
"""Return set of views around and oriented towards memorized location
:param mydb: Database environment
:param position_df: dataframe with the positions of the grid
:param ref_pos: df with x, y and z position of the reference snapshot
    :param nb_snapshot: number of views wanted (multiple of 2)
    :param radius: distance from memorized location to take snapshots
    :param blender_view: offset of the camera viewing axis in degrees
        (90 if the camera looks along y, 0 if along x)
    :returns: list of reoriented image arrays, snapshot positions
    :rtype: list of np.array, pd.DataFrame
"""
svp_all = pd.DataFrame(columns=['x', 'y', 'z', 'frame'])
# angle of rotation
ang_deg = 360 / nb_snapshot
ang = np.deg2rad(ang_deg)
# make the different view
for i in range(0, nb_snapshot):
x = ref_pos.x + radius * np.cos(ang * i)
y = ref_pos.y + radius * np.sin(ang * i)
z = ref_pos.z
svp_frame = pd.Series(index=['frame', 'x', 'y', 'z'])
distance_arr = pd.Series(index=position_df.index)
for index, pos in position_df.dropna().iterrows():
distance = (pos.x - x)**2
distance += (pos.y - y)**2
distance += (pos.z - z)**2
distance_arr[index] = distance
svp_frame.frame = int(distance_arr.dropna().argmin())
svp_frame.x = position_df.x[svp_frame.frame]
svp_frame.y = position_df.y[svp_frame.frame]
svp_frame.z = position_df.z[svp_frame.frame]
svp_all.loc[i] = [svp_frame.x, svp_frame.y,
svp_frame.z, int(svp_frame.frame)]
reoriented_mem = []
rot = list()
for i, j in svp_all.iterrows():
ide = int(j.frame)
image = mydb.scene(rowid=ide)
alpha = np.floor(np.rad2deg(np.arctan2(
(ref_pos.y - j.y), (ref_pos.x - j.x))))
alpha = alpha + blender_view # because y view on blender
rot.append(alpha)
alpha = int(alpha)
svp_reorient = np.roll(image, alpha, 1)
reoriented_mem.append(svp_reorient)
return reoriented_mem, svp_all
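Putting the moved pieces together, a hedged end-to-end sketch. The import paths are assumptions based on this commit, and mydb, position_df, ref_pos, current_scene and viewing_directions are placeholders for the user's own database, grid positions, reference position, current view and per-pixel viewing directions.

import numpy as np
from navipy.processing import nposorient_around_ref  # assumed module path
from navipy.comparing import weighted_irdf            # assumed module path

# Memorised, reoriented views around the reference location
mem_scenes, svp_positions = nposorient_around_ref(
    mydb, position_df, ref_pos,
    nb_snapshot=4,     # four snapshots around ref_pos
    radius=0.5,        # distance from ref_pos (arbitrary)
    blender_view=90)   # camera viewing axis along y in Blender
# Homing direction from the current view (see weighted_irdf above)
heading = weighted_irdf(current_scene, mem_scenes, viewing_directions)
homing_direction = np.angle(heading)  # in radians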