prompt | completion | api
---|---|---
stringlengths 19 to 879k | stringlengths 3 to 53.8k | stringlengths 8 to 59
# Copyright 2019 Image Analysis Lab, German Center for Neurodegenerative Diseases (DZNE), Bonn
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# IMPORTS
import optparse
import sys
import nibabel.freesurfer.io as fs
import numpy as np
import math
from lapy.DiffGeo import tria_mean_curvature_flow
from lapy.TriaMesh import TriaMesh
from lapy.read_geometry import read_geometry
from lapy.Solver import Solver
HELPTEXT = """
Script to compute ShapeDNA using linear FEM matrices.
After correcting sign flips, embeds a surface mesh into the spectral domain,
then projects it onto a unit sphere. This is scaled and rotated to match the
atlas used for FreeSurfer surface registration.
USAGE:
spherically_project -i <input_surface> -o <output_surface>
References:
<NAME> et al. Discrete Laplace-Beltrami Operators for Shape Analysis and
Segmentation. Computers & Graphics 33(3):381-390, 2009
Martin Reuter et al. Laplace-Beltrami spectra as "Shape-DNA" of surfaces and
solids. Computer-Aided Design 38(4):342-366, 2006
<NAME> et al. High-resolution inter-subject averaging and a coordinate
system for the cortical surface. Human Brain Mapping 8:272-284, 1999
Dependencies:
Python 3.5
Scipy 0.10 or later to solve the generalized eigenvalue problem.
http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
Numpy
http://www.numpy.org
Nibabel to read and write FreeSurfer surface meshes
http://nipy.org/nibabel/
Original Author: <NAME>
Date: Jan-18-2016
"""
h_input = 'path to input surface'
h_output = 'path to output surface, spherically projected'
def options_parse():
"""
Command line option parser for spherically_project.py
"""
parser = optparse.OptionParser(version='$Id: spherically_project,v 1.1 2017/01/30 20:42:08 ltirrell Exp $',
usage=HELPTEXT)
parser.add_option('--input', '-i', dest='input_surf', help=h_input)
parser.add_option('--output', '-o', dest='output_surf', help=h_output)
(options, args) = parser.parse_args()
if options.input_surf is None or options.output_surf is None:
sys.exit('ERROR: Please specify input and output surfaces')
return options
def tria_spherical_project(tria, flow_iter=3, debug=False):
"""
spherical(tria) computes the first three non-constant eigenfunctions
and then projects the spectral embedding onto a sphere. This works
when the first functions have a single closed zero level set,
splitting the mesh into two domains each. Depending on the original
shape, triangles could get inverted. We also flip the functions
according to the axes that they are aligned with for the special
case of brain surfaces in FreeSurfer coordinates.
Inputs: tria : TriaMesh
flow_iter : mean curv flow iterations (3 should be enough)
Outputs: tria : TriaMesh
"""
if not tria.is_closed():
raise ValueError('Error: Can only project closed meshes!')
# sub-function to compute flipped area of trias where normal
# points towards origin, meaningful for the sphere, centered at zero
def get_flipped_area(tria):
v1 = tria.v[tria.t[:, 0], :]
v2 = tria.v[tria.t[:, 1], :]
v3 = tria.v[tria.t[:, 2], :]
v2mv1 = v2 - v1
v3mv1 = v3 - v1
cr = np.cross(v2mv1, v3mv1)
spatvol = np.sum(v1 * cr, axis=1)
areas = 0.5 * np.sqrt(np.sum(cr * cr, axis=1))
area = np.sum(areas[np.where(spatvol < 0)])
return area
fem = Solver(tria, lump=False)
evals, evecs = fem.eigs(k=4)
if debug:
data = dict()
data['Eigenvalues'] = evals
data['Eigenvectors'] = evecs
data['Creator'] = 'spherically_project.py'
data['Refine'] = 0
data['Degree'] = 1
data['Dimension'] = 2
data['Elements'] = tria.t.shape[0]
data['DoF'] = evecs.shape[0]
data['NumEW'] = 4
from lapy.FuncIO import export_ev
export_ev(data, 'debug.ev')
# flip efuncs to align to coordinates consistently
ev1 = evecs[:, 1]
# ev1maxi = np.argmax(ev1)
# ev1mini = np.argmin(ev1)
# cmax = v[ev1maxi,:]
# cmin = v[ev1mini,:]
cmax1 = np.mean(tria.v[ev1 > 0.5 * np.max(ev1), :], 0)
cmin1 = np.mean(tria.v[ev1 < 0.5 * np.min(ev1), :], 0)
ev2 = evecs[:, 2]
cmax2 = np.mean(tria.v[ev2 > 0.5 * np.max(ev2), :], 0)
cmin2 = np.mean(tria.v[ev2 < 0.5 * np.min(ev2), :], 0)
ev3 = evecs[:, 3]
cmax3 = np.mean(tria.v[ev3 > 0.5 * np.max(ev3), :], 0)
cmin3 = np.mean(tria.v[ev3 < 0.5 * np.min(ev3), :], 0)
# we trust ev 1 goes from front to back
l11 = abs(cmax1[1] - cmin1[1])
l21 = abs(cmax2[1] - cmin2[1])
l31 = abs(cmax3[1] - cmin3[1])
if l11 < l21 or l11 < l31:
print("ERROR: direction 1 should be (anterior -posterior) but is not!")
print(" debug info: {} {} {} ".format(l11, l21, l31))
# sys.exit(1)
raise ValueError('Direction 1 should be anterior - posterior')
# only flip direction if necessary
print("ev1 min: {} max {} ".format(cmin1, cmax1))
# axis 1 = y is aligned with this function (for brains in FS space)
v1 = cmax1 - cmin1
if cmax1[1] < cmin1[1]:
ev1 = -1 * ev1
print("inverting direction 1 (anterior - posterior)")
l1 = abs(cmax1[1] - cmin1[1])
# for ev2 and ev3 there could be also a swap of the two
l22 = abs(cmax2[2] - cmin2[2])
l32 = abs(cmax3[2] - cmin3[2])
# usually ev2 should be superior inferior, if ev3 is better in that direction, swap
if l22 < l32:
print("swapping direction 2 and 3")
ev2, ev3 = ev3, ev2
cmax2, cmax3 = cmax3, cmax2
cmin2, cmin3 = cmin3, cmin2
l23 = abs(cmax2[0] - cmin2[0])
l33 = abs(cmax3[0] - cmin3[0])
if l33 < l23:
print("WARNING: direction 3 wants to swap with 2, but cannot")
print("ev2 min: {} max {} ".format(cmin2, cmax2))
# axis 2 = z is aligned with this function (for brains in FS space)
v2 = cmax2 - cmin2
if cmax2[2] < cmin2[2]:
ev2 = -1 * ev2
print("inverting direction 2 (superior - inferior)")
l2 = abs(cmax2[2] - cmin2[2])
print("ev3 min: {} max {} ".format(cmin3, cmax3))
# axis 0 = x is aligned with this function (for brains in FS space)
v3 = cmax3 - cmin3
if cmax3[0] < cmin3[0]:
ev3 = -1 * ev3
print("inverting direction 3 (right - left)")
l3 = abs(cmax3[0] - cmin3[0])
v1 = v1 * (1.0 / np.sqrt(np.sum(v1 * v1)))
v2 = v2 * (1.0 / np.sqrt(np.sum(v2 * v2)))
v3 = v3 * (1.0 / np.sqrt(np.sum(v3 * v3)))
spatvol = abs(np.dot(v1, np.cross(v2, v3)))
print("spat vol: {}".format(spatvol))
mvol = tria.volume()
print("orig mesh vol {}".format(mvol))
bvol = l1 * l2 * l3
print("box {}, {}, {} volume: {} ".format(l1, l2, l3, bvol))
print("box coverage: {}".format(bvol / mvol))
# we map evN to -1..0..+1 (keep zero level fixed)
# I have the feeling that this helps a little with the stretching
# at the poles, but who knows...
ev1min = np.amin(ev1)
ev1max = np.amax(ev1)
ev1[ev1 < 0] /= - ev1min
ev1[ev1 > 0] /= ev1max
ev2min = np.amin(ev2)  # api: numpy.amin
import numpy as np
from prlqr.systems.dynamical_system import DiscreteTimeDynamicalSystem, StateFeedbackLaw, NormalRandomControlLaw
from prlqr.analysis.stability_analysis import check_stability
class LinearSystem(DiscreteTimeDynamicalSystem):
def __init__(self, A, B, controller, settings):
self.A = A
self.B = B
super().__init__(state_dimension=B.shape[0], input_dimension=B.shape[1], controller=controller, settings=settings)
def x_next(self, u):
noise = np.random.randn(self.state_dimension, 1) * np.sqrt(self.process_noise)
return self.A @ self.current_state + self.B @ u + noise
def empirically_unstable(self, x, u):
if isinstance(self.controller, StateFeedbackLaw):
A_cl = self.A + self.B @ self.controller.K
return not check_stability(A_cl)
# Check bounds that a stable controller should not pass... Overwrite this if necessary for your system
else:
return np.any(x > 1e3) or np.any(u > 1e3)
def linearize(self, q):
return self.A, self.B
def optimal_controller(self, Q, R):
from scipy.linalg import solve_discrete_are
P = np.array(solve_discrete_are(self.A, self.B, Q, R))
K_opt = - np.linalg.inv(R + self.B.T @ P @ self.B) @ (self.B.T @ P @ self.A)
return K_opt
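# --- Added example (not part of the original module) -----------------------------------
# A minimal, hedged sketch of how the gain returned by optimal_controller can be
# sanity-checked: for a discrete-time LQR solution, the closed-loop matrix A + B K
# should have spectral radius strictly below 1. The A, B, Q, R values below are
# illustrative only (they mirror the DoubleIntegrator defined next); nothing here is
# prescribed by the original code.
def _lqr_stability_check_example():
    from scipy.linalg import solve_discrete_are
    A = np.array([[1.0, 0.2],
                  [0.0, 1.0]])
    B = np.array([[0.0],
                  [0.7]])
    Q = np.eye(2)
    R = np.eye(1)
    P = np.array(solve_discrete_are(A, B, Q, R))
    K = -np.linalg.inv(R + B.T @ P @ B) @ (B.T @ P @ A)
    spectral_radius = np.max(np.abs(np.linalg.eigvals(A + B @ K)))
    return spectral_radius < 1.0  # expected: True for a stabilizing gain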
class DoubleIntegrator(LinearSystem):
def __init__(self, controller, settings):
A = np.array([
[1, 0.2],
[0, 1]
])
B = np.array([
[0],
[.7]
])
super().__init__(A, B, controller, settings)
class GraphLaplacian3D(LinearSystem):
def __init__(self, controller, settings):
A = np.array([
[1.01, 0.01, 0.00],
[0.01, 1.01, 0.01],
[0.00, 0.01, 1.01],
])
B = np.array([
[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]
])
super().__init__(A, B, controller, settings)
def x_next(self, u):
noise = np.random.randn(self.state_dimension, 1) * np.sqrt(self.process_noise)
return self.A @ self.current_state + self.B @ u + noise
class GraphLaplacian3DNonLin(GraphLaplacian3D):
def __init__(self, controller, settings):
super().__init__(controller, settings)
def x_next(self, u):
noise = np.random.randn(self.state_dimension, 1) * np.sqrt(self.process_noise)
state = self.current_state
non_lin = (0.3 * np.tril(np.ones((3, 3))) @ state) ** 3
return np.clip(self.A @ self.current_state + self.B @ u + non_lin + noise, -1e3, 1e3)
def empirically_unstable(self, x, u):
if isinstance(self.controller, StateFeedbackLaw):
A_cl = self.A + self.B @ self.controller.K
return not check_stability(A_cl) or np.any(x > 9e2)
else:
return np.any(x > 9e2)
if __name__ == "__main__":
control_var = 0.5
controller = NormalRandomControlLaw(variance=control_var)
system = GraphLaplacian3D(controller, {'process_noise': 0.001})
Q = np.eye(system.state_dimension)
R = np.eye(system.input_dimension) * 1
controller = StateFeedbackLaw(K=system.optimal_controller(Q, R))
#system = GraphLaplacian3D(controller, {'process_noise': 0.001})
x0 = np.ones((system.state_dimension, 1)) * 0.0
u0 = np.ones((system.input_dimension, 1)) * 0.0
data = system.create_trajectory(x0, n=15)
x = data['x']
u = data['u']
import matplotlib.pyplot as plt
plt.plot(x[0,:], label="x_0")
plt.plot(x[1, :], label="x_1")
plt.plot(u[0, :], label="u")
plt.legend()
plt.show()
def lse(training_data):
X0 = training_data['xtrain']
X1 = training_data['ytrain']
BA = X1 @ np.linalg.pinv(X0)  # api: numpy.linalg.pinv
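# --- Added example (not part of the original file) --------------------------------------
# Hedged sketch of what the least-squares estimate above does: X1 @ pinv(X0) is the
# minimum-norm least-squares solution M of M X0 ~ X1, with snapshots stored as columns.
# The toy data below is fabricated for illustration; the 'xtrain'/'ytrain' keys match
# the ones expected by lse().
def _lse_example():
    rng = np.random.default_rng(0)
    A_true = np.array([[0.9, 0.1],
                       [0.0, 0.8]])
    X0 = rng.standard_normal((2, 50))  # 50 regressor snapshots as columns
    X1 = A_true @ X0                   # noise-free targets
    A_hat = X1 @ np.linalg.pinv(X0)    # same computation as in lse()
    return np.allclose(A_hat, A_true, atol=1e-8)  # expected: True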
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
from torchlib.common import FloatTensor
from torchlib.dataset.utils import create_data_loader
from torchlib.generative_model.made import MADE
class WarmUpModel(nn.Module):
def __init__(self, n=100):
super(WarmUpModel, self).__init__()
self.n = n
self.theta = nn.Parameter(torch.randn(1, self.n))
def forward(self, x):
return self.theta.repeat((x.shape[0], 1))
@property
def pmf(self):
return F.softmax(self.theta[0].cpu().detach(), dim=-1).numpy()
def sample(self, shape):
p = self.pmf
return np.random.choice(np.arange(self.n), size=shape, p=p)
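# --- Added example (not part of the original file) --------------------------------------
# Hedged training sketch for WarmUpModel: because forward() returns the same logits for
# every row, fitting the pmf to observed integers reduces to minimising cross-entropy
# between theta and the empirical histogram. The hyperparameters are illustrative only.
def _fit_warmup_model_example(samples, n=100, epochs=200, lr=0.1):
    model = WarmUpModel(n=n)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    targets = torch.as_tensor(samples, dtype=torch.long)
    dummy_input = torch.zeros(len(samples), 1)  # forward() only uses the batch size
    for _ in range(epochs):
        optimizer.zero_grad()
        loss = F.cross_entropy(model(dummy_input), targets)
        loss.backward()
        optimizer.step()
    return model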
class MLP(nn.Module):
def __init__(self, n, nn_size=32, n_layers=3):
super(MLP, self).__init__()
self.n = n
self.embedding = nn.Embedding(n, nn_size)
models = []
models.append(self.embedding)
models.append(nn.Dropout(0.5))
for i in range(n_layers - 1):
models.append(nn.Linear(nn_size, nn_size))
models.append(nn.ReLU())
models.append(nn.Linear(nn_size, n))
self.model = nn.Sequential(*models)
def forward(self, x1):
"""
Args:
x1: The conditioning variable x1, of shape (batch_size,), given as integer category indices (consumed by the embedding layer).
Returns: logits over x2
"""
return self.model.forward(x1)
class TwoDimensionModel(nn.Module):
def __init__(self, n=200):
super(TwoDimensionModel, self).__init__()
self.x2_cond_x1 = MLP(n=n)
self.x1_model = WarmUpModel(n=n)
def forward(self, x):
x1 = x[:, 0]
return self.x1_model.forward(x1), self.x2_cond_x1.forward(x1)
def sample(self, num_samples):
self.eval()
with torch.no_grad():
x1 = self.x1_model.sample(num_samples)
x2_temp = []
data_loader = create_data_loader((x1,), batch_size=1000, drop_last=False, shuffle=False)
for data in data_loader:
data = data[0]
x2_logits = self.x2_cond_x1.forward(data)
x2_prob = F.softmax(x2_logits, dim=-1)
distribution = Categorical(probs=x2_prob)
x2 = distribution.sample().cpu().numpy()
x2_temp.append(x2)
x2 = np.concatenate(x2_temp, axis=0)  # api: numpy.concatenate
from hashlib import new
import numpy as np
import random
import numba as nb
import matplotlib.pyplot as plt
from numpy.core.defchararray import count
from numpy.matrixlib import defmatrix
# from numpy.testing._private.utils import rand
from agent import *
from grid import *
import time
from neighbor import neighbor
import pyximport
pyximport.install()
import is_ag
import meshgrid
import neighbor_value
import neighbor_c
def is_agent(xy,use_map):
# Determine whether the current parcel is vacant or occupied by an agent
# ID > 999 is an agent ID
# ID = 0 means vacant land
x,y = xy
# um = use_map.tolist()
ID = float(use_map[x][y])
# print(use_map[x,y],um[x][y])
if ID > 999: return ID
else: return 0
def meshG(offset=[10,10],pop=1):
'''
Generate a matrix of neighbourhood offsets similar to the following
((-1,-1),(-1,0),(-1,1),
(0,-1), (0,1),
(1,-1),(1,0),(1,1))
'''
x_offset, y_offset = offset
dir = []
for x in range(-x_offset,x_offset):
for y in range(-y_offset,y_offset):
dir.append([x,y])
if pop==1:
dir.pop(int(0.5*2*x_offset*2*y_offset+y_offset)) # remove the (0,0) entry
return dir
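# --- Added note --------------------------------------------------------------------------
# Worked example of the pop index above: with the default offset=[10,10] the loops build
# a 20 x 20 window of offsets, x and y each running over -10..9, i.e. 400 entries in
# row-major order. The (0,0) entry sits at index (0+10)*20 + (0+10) = 210, which equals
# int(0.5*2*10*2*10 + 10) = 210, the element removed when pop == 1.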
def location_effect(ID,xy,agent_pool,work_xy,tra_xy,val_map,
use_map,map_size,wg,ws,c1,c2):
'''
Compute the location effect
U = wg * G + ws * (1 - S) + e
G is the external attractiveness
S is the internal pressure
e is a random variable
xy may or may not be the coordinates of agent ID,
so the hypothetical location effect of placing an agent at a given position on another parcel can also be evaluated
'''
weight = agent_pool[ID].weight # the agent's preference weights
work_id = agent_pool[ID].work_id
work_xy = work_xy[work_id]
G = cal_out_pressure(xy,
work_xy,
tra_xy,
val_map,
weight).sum() # external residential-environment attractiveness
S = cal_in_pressure(ID,
xy,
agent_pool,
val_map,
use_map,
map_size,
c1,
c2
) # internal pressure of agent ID at position xy
LocationEffect = wg*G + ws*(1-S) + 0.1*np.random.rand()
# print("内部压力:",S)
return LocationEffect
def cal_out_pressure(xy, work_xy, tra_xy, val_map, weight):
'''
Compute the external residential-environment attractiveness for the given coordinates
G_h^t = w_env*E_env + w_edu*E_edu + w_tra*E_tra + w_pri*E_pri + w_con*E_con
Order of the agent's weights: transit, commuting, land price
'''
'''Environment, education, and infrastructure terms (currently unused):
E_env = np.min(np.sqrt(np.sum((self.grid.env_xy-xy)**2,1)))
E_env = np.exp(1-0.001*E_env) # exponential distance-decay function
E_edu = np.min(np.sqrt(np.sum((self.grid.edu_xy-xy)**2,1)))
E_edu = np.exp(1-0.001*E_edu) # exponential distance-decay function
E_inf = np.min(np.sqrt(np.sum((self.grid.inf_xy-xy)**2,1)))
E_inf = np.exp(1-0.001*E_inf) # exponential distance-decay function
'''
x,y = xy
# Transit
E_tra = np.min(np.sqrt(np.sum((tra_xy-xy)**2,1))) # distance to the nearest metro station
E_tra = np.exp(1-0.001*E_tra) # exponential distance-decay function
# Commuting
E_work = np.sqrt(np.sum((work_xy-xy)**2)) # commuting distance to the assigned workplace
# E_work = np.sum(np.abs(work_xy-xy)) # Manhattan distance
# Housing price
# In reality the external attractiveness is not simply proportional to housing price; it affects different groups differently: people may prefer housing slightly above their current income level but dislike housing that is far more expensive.
E_price = 0.001 * val_map[x][y]
return weight * np.array([E_tra,E_work,E_price])
def cal_in_pressure(ID, xy, agent_pool, val_map, use_map, map_size, c1, c2):
'''
Compute the internal socio-economic pressure
S_h^t = c1*|I_h^t - V_h^t| + c2*|I_h^t - P_h^t|
S is the socio-economic pressure
I is the individual income
V is the value of the occupied land
P is the average economic status of the neighbours
c1 and c2 are coefficients
'''
x,y = xy
income = agent_pool[ID].income
price = val_map[x][y] # land value of the occupied parcel
P = neighbor(xy,
map_size,
val_map,
use_map,
agent_pool,
offset=5) # average economic status of the neighbourhood
'''
P = neighbor_c.neighbor(xy,
map_size,
val_map,
use_map,
agent_pool,
offset=10) # average economic status of the neighbourhood
'''
S = c1 * np.abs(income-price) + c2 * np.abs(income-P)
# print('income,',income,price,P,S)
return S
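# --- Added note --------------------------------------------------------------------------
# Worked example of the pressure formula above: with c1 = c2 = 1e-3, income I = 5000,
# occupied land value V = 4000 and neighbourhood average P = 5500,
# S = 1e-3*|5000 - 4000| + 1e-3*|5000 - 5500| = 1.0 + 0.5 = 1.5,
# which enters the location effect through the ws*(1 - S) term.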
def nei_value(xy,use_map,val_map,map_size,offset=10):
'''
Compute the value of the surrounding land
input: xy coordinates
output: list of land values and their mean
'''
x,y = xy
t1 = time.time()
# dir = meshG(offset=[offset,offset])
dir = meshgrid.meshgrid(offset=[offset,offset])
# if (time.time()-t1)>0: print(time.time()-t1)
value,count = 0,0
# n_value = []
for off_x,off_y in dir:
if ((x+off_x) >= 0) and \
((x+off_x)<map_size[0]) and \
((y+off_y) >= 0) and \
((y+off_y)<map_size[1]) and \
(use_map[x+off_x][y+off_y] >= 0): # within bounds and accessible
value += val_map[x+off_x][y+off_y]
count += 1
# n_value.append(self.grid.val_map[x+off_x,y+off_y])
return value/count # n_value, np.mean(n_value)
class env:
def __init__(self) -> None:
self.grid = Grid(scale_factor=100)
self.map_size = self.grid.map_size
# print(type(self.map_size))
self.init_pop = 200 # initial population, 500
self.max_pop = 10000 # population cap, 6000
self.pop_step = 50 # population added per step
self.max_income = 20000 # maximum income
self.r = 0.005 # income growth rate
self.R = 0.002 # land-value growth rate
self.D = 0.1 # land-value depreciation rate
self.c1 = 1e-3 # weight of internal economic pressure
self.c2 = 1e-3 # weight of internal social pressure
self.ws = 1.0 # weight of the internal-pressure term (1 - S) in the location effect
self.wg = 1.0 # weight of the external-attractiveness term G in the location effect
self.a = 0.5 # weight used when updating land values
self.move_step = 7 # candidate relocation parcels are evaluated within a surrounding [10,10] window; move_step is the half side length
self.class_ratio = np.array([0.1,0.2,0.4,0.2,0.1]) # low, lower-middle, middle, upper-middle, high
# Initial income bounds for each class; these need to be updated continuously.
# The numbers are obtained by multiplying the maximum income by fixed coefficients,
# e.g. the low-income class spans [0, 0.175] of the maximum income, and so on.
self.income = np.array([[1000,1750], # low
[1750,3500], # lower-middle
[3500,5000], # middle
[5000,7500], # upper-middle
[7500,10000]]) # high
self.WT = 0.95 # relocation threshold
self.work_income() # assign a random income growth rate to each workplace
self.agent_pool = {}
self.pop_size = len(self.agent_pool)
self.gen_agent(N=self.init_pop)
def work_income(self,):
'''
Assign a different income growth rate to each workplace.
In principle, the maximum income should also differ between workplaces.
'''
self.num_work = self.grid.work_xy.shape[0]
self.r_work = [random.uniform(self.r*0.8,self.r*1.2) for _ in range(self.num_work)]
return None
# @nb.jit()
def step(self,t):
'''
Execute one simulation step:
update incomes, update land values, and run every agent's relocation decision;
update the income range of each class;
reset vacated parcels to 0.
'''
shuffle_list = random.sample(list(self.agent_pool),len(self.agent_pool))
t1 = time.time()
print('---------step ',t,'---------')
print('number of agents:',len(shuffle_list))
move_count = 0
for idx in shuffle_list:
agent = self.agent_pool[idx]
flag = self.move(agent.index) # decide whether to relocate, and carry out the move
if flag is True: move_count += 1
t2 = time.time()
self.update_income()
t3 = time.time()
self.update_value()
t4 = time.time()
if len(self.agent_pool) < self.max_pop:
self.gen_agent(N=self.pop_step)
t5 = time.time()
print('move:%.3f,update income:%.3f,update value:%.3f,generate agent:%.3f'%(t2-t1,t3-t2,t4-t3,t5-t4))
return move_count
def update_income(self):
'''
Update every agent's income and class membership.
Update the income range of each class.
'''
self.income_list = []
shuffle_pool = random.sample(list(self.agent_pool),len(self.agent_pool)) # shuffle the update order
for idx in shuffle_pool:
a = self.agent_pool[idx]
a_work = a.work_id
income = a.update_income(self.r_work[a_work], self.max_income)
self.income_list.append(income)
max_income = np.max(self.income_list)  # api: numpy.max
import numpy as np
import math
import time
import json
import os
from sklearn.neural_network import MLPClassifier
import controller as c
import utils as u
class Neuro_controller(c.Controller):
def __init__(self, config):
super(Neuro_controller, self).__init__("NEURO", config)
self.n_sonar = 0
self.ann = 0
self.distance = 0
self.ranges = []
self.odom = (-1, -1)
self.origin = (-1, -1)
self.distance = 0
with open('../conf/controller-neuro.json', 'r') as fp:
f = json.load(fp)
self.weights = f['weights']
self.hidden_layer = f['hidden_layer']
self.activation = f['activation']
self.time = f['time'] * 1000
self.epoch_time = self.time
self.evolve = f['evolve']
self.log = open("../logs/neuro.log", 'w')
self.cur_detected_edges = []
self.actual_sensor_angles = []
self.cur_detected_edges_distances = []
def on_collision(self, pos):
self.time = 0
def handle_collision(self, col):
"""
Registers the currently detected edges so they can be represented by the simulator.
"""
self.cur_detected_edges = col
def update_sensor_angles(self, angles, distances):
"""
Updates the list representing the actual orientation of the sensors so that the simulator may
provide a proper representation. It also adjusts distances if needed.
"""
self.cur_detected_edges_distances = distances
self.actual_sensor_angles = angles
for i in range(0, len(self.cur_detected_edges_distances)):
if self.cur_detected_edges_distances[i] is math.inf:
self.cur_detected_edges_distances[i] = 10000
def __normalize(self, dst):
r = []
for i in range(len(dst)):
r.append(self.robot.vision_range[1] / dst[i])
r = np.asarray(r).reshape(1, self.input_layer_size)
return r
def __output_fitness(self):
f = open('../res/fitness.txt', 'w')
f.write(str(self.fitness()))
f.close()
def control(self, dst):
self.update_sensor_angles(self.ang, dst)
self.distance += np.linalg.norm((self.robot.x - self.odom[0],\
self.robot.y - self.odom[1]))
self.odom = (self.robot.x, self.robot.y)
if self.evolve:
self.time -= u.delta
if self.time <= 0:
if self.evolve:
self.__output_fitness()
self.time = self.epoch_time
self.odom = self.origin
self.distance = 0
self.robot.positon(self.origin[0], self.origin[1])
self.robot.orientation = 0
self.robot.stop()
self.robot.acceleration = 0
self.__build_network()
return 0, 0
inpt = self.__normalize(dst)
pred = self.ann.predict(inpt)
prob = self.ann.predict_proba(inpt)
ang = prob[0][0]
if ang > 180:
ang = ang - 360
spd = prob[0][1]
out = (ang, spd)
self.log_step(inpt, out)
return out
def log_step(self, dst, out):
self.log.write("[" + str(time.time())+"]\tOdom: "+str(self.odom)+"\t-\tIn: "+str(dst)+"\t-\tOut: "+str(out)+"\n")
def register_robot(self, r):
super(Neuro_controller, self).register_robot(r)
self.odom = (self.robot.x, self.robot.y)
self.origin = (self.robot.x, self.robot.y)
self.__build_network()
def __build_network(self):
if type(self.robot.sensors) is list:
self.n_sonar = len(self.robot.sensors)
else:
self.n_sonar = self.robot.sensors
self.input_layer_size = self.n_sonar
hidden_layer_size = tuple(self.hidden_layer) # Layers are fully connected
# (1) defines one hidden layer with one neuron
init_data = np.asarray([0 for _ in range(self.input_layer_size)])
init_data = init_data.reshape(1, self.input_layer_size)
self.ann = MLPClassifier(hidden_layer_sizes = hidden_layer_size,
activation=self.activation, # You can try another activation function
solver='adam', # This is not used at all
warm_start = True,
max_iter = 1)
self.ann.fit(init_data, [[360, self.robot.max_speed]])
self.ann.out_activation_ = 'identity'
if self.evolve:
self.__load_new_params()
else:
self.set_network_params(self.weights)
def fitness(self):
return self.distance + np.linalg.norm((self.odom[0] - self.origin[0],\
self.odom[1] - self.origin[1]))
def has_cur_detected_edge_list(self):
"""
Always returns true, given that the controller keeps track of the currently detected edges.
"""
return True
def set_network_params(self, weights):
shapes = self.__calculate_shape(self.ann.coefs_)
print(shapes)
cutoff = []
for i in range(len(shapes)):
if cutoff:
cutoff.append(shapes[i][0] * shapes[i][1] + cutoff[-1])
else:
cutoff.append(shapes[i][0] * shapes[i][1])
print("Cutoff: {}".format(cutoff))
w = []
b = []
for i in range(len(shapes)):
b.append(np.asarray([0 for _ in range(shapes[i][1])]))
if i > 0: # General case
w.append(np.asarray(weights[cutoff[i-1]:cutoff[i]]).reshape(shapes[i]))
else: # First position
w.append(np.asarray(weights[:cutoff[i]]).reshape(shapes[i]))  # api: numpy.asarray; reshape mirrors the general case above
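# --- Added helper sketch (not part of the original controller; the snippet above is truncated here) ---
# The flat 'weights' genome consumed by set_network_params must provide one value per
# connection, i.e. the sum over consecutive layer pairs of n_in * n_out for the shapes
# of ann.coefs_. The helper below is an assumption-labelled convenience for computing
# that length from a layer-size list such as [n_sonar, *hidden_layer, 2].
def _expected_genome_length(layer_sizes):
    return sum(n_in * n_out for n_in, n_out in zip(layer_sizes[:-1], layer_sizes[1:]))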
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
The Phantom class is instantiated with a ground-truth phantom and corresponding material properties data. The get_projections method simulates data acquisition and returns radiographs for the specified theta values.
"""
import sys
import os
import numpy as np
import pandas as pd
from scipy import misc
import h5py
import time
from scipy.integrate import simps
import matplotlib.pyplot as plt
import cv2
from tomopy import project
from scipy.ndimage.filters import gaussian_filter
from tomo_twin.pg_filter import add_phase_contrast
model_data_path = '../model_data'
class Phantom:
def __init__(self, vol, materials, res, energy_pts, bits = 16, data_path = model_data_path):
'''
Parameters
----------
vol : np.array
labeled (segmented / ground-truth) volume. voxel values are in finite range [0,...n_materials-1].
materials : dict
dict of material names and their respective density g/cc, e.g. {"Fe" : 7.87, "Al": 2.7}
res : float
voxel size in microns
energy_pts : float or np.array
list of energies
bits : int
16 for 16 bit camera
data_path : str
path to exported XOP data
'''
# deal with materials
self.bits = bits
self.res = res
self.data_path = data_path
self.energy_pts = np.asarray(energy_pts) if type(energy_pts) is float else energy_pts
self.materials = [Material(key, value, \
self.res, \
self.energy_pts, \
data_path = self.data_path) for key, value in materials.items()]
self.sigma_mat = np.concatenate([material.sigma for material in self.materials], axis = 1)
# some numbers
self.n_mat = len(self.materials)
self.n_energies = np.size(self.energy_pts)
# deal with labeled volume
self.vol = vol
self.vol_shape = self.vol.shape
if self.vol.max() != (len(self.materials)-1):
raise ValueError("Number of materials does not match voxel value range.")
if len(self.vol_shape) not in (2,3): raise ValueError("vol must have either 2 or 3 dimensions.")
self.ray_axis = 1 if len(self.vol_shape) == 3 else 0
if len(self.vol_shape) == 3:
self.proj_shape = (self.vol_shape[0], self.vol_shape[-1])
else:
self.proj_shape = (self.vol_shape[-1],)
self.make_volume() # blows up volume into individual energies
def make_volume(self):
'''
Converts the labeled GT volume provided into a volume of sigma values (attenuation coefficient, density and pixel size as path length). The resulting shape is (nz, ny, nx) or (n_energies, nz, ny, nx). The "energy" channel is added if multiple energies are requested.
'''
voxel_vals = np.arange(self.n_mat)
self.vol = np.asarray([self.vol]*self.n_energies, dtype = np.float32)
for ie in range(self.n_energies):
for voxel in voxel_vals:
self.vol[ie, self.vol[ie] == voxel] = self.sigma_mat[ie,voxel]
if self.n_energies == 1:
self.vol = self.vol[0]
return
else:
return
def get_projections(self, theta = (0,180,180), beam = None, noise = 0.01, blur_size = 5, detector_dist = 0.0):
'''
Acquire projections on the phantom.
Returns
-------
np.array
output shape is a stack of radiographs (nthetas, nrows, ncols)
Parameters
----------
theta : tuple
The tuple must be defined as (starting_theta, ending_theta, number_projections). The angle is intepreted as degrees.
beam : np.array
The flat-field (beam array) must be provided with shape (1, nrows, ncols) or (n_energies, nrows, ncols).
noise : float
The noise parameter is interpreted as a fraction (0,1). The noise transforms the pixel map I(y,x) in the projection space as I(y,x) --> I(y,x)*(1 + N(mu=0, sigma=noise)).
'''
# make theta array in radians
theta = np.linspace(*theta, endpoint = True)
theta = np.radians(theta)
# make beam array (if not passed)
if beam is None:
beam = np.ones(self.proj_shape, dtype = np.float32)
beam = beam*(2**self.bits-1)
# if monochromatic beam
if self.n_energies == 1:
projs = project(self.vol, theta, pad = False, emission = False)
projs = projs*beam
# scintillator / detector blurring
if blur_size > 0:
projs = [proj for proj in projs]
projs = Parallelize(projs, gaussian_filter, \
procs = 12, \
sigma = 0.3*(0.5*(blur_size - 1) - 1) + 0.8, \
order = 0)
projs = np.asarray(projs)
# in-line phase contrast based on detector-sample distance (cm)
if detector_dist > 0.0:
pad_h = int(projs.shape[1]*0.4)
projs = np.pad(projs, ((0,0), (pad_h,pad_h), (0,0)), mode = 'reflect')
projs = add_phase_contrast(projs, \
pixel_size = self.res*1e-04, \
energy = float(self.energy_pts), \
dist = detector_dist)
projs = projs[:,pad_h:-pad_h,:]
# Poisson noise model (approximated as normal distribution)
projs = np.random.normal(projs, noise*np.sqrt(projs))
# projs = np.random.poisson(projs)
# This actually worked fine
# projs = projs*beam*(1 + np.random.normal(0, noise, projs.shape))
# if polychromatic beam
else:
projs = Parallelize(theta.tolist(), \
_project_at_theta, \
vol = self.vol, \
n_energies = self.n_energies, \
beam = beam, \
noise = noise, procs = 12)
projs = np.asarray(projs)
# saturated pixels
projs = np.clip(projs, 0, 2**self.bits-1)
return projs.astype(np.uint16)
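# --- Added usage sketch (not part of the original module) --------------------------------
# Hedged example of acquiring a small monochromatic dataset from a two-material phantom.
# The material names, densities, voxel size and energy below are illustrative, and the
# sketch assumes the XOP-exported *_properties_xCrossSec.dat files for these materials
# exist under model_data_path.
def _phantom_usage_sketch():
    labels = np.zeros((64, 64, 64), dtype=np.uint8)
    labels[16:48, 16:48, 16:48] = 1  # a cube of material 1 embedded in material 0
    ph = Phantom(labels, {"Al": 2.7, "Fe": 7.87}, res=1.3,
                 energy_pts=20.0, data_path=model_data_path)
    projs = ph.get_projections(theta=(0, 180, 90), noise=0.01,
                               blur_size=0, detector_dist=0.0)
    return projs.shape  # roughly (90, 64, 64): one radiograph per projection angle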
class Material:
# Ideas borrowed from <NAME>'s code for BeamHardeningCorrections (7-BM github)
def __init__(self, name, density, path_len, energy_pts, scintillator_flag = False, data_path = None):
"""
Parameters
----------
name : str
string describing material name. Typically, use chemical formula, e.g. Fe, Cu, etc.
density : float
g/cm3 units
path_len : float
thickness for components (filters, scintillators, etc.) and pixel size for materials in phantom
energy_pts : np array
listing the energy_pts requested. shape is (n,)
scintillator_flag : bool
return absorption data instead of attenuation, if material is scintillator
sigma : np.array
sigma array with dimensions (n_energies, 1)
att_coeff : np.array
mass attenuation coefficient array (n_energies, 1)
data_path : str
path to exported XOP data
"""
self.name = name
self.data_path = data_path
self.density = density # g/cc
self.scintillator_flag = scintillator_flag
self.path_len = path_len # um
self.energy_pts = energy_pts
self.calc_sigma()
def read_attcoeff(self):
"""
# att_coeff : cm2/g units, array dimensions of (n_energies,)
"""
df = pd.read_csv(os.path.join(self.data_path, 'materials', self.name + "_properties_xCrossSec.dat"), sep = '\t', delimiter = " ", header = None)
old_energy_pts = np.asarray(df[0])/1000.0
if self.scintillator_flag:
att_coeff = np.asarray(df[3])  # api: numpy.asarray
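# --- Added note ---------------------------------------------------------------------------
# calc_sigma() is not shown in this excerpt. Dimensionally, the per-voxel attenuation
# factor used by Phantom.make_volume would follow Beer-Lambert as
#   sigma = att_coeff [cm^2/g] * density [g/cm^3] * path_len [um] * 1e-4 [cm/um],
# one value per energy point; this is an inference from the docstrings above, not the
# original implementation.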
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 19 11:11:49 2017
@author: robertmarsland
"""
from __future__ import division
import numpy as np
import pandas as pd
from numpy.random import dirichlet
import numbers
#DEFAULT PARAMETERS FOR CONSUMER AND METABOLIC MATRICES, AND INITIAL STATE
a_default = {'sampling':'Binary', #{'Gaussian','Binary','Gamma'} specifies choice of sampling algorithm
'SA': 60*np.ones(3), #Number of species in each specialist family (here, 3 families of 60 species)
'MA': 3*np.ones(3), #Number of resources in each class
'Sgen': 30, #Number of generalist species (unbiased sampling over alll resource classes)
'muc': 10, #Mean sum of consumption rates (used in all models)
'sigc': 3, #Standard deviation of sum of consumption rates for Gaussian and Gamma models
'q': 0.0, #Preference strength of specialist families (0 for generalist and 1 for specialist)
'c0':0.0, #Sum of background consumption rates in binary model
'c1':1., #Specific consumption rate in binary model
'l':0.8, #Leakage fraction
'fs':0.45, #Fraction of secretion flux with same resource type
'fw':0.45, #Fraction of secretion flux to 'waste' resource
'sparsity':0.2, #Effective sparsity of metabolic matrix (between 0 and 1)
'n_wells':10, #Number of independent wells
'S':100, #Number of species per well (randomly sampled from the pool of size Stot = sum(SA) + Sgen)
'food':0, #index of food source (when a single resource is supplied externally)
'R0_food':1000, #unperturbed fixed point for supplied food
'regulation':'independent', #metabolic regulation (see dRdt)
'response':'type I', #functional response (see dRdt)
'supply':'off' #resource supply (see dRdt)
}
def MakeInitialState(assumptions):
"""
Construct stochastically colonized initial state, at unperturbed resource fixed point.
assumptions = dictionary of metaparameters
'SA' = number of species in each family
'MA' = number of resources of each type
'Sgen' = number of generalist species
'n_wells' = number of independent wells in the experiment
'S' = initial number of species per well
'food' = index of supplied "food" resource
'R0_food' = unperturbed fixed point for supplied food resource
Returns:
N0 = initial consumer populations
R0 = initial resource concentrations
"""
#PREPARE VARIABLES
#Force number of species to be an array:
if isinstance(assumptions['MA'],numbers.Number):
assumptions['MA'] = [assumptions['MA']]
if isinstance(assumptions['SA'],numbers.Number):
assumptions['SA'] = [assumptions['SA']]
#Force numbers of species to be integers:
assumptions['MA'] = np.asarray(assumptions['MA'],dtype=int)
assumptions['SA'] = np.asarray(assumptions['SA'],dtype=int)
assumptions['Sgen'] = int(assumptions['Sgen'])
#Extract total numbers of resources, consumers, resource types, and consumer families:
M = int(np.sum(assumptions['MA']))
T = len(assumptions['MA'])
S_tot = int(np.sum(assumptions['SA'])+assumptions['Sgen'])
F = len(assumptions['SA'])
#Construct lists of names of resources, consumers, resource types, consumer families and wells:
resource_names = ['R'+str(k) for k in range(M)]
type_names = ['T'+str(k) for k in range(T)]
family_names = ['F'+str(k) for k in range(F)]
consumer_names = ['S'+str(k) for k in range(S_tot)]
resource_index = [[type_names[m] for m in range(T) for k in range(assumptions['MA'][m])],
resource_names]
consumer_index = [[family_names[m] for m in range(F) for k in range(assumptions['SA'][m])]
+['GEN' for k in range(assumptions['Sgen'])],consumer_names]
well_names = ['W'+str(k) for k in range(assumptions['n_wells'])]
R0 = np.zeros((M,assumptions['n_wells']))
N0 = np.zeros((S_tot,assumptions['n_wells']))
if not isinstance(assumptions['food'],int):
assert len(assumptions['food']) == assumptions['n_wells'], 'Length of food vector must equal n_wells.'
food_list = assumptions['food']
else:
food_list = np.ones(assumptions['n_wells'],dtype=int)*assumptions['food']
if not (isinstance(assumptions['R0_food'],int) or isinstance(assumptions['R0_food'],float)):
assert len(assumptions['R0_food']) == assumptions['n_wells'], 'Length of food vector must equal n_wells.'
R0_food_list = assumptions['R0_food']
else:
R0_food_list = np.ones(assumptions['n_wells'],dtype=int)*assumptions['R0_food']
for k in range(assumptions['n_wells']):
N0[np.random.choice(S_tot,size=assumptions['S'],replace=False),k]=1.
R0[food_list[k],k] = R0_food_list[k]
N0 = pd.DataFrame(N0,index=consumer_index,columns=well_names)
R0 = pd.DataFrame(R0,index=resource_index,columns=well_names)
return N0, R0, M, T, S_tot, F
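# --- Added usage sketch (not part of the original module) ---------------------------------
# Hedged example of drawing an initial condition from the default metaparameters above:
# with 3 families of 60 specialists plus 30 generalists and 3 classes of 3 resources,
# N0 has one row per species and one column per well (100 species colonised per well),
# and R0 places R0_food units of the supplied resource in every well.
def _initial_state_example():
    N0, R0, M, T, S_tot, F = MakeInitialState(dict(a_default))
    return N0.shape, R0.shape, (M, T, S_tot, F)  # ((210, 10), (9, 10), (9, 3, 210, 3))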
def MakeMatrices(assumptions):
"""
Construct consumer matrix and metabolic matrix.
assumptions = dictionary of metaparameters
'sampling' = {'Gaussian','Binary','Gamma'} specifies choice of sampling algorithm
'SA' = number of species in each family
'MA' = number of resources of each type
'Sgen' = number of generalist species
'muc' = mean sum of consumption rates
'sigc' = standard deviation for Gaussian sampling of consumer matrix
'q' = family preference strength (from 0 to 1)
'c0' = row sum of background consumption rates for Binary sampling
'c1' = specific consumption rate for Binary sampling
'fs' = fraction of secretion flux into same resource type
'fw' = fraction of secretion flux into waste resource type
'sparsity' = effective sparsity of metabolic matrix (from 0 to 1)
'waste_type' = index of resource type to designate as "waste"
Returns:
c = consumer matrix
D = metabolic matrix
"""
#PREPARE VARIABLES
#Force number of species to be an array:
if isinstance(assumptions['MA'],numbers.Number):
assumptions['MA'] = [assumptions['MA']]
if isinstance(assumptions['SA'],numbers.Number):
assumptions['SA'] = [assumptions['SA']]
#Force numbers of species to be integers:
assumptions['MA'] = np.asarray(assumptions['MA'],dtype=int)
assumptions['SA'] = np.asarray(assumptions['SA'],dtype=int)
assumptions['Sgen'] = int(assumptions['Sgen'])
#Default waste type is last type in list:
if 'waste_type' not in assumptions.keys():
assumptions['waste_type']=len(assumptions['MA'])-1
#Extract total numbers of resources, consumers, resource types, and consumer families:
M = np.sum(assumptions['MA'])
T = len(assumptions['MA'])
S = np.sum(assumptions['SA'])+assumptions['Sgen']
F = len(assumptions['SA'])
M_waste = assumptions['MA'][assumptions['waste_type']]
#Construct lists of names of resources, consumers, resource types, and consumer families:
resource_names = ['R'+str(k) for k in range(M)]
type_names = ['T'+str(k) for k in range(T)]
family_names = ['F'+str(k) for k in range(F)]
consumer_names = ['S'+str(k) for k in range(S)]
waste_name = type_names[assumptions['waste_type']]
resource_index = [[type_names[m] for m in range(T) for k in range(assumptions['MA'][m])],
resource_names]
consumer_index = [[family_names[m] for m in range(F) for k in range(assumptions['SA'][m])]
+['GEN' for k in range(assumptions['Sgen'])],consumer_names]
#PERFORM GAUSSIAN SAMPLING
if assumptions['sampling'] == 'Gaussian':
#Initialize dataframe:
c = pd.DataFrame(np.zeros((S,M)),columns=resource_index,index=consumer_index)
#Add Gaussian-sampled values, biasing consumption of each family towards its preferred resource:
for k in range(F):
for j in range(T):
if k==j:
c_mean = (assumptions['muc']/M)*(1+assumptions['q']*(M-assumptions['MA'][j])/assumptions['MA'][j])
c_var = (assumptions['sigc']**2/M)*(1+assumptions['q']*(M-assumptions['MA'][j])/assumptions['MA'][j])
else:
c_mean = (assumptions['muc']/M)*(1-assumptions['q'])
c_var = (assumptions['sigc']**2/M)*(1-assumptions['q'])
c.loc['F'+str(k)]['T'+str(j)] = c_mean + np.random.randn(assumptions['SA'][k],assumptions['MA'][j])*np.sqrt(c_var)
if 'GEN' in c.index:
c_mean = assumptions['muc']/M
c_var = assumptions['sigc']**2/M
c.loc['GEN'] = c_mean + np.random.randn(assumptions['Sgen'],M)*np.sqrt(c_var)
#PERFORM BINARY SAMPLING
elif assumptions['sampling'] == 'Binary':
assert assumptions['muc'] < M*assumptions['c1'], 'muc not attainable with given M and c1.'
#Construct uniform matrix at total background consumption rate c0:
c = pd.DataFrame(np.ones((S,M))*assumptions['c0']/M,columns=resource_index,index=consumer_index)
#Sample binary random matrix blocks for each pair of family/resource type:
for k in range(F):
for j in range(T):
if k==j:
p = (assumptions['muc']/(M*assumptions['c1']))*(1+assumptions['q']*(M-assumptions['MA'][j])/assumptions['MA'][j])
else:
p = (assumptions['muc']/(M*assumptions['c1']))*(1-assumptions['q'])
c.loc['F'+str(k)]['T'+str(j)] = (c.loc['F'+str(k)]['T'+str(j)].values
+ assumptions['c1']*BinaryRandomMatrix(assumptions['SA'][k],assumptions['MA'][j],p))
#Sample uniform binary random matrix for generalists:
if 'GEN' in c.index:
p = assumptions['muc']/(M*assumptions['c1'])
c.loc['GEN'] = c.loc['GEN'].values + assumptions['c1']*BinaryRandomMatrix(assumptions['Sgen'],M,p)
elif assumptions['sampling'] == 'Gamma':
#Initialize dataframe
c = pd.DataFrame(np.zeros((S,M)),columns=resource_index,index=consumer_index)
#Add Gamma-sampled values, biasing consumption of each family towards its preferred resource
for k in range(F):
for j in range(T):
if k==j:
c_mean = (assumptions['muc']/M)*(1+assumptions['q']*(M-assumptions['MA'][j])/assumptions['MA'][j])
c_var = (assumptions['sigc']**2/M)*(1+assumptions['q']*(M-assumptions['MA'][j])/assumptions['MA'][j])
thetac = c_var/c_mean
kc = c_mean**2/c_var
c.loc['F'+str(k)]['T'+str(j)] = np.random.gamma(kc,scale=thetac,size=(assumptions['SA'][k],assumptions['MA'][j]))
else:
c_mean = (assumptions['muc']/M)*(1-assumptions['q'])
c_var = (assumptions['sigc']**2/M)*(1-assumptions['q'])
thetac = c_var/c_mean
kc = c_mean**2/c_var
c.loc['F'+str(k)]['T'+str(j)] = np.random.gamma(kc,scale=thetac,size=(assumptions['SA'][k],assumptions['MA'][j]))
if 'GEN' in c.index:
c_mean = assumptions['muc']/M
c_var = assumptions['sigc']**2/M
thetac = c_var/c_mean
kc = c_mean**2/c_var
c.loc['GEN'] = np.random.gamma(kc,scale=thetac,size=(assumptions['Sgen'],M))
#PERFORM GAUSSIAN SAMPLING
elif assumptions['sampling'] == 'Binary_Gamma':
assert assumptions['muc'] < M*assumptions['c1'], 'muc not attainable with given M and c1.'
#Construct uniform matrix at total background consumption rate c0:
c = pd.DataFrame(np.ones((S,M))*assumptions['c0']/M,columns=resource_index,index=consumer_index)
#Sample binary random matrix blocks for each pair of family/resource type:
for k in range(F):
for j in range(T):
if k==j:
p = (assumptions['muc']/(M*assumptions['c1']))*(1+assumptions['q']*(M-assumptions['MA'][j])/assumptions['MA'][j])
c_mean = (assumptions['muc']/M)*(1+assumptions['q']*(M-assumptions['MA'][j])/assumptions['MA'][j])
c_var = (assumptions['sigc']**2/M)*(1+assumptions['q']*(M-assumptions['MA'][j])/assumptions['MA'][j])
else:
p = (assumptions['muc']/(M*assumptions['c1']))*(1-assumptions['q'])
c_mean = (assumptions['muc']/M)*(1-assumptions['q'])
c_var = (assumptions['sigc']**2/M)*(1-assumptions['q'])
c_mean_binary = assumptions['c0']+ assumptions['c1']*p
c_var_binary = assumptions['c1']**2 *p*(1-p)
c_mean_gamma = c_mean/c_mean_binary
c_var_gamma = (c_var - c_var_binary*(c_mean_gamma**2))/(c_var_binary + c_mean_binary**2)
thetac = c_var_gamma/c_mean_gamma
kc = c_mean_gamma**2/c_var_gamma
c.loc['F'+str(k)]['T'+str(j)] = (c.loc['F'+str(k)]['T'+str(j)].values + assumptions['c1']*BinaryRandomMatrix(assumptions['SA'][k],assumptions['MA'][j],p))*np.random.gamma(kc,scale=thetac,size=(assumptions['SA'][k],assumptions['MA'][j]))
#Sample uniform binary random matrix for generalists:
if 'GEN' in c.index:
p = assumptions['muc']/(M*assumptions['c1'])
c_mean = assumptions['muc']/M
c_var = assumptions['sigc']**2/M
c_mean_binary = assumptions['c0']+ assumptions['c1']*p
c_var_binary = assumptions['c1']**2 *p*(1-p)
c_mean_gamma = c_mean/c_mean_binary
c_var_gamma = (c_var - c_var_binary*(c_mean_gamma**2))/(c_var_binary + c_mean_binary**2)
thetac = c_var_gamma/c_mean_gamma
kc = c_mean_gamma**2/c_var_gamma
c.loc['GEN'] = (c.loc['GEN'].values + assumptions['c1']*BinaryRandomMatrix(assumptions['Sgen'],M,p))*np.random.gamma(kc,scale=thetac,size=(assumptions['Sgen'],M))  # api: numpy.random.gamma
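# --- Added note ---------------------------------------------------------------------------
# BinaryRandomMatrix is called throughout MakeMatrices but is not defined in this excerpt.
# Consistent with its usage, a minimal implementation would return an a x b matrix of
# independent Bernoulli(p) entries, e.g.
#   def BinaryRandomMatrix(a, b, p):
#       return (np.random.rand(a, b) < p).astype(float)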