import os
import tempfile
import numpy as np
import scipy.ndimage.measurements as meas
from functools import reduce
import warnings
import sys
sys.path.append(os.path.abspath(r'../lib'))
import NumCppPy as NumCpp # noqa E402
####################################################################################
def factors(n):
return set(reduce(list.__add__,
([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0)))
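# Illustrative sanity check (not part of the suite): the loop pairs each divisor
# i <= sqrt(n) with its cofactor n // i, so factors(12) == {1, 2, 3, 4, 6, 12}.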
####################################################################################
def test_seed():
np.random.seed(1)
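# Note: test_seed() pins numpy's global RNG so the randomized inputs below are
# reproducible across runs; pytest executes tests in definition order, so this
# runs before any of the tests that draw random shapes and data.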
####################################################################################
def test_abs():
randValue = np.random.randint(-100, -1, [1, ]).astype(np.double).item()
assert NumCpp.absScaler(randValue) == np.abs(randValue)
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.absScaler(value), 9) == np.round(np.abs(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.absArray(cArray), np.abs(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.absArray(cArray), 9), np.round(np.abs(data), 9))
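# The shape/data setup above repeats throughout this module. A hypothetical helper
# (random_int_array is an assumed name, not part of this suite) could factor it
# out; sketched here in comment form for reference only:
#
#   def random_int_array(low=-100, high=100):
#       shapeInput = np.random.randint(20, 100, [2, ])
#       shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
#       cArray = NumCpp.NdArray(shape)
#       data = np.random.randint(low, high, [shape.rows, shape.cols])
#       cArray.setArray(data)
#       return cArray, data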
####################################################################################
def test_add():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(cArray, value), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.add(value, cArray), data + value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.add(cArray1, cArray2), data1 + data2)
####################################################################################
def test_alen():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.alen(cArray) == shape.rows
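# np.alen (deprecated and later removed from numpy) returned the length of the
# first axis, which is why the NumCpp result is checked against shape.rows.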
####################################################################################
def test_all():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.all(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.all(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.all(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.all(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.all(data, axis=1))
####################################################################################
def test_allclose():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
tolerance = 1e-5
data1 = np.random.randn(shape.rows, shape.cols)
data2 = data1 + tolerance / 10
data3 = data1 + 1
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
assert NumCpp.allclose(cArray1, cArray2, tolerance) and not NumCpp.allclose(cArray1, cArray3, tolerance)
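# The check brackets the tolerance from both sides: data2 differs from data1 by
# tolerance / 10 (inside 1e-5) and data3 by 1 (outside), so a correct allclose
# must accept the first comparison and reject the second.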
####################################################################################
def test_amax():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amax(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.ROW).flatten(), np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amax(cArray, NumCpp.Axis.COL).flatten(), np.max(data, axis=1))
####################################################################################
def test_amin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.amin(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.ROW).flatten(), np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.amin(cArray, NumCpp.Axis.COL).flatten(), np.min(data, axis=1))
####################################################################################
def test_angle():
components = np.random.randint(-100, -1, [2, ]).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.angleScaler(value), 9) == np.round(np.angle(value), 9) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols]) + \
1j * np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.angleArray(cArray), 9), np.round(np.angle(data), 9))
####################################################################################
def test_any():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.any(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.any(data).item()
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.ROW).flatten().astype(bool), np.any(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.any(cArray, NumCpp.Axis.COL).flatten().astype(bool), np.any(data, axis=1))
####################################################################################
def test_append():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols])
data2 = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.append(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
numRows = np.random.randint(1, 100, [1, ]).item()
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + numRows, shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.append(data1, data2, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
numCols = np.random.randint(1, 100, [1, ]).item()
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + numCols)
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.append(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.append(data1, data2, axis=1))
####################################################################################
def test_arange():
start = np.random.randn(1).item()
stop = np.random.randn(1).item() * 100
step = np.abs(np.random.randn(1).item())
if stop < start:
step *= -1
data = np.arange(start, stop, step)
assert np.array_equal(np.round(NumCpp.arange(start, stop, step).flatten(), 9), np.round(data, 9))
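# Illustrative example of the contract under test: start=0.0, stop=1.0, step=0.25
# should yield [0.0, 0.25, 0.5, 0.75] from both implementations; the sign flip
# above keeps the step pointed from start toward stop when stop < start.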
####################################################################################
def test_arccos():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arccosScaler(value), 9) == np.round(np.arccos(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccosArray(cArray), 9), np.round(np.arccos(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccosArray(cArray), 9), np.round(np.arccos(data), 9))
####################################################################################
def test_arccosh():
value = np.abs(np.random.rand(1).item()) + 1
assert np.round(NumCpp.arccoshScaler(value), 9) == np.round(np.arccosh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arccoshScaler(value), 9) == np.round(np.arccosh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) + 1
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccoshArray(cArray), 9), np.round(np.arccosh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arccoshArray(cArray), 9), np.round(np.arccosh(data), 9))
####################################################################################
def test_arcsin():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arcsinScaler(value), 9) == np.round(np.arcsin(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arcsinScaler(value), 9) == np.round(np.arcsin(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinArray(cArray), 9), np.round(np.arcsin(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinArray(cArray), 9), np.round(np.arcsin(data), 9))
####################################################################################
def test_arcsinh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arcsinhScaler(value), 9) == np.round(np.arcsinh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arcsinhScaler(value), 9) == np.round(np.arcsinh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinhArray(cArray), 9), np.round(np.arcsinh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arcsinhArray(cArray), 9), np.round(np.arcsinh(data), 9))
####################################################################################
def test_arctan():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arctanScaler(value), 9) == np.round(np.arctan(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arctanScaler(value), 9) == np.round(np.arctan(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanArray(cArray), 9), np.round(np.arctan(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanArray(cArray), 9), np.round(np.arctan(data), 9))
####################################################################################
def test_arctan2():
xy = np.random.rand(2) * 2 - 1
assert np.round(NumCpp.arctan2Scaler(xy[1], xy[0]), 9) == np.round(np.arctan2(xy[1], xy[0]), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArrayX = NumCpp.NdArray(shape)
cArrayY = NumCpp.NdArray(shape)
xy = np.random.rand(*shapeInput, 2) * 2 - 1
xData = xy[:, :, 0].reshape(shapeInput)
yData = xy[:, :, 1].reshape(shapeInput)
cArrayX.setArray(xData)
cArrayY.setArray(yData)
assert np.array_equal(np.round(NumCpp.arctan2Array(cArrayY, cArrayX), 9), np.round(np.arctan2(yData, xData), 9))
####################################################################################
def test_arctanh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.arctanhScaler(value), 9) == np.round(np.arctanh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.arctanhScaler(value), 9) == np.round(np.arctanh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanhArray(cArray), 9), np.round(np.arctanh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.arctanhArray(cArray), 9), np.round(np.arctanh(data), 9))
####################################################################################
def test_argmax():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.NONE).item(), np.argmax(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.NONE).item(), np.argmax(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.ROW).flatten(), np.argmax(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.ROW).flatten(), np.argmax(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.COL).flatten(), np.argmax(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmax(cArray, NumCpp.Axis.COL).flatten(), np.argmax(data, axis=1))
####################################################################################
def test_argmin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.NONE).item(), np.argmin(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.NONE).item(), np.argmin(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.ROW).flatten(), np.argmin(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.ROW).flatten(), np.argmin(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.COL).flatten(), np.argmin(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.argmin(cArray, NumCpp.Axis.COL).flatten(), np.argmin(data, axis=1))
####################################################################################
def test_argsort():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
dataFlat = data.flatten()
assert np.array_equal(dataFlat[NumCpp.argsort(cArray, NumCpp.Axis.NONE).flatten().astype(np.uint32)],
dataFlat[np.argsort(data, axis=None)])
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
dataFlat = data.flatten()
assert np.array_equal(dataFlat[NumCpp.argsort(cArray, NumCpp.Axis.NONE).flatten().astype(np.uint32)],
dataFlat[np.argsort(data, axis=None)])
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pIdx = np.argsort(data, axis=0)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.ROW).astype(np.uint16)
allPass = True
for idx, row in enumerate(data.T):
if not np.array_equal(row[cIdx[:, idx]], row[pIdx[:, idx]]):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pIdx = np.argsort(data, axis=0)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.ROW).astype(np.uint16)
allPass = True
for idx, row in enumerate(data.T):
if not np.array_equal(row[cIdx[:, idx]], row[pIdx[:, idx]]):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pIdx = np.argsort(data, axis=1)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.COL).astype(np.uint16)
allPass = True
for idx, row in enumerate(data):
if not np.array_equal(row[cIdx[idx, :]], row[pIdx[idx, :]]): # noqa
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pIdx = np.argsort(data, axis=1)
cIdx = NumCpp.argsort(cArray, NumCpp.Axis.COL).astype(np.uint16)
allPass = True
for idx, row in enumerate(data):
if not np.array_equal(row[cIdx[idx, :]], row[pIdx[idx, :]]):
allPass = False
break
assert allPass
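# The axis tests above compare gathered values (row[cIdx] vs row[pIdx]) instead of
# the index arrays themselves: argsort is not unique in the presence of ties, so
# two correct implementations may return different but equally valid index orders.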
####################################################################################
def test_argwhere():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
randValue = np.random.randint(0, 100, [1, ]).item()
data2 = data > randValue
cArray.setArray(data2)
assert np.array_equal(NumCpp.argwhere(cArray).flatten(), np.argwhere(data.flatten() > randValue).flatten())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
randValue = np.random.randint(0, 100, [1, ]).item()
data2 = data > randValue
cArray.setArray(data2)
assert np.array_equal(NumCpp.argwhere(cArray).flatten(), np.argwhere(data.flatten() > randValue).flatten())
####################################################################################
def test_around():
value = np.abs(np.random.rand(1).item()) * np.random.randint(1, 10, [1, ]).item()
numDecimalsRound = np.random.randint(0, 10, [1, ]).astype(np.uint8).item()
assert NumCpp.aroundScaler(value, numDecimalsRound) == np.round(value, numDecimalsRound)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * np.random.randint(1, 10, [1, ]).item()
cArray.setArray(data)
numDecimalsRound = np.random.randint(0, 10, [1, ]).astype(np.uint8).item()
assert np.array_equal(NumCpp.aroundArray(cArray, numDecimalsRound), np.round(data, numDecimalsRound))
####################################################################################
def test_array_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, shapeInput)
data2 = np.random.randint(1, 100, shapeInput)
cArray1.setArray(data1)
cArray2.setArray(data1)
cArray3.setArray(data2)
assert NumCpp.array_equal(cArray1, cArray2) and not NumCpp.array_equal(cArray1, cArray3)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
cArray3 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data1)
cArray3.setArray(data2)
assert NumCpp.array_equal(cArray1, cArray2) and not NumCpp.array_equal(cArray1, cArray3)
####################################################################################
def test_array_equiv():
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput3 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput1[1].item(), shapeInput1[0].item())
shape3 = NumCpp.Shape(shapeInput3[0].item(), shapeInput3[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
data1 = np.random.randint(1, 100, shapeInput1)
data3 = np.random.randint(1, 100, shapeInput3)
cArray1.setArray(data1)
cArray2.setArray(data1.reshape([shapeInput1[1].item(), shapeInput1[0].item()]))
cArray3.setArray(data3)
assert NumCpp.array_equiv(cArray1, cArray2) and not NumCpp.array_equiv(cArray1, cArray3)
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput3 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput1[1].item(), shapeInput1[0].item())
shape3 = NumCpp.Shape(shapeInput3[0].item(), shapeInput3[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
cArray3 = NumCpp.NdArrayComplexDouble(shape3)
real1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
real3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
imag3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data3 = real3 + 1j * imag3
cArray1.setArray(data1)
cArray2.setArray(data1.reshape([shapeInput1[1].item(), shapeInput1[0].item()]))
cArray3.setArray(data3)
assert NumCpp.array_equiv(cArray1, cArray2) and not NumCpp.array_equiv(cArray1, cArray3)
####################################################################################
def test_asarray():
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayArray1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayArray1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayArray1DCopy(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayArray1DCopy(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2DCopy(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayArray2DCopy(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayVector1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayVector1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayVector1DCopy(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayVector1DCopy(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVector2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVector2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2DCopy(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayVectorArray2DCopy(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayDeque1D(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayDeque1D(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayDeque2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayDeque2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayList(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayList(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayIterators(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayIterators(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerIterators(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerIterators(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointer(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointer(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointer2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointer2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerShell(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerShell(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2D(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2D(*values), data)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
assert np.array_equal(NumCpp.asarrayPointerShellTakeOwnership(*values).flatten(), values)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
assert np.array_equal(NumCpp.asarrayPointerShellTakeOwnership(*values).flatten(), values)
values = np.random.randint(0, 100, [2, ]).astype(np.double)
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2DTakeOwnership(*values), data)
real = np.random.randint(0, 100, [2, ]).astype(np.double)
imag = np.random.randint(0, 100, [2, ]).astype(np.double)
values = real + 1j * imag
data = np.vstack([values, values])
assert np.array_equal(NumCpp.asarrayPointerShell2DTakeOwnership(*values), data)
####################################################################################
def test_astype():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
cArrayCast = NumCpp.astypeDoubleToUint32(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.uint32))
assert cArrayCast.dtype == np.uint32
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
cArrayCast = NumCpp.astypeDoubleToComplex(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.complex128))
assert cArrayCast.dtype == np.complex128
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
cArrayCast = NumCpp.astypeComplexToComplex(cArray).getNumpyArray()
assert np.array_equal(cArrayCast, data.astype(np.complex64))
assert cArrayCast.dtype == np.complex64
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
cArrayCast = NumCpp.astypeComplexToDouble(cArray).getNumpyArray()
warnings.filterwarnings('ignore', category=np.ComplexWarning)
assert np.array_equal(cArrayCast, data.astype(np.double))
warnings.filters.pop() # noqa
assert cArrayCast.dtype == np.double
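# The filter above silences numpy's ComplexWarning, which astype(np.double) raises
# when discarding the imaginary part; popping the filter afterwards restores the
# previous warning state.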
####################################################################################
def test_average():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.round(NumCpp.average(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.average(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.round(NumCpp.average(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.average(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.average(cArray, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, axis=1), 9))
####################################################################################
def test_averageWeighted():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cWeights = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [shape.rows, shape.cols])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.NONE).item(), 9) == \
np.round(np.average(data, weights=weights), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cWeights = NumCpp.NdArray(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
weights = np.random.randint(1, 5, [shape.rows, shape.cols])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.NONE).item(), 9) == \
np.round(np.average(data, weights=weights), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cWeights = NumCpp.NdArray(1, shape.cols)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [1, shape.rows])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cWeights = NumCpp.NdArray(1, shape.cols)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
weights = np.random.randint(1, 5, [1, shape.rows])
cArray.setArray(data)
cWeights.setArray(weights)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.ROW).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cWeights = NumCpp.NdArray(1, shape.rows)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
weights = np.random.randint(1, 5, [1, shape.cols])
cWeights.setArray(weights)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cWeights = NumCpp.NdArray(1, shape.rows)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
weights = np.random.randint(1, 5, [1, shape.cols])
cWeights.setArray(weights)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.averageWeighted(cArray, cWeights, NumCpp.Axis.COL).flatten(), 9),
np.round(np.average(data, weights=weights.flatten(), axis=1), 9))
####################################################################################
def test_binaryRepr():
value = np.random.randint(0, np.iinfo(np.uint64).max, [1, ], dtype=np.uint64).item()
assert NumCpp.binaryRepr(np.uint64(value)) == np.binary_repr(value, np.iinfo(np.uint64).bits)
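# Illustrative example: np.binary_repr(5, np.iinfo(np.uint64).bits) zero-pads to
# the full 64-bit width ('0' * 61 + '101'), and NumCpp.binaryRepr is expected to
# produce the same fixed-width string.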
####################################################################################
def test_bincount():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
assert np.array_equal(NumCpp.bincount(cArray, 0).flatten(), np.bincount(data.flatten(), minlength=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
minLength = int(np.max(data) + 10)
assert np.array_equal(NumCpp.bincount(cArray, minLength).flatten(),
np.bincount(data.flatten(), minlength=minLength))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
cWeights = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
weights = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
cWeights.setArray(weights)
assert np.array_equal(NumCpp.bincountWeighted(cArray, cWeights, 0).flatten(),
np.bincount(data.flatten(), minlength=0, weights=weights.flatten()))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
cWeights = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
weights = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint16)
cArray.setArray(data)
cWeights.setArray(weights)
minLength = int(np.max(data) + 10)
assert np.array_equal(NumCpp.bincountWeighted(cArray, cWeights, minLength).flatten(),
np.bincount(data.flatten(), minlength=minLength, weights=weights.flatten()))
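####################################################################################
# Illustrative sketch of the np.bincount semantics the tests above rely on: bin i
# counts occurrences of value i (or sums the matching weights), and minlength
# only pads the tail with zeros. Assumes only numpy.
def _bincount_identities():
    x = np.array([0, 1, 1, 3])
    w = np.array([0.5, 1.0, 1.0, 2.0])
    assert np.array_equal(np.bincount(x), [1, 2, 0, 1])
    assert np.bincount(x).sum() == x.size  # counts sum to the number of samples
    assert np.isclose(np.bincount(x, weights=w).sum(), w.sum())  # weighted bins sum to sum(w)
    assert np.bincount(x, minlength=10).size == 10  # tail is zero-padded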
####################################################################################
def test_bitwise_and():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt64(shape)
cArray2 = NumCpp.NdArrayUInt64(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.bitwise_and(cArray1, cArray2), np.bitwise_and(data1, data2))
####################################################################################
def test_bitwise_not():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt64(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray.setArray(data)
assert np.array_equal(NumCpp.bitwise_not(cArray), np.bitwise_not(data))
####################################################################################
def test_bitwise_or():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt64(shape)
cArray2 = NumCpp.NdArrayUInt64(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.bitwise_or(cArray1, cArray2), np.bitwise_or(data1, data2))
####################################################################################
def test_bitwise_xor():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt64(shape)
cArray2 = NumCpp.NdArrayUInt64(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
data2 = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.bitwise_xor(cArray1, cArray2), np.bitwise_xor(data1, data2))
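####################################################################################
# Illustrative sketch: the three binary bitwise tests above are tied together by
# the identity a ^ b == (a | b) & ~(a & b), checked here with plain numpy uint64
# arrays of the same value range the tests use.
def _xor_identity():
    a = np.random.randint(0, 100, [5, 5]).astype(np.uint64)
    b = np.random.randint(0, 100, [5, 5]).astype(np.uint64)
    lhs = np.bitwise_xor(a, b)
    rhs = np.bitwise_and(np.bitwise_or(a, b), np.bitwise_not(np.bitwise_and(a, b)))
    assert np.array_equal(lhs, rhs)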
####################################################################################
def test_byteswap():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt64(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint64)
cArray.setArray(data)
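    # note: only the shape is asserted here; element-level agreement with
    # numpy's data.byteswap() is assumed but not checked by this test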
assert np.array_equal(NumCpp.byteswap(cArray).shape, shapeInput)
####################################################################################
def test_cbrt():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cbrtArray(cArray), 9), np.round(np.cbrt(data), 9))
####################################################################################
def test_ceil():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.ceilArray(cArray), 9), np.round(np.ceil(data), 9))
####################################################################################
def test_center_of_mass():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.NONE).flatten(), 9),
np.round(meas.center_of_mass(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
coms = list()
for col in range(data.shape[1]):
coms.append(np.round(meas.center_of_mass(data[:, col])[0], 9))
assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.ROW).flatten(), 9), np.round(coms, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols).astype(np.double) * 1000
cArray.setArray(data)
coms = list()
for row in range(data.shape[0]):
coms.append(np.round(meas.center_of_mass(data[row, :])[0], 9))
assert np.array_equal(np.round(NumCpp.centerOfMass(cArray, NumCpp.Axis.COL).flatten(), 9), np.round(coms, 9))
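####################################################################################
# Illustrative sketch of what scipy's center_of_mass computes for a 1-D signal:
# the intensity-weighted mean index sum(i * x_i) / sum(x_i) (scipy returns it
# wrapped in a tuple, hence the [0] indexing above). Assumes only numpy.
def _center_of_mass_1d(x):
    x = np.asarray(x, dtype=np.double)
    return (np.arange(x.size) * x).sum() / x.sum()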
####################################################################################
def test_clip():
value = np.random.randint(0, 100, [1, ]).item()
minValue = np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item()
assert NumCpp.clipScaler(value, minValue, maxValue) == np.clip(value, minValue, maxValue)
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
minValue = np.random.randint(0, 10, [1, ]).item() + 1j * np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
assert NumCpp.clipScaler(value, minValue, maxValue) == np.clip(value, minValue, maxValue) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
minValue = np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item()
assert np.array_equal(NumCpp.clipArray(cArray, minValue, maxValue), np.clip(data, minValue, maxValue))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
minValue = np.random.randint(0, 10, [1, ]).item() + 1j * np.random.randint(0, 10, [1, ]).item()
maxValue = np.random.randint(90, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
assert np.array_equal(NumCpp.clipArray(cArray, minValue, maxValue), np.clip(data, minValue, maxValue)) # noqa
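####################################################################################
# Illustrative note: numpy orders complex values lexicographically (real part
# first, then imaginary part), which is what makes np.clip on the complex arrays
# above well defined. A minimal sketch of that ordering, assuming only numpy:
def _complex_ordering():
    assert np.maximum(1 + 5j, 2 + 0j) == 2 + 0j  # real parts differ: 2 > 1
    assert np.maximum(1 + 5j, 1 + 2j) == 1 + 5j  # real parts tie: compare imag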
####################################################################################
def test_column_stack():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.column_stack(cArray1, cArray2, cArray3, cArray4),
np.column_stack([data1, data2, data3, data4]))
####################################################################################
def test_complex():
real = np.random.rand(1).astype(np.double).item()
value = complex(real)
assert np.round(NumCpp.complexScaler(real), 9) == np.round(value, 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.complexScaler(components[0], components[1]), 9) == np.round(value, 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
realArray = NumCpp.NdArray(shape)
real = np.random.rand(shape.rows, shape.cols)
realArray.setArray(real)
assert np.array_equal(np.round(NumCpp.complexArray(realArray), 9), np.round(real + 1j * np.zeros_like(real), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
realArray = NumCpp.NdArray(shape)
imagArray = NumCpp.NdArray(shape)
real = np.random.rand(shape.rows, shape.cols)
imag = np.random.rand(shape.rows, shape.cols)
realArray.setArray(real)
imagArray.setArray(imag)
assert np.array_equal(np.round(NumCpp.complexArray(realArray, imagArray), 9), np.round(real + 1j * imag, 9))
####################################################################################
def test_concatenate():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.NONE).flatten(),
np.concatenate([data1.flatten(), data2.flatten(), data3.flatten(), data4.flatten()]))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape3 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape4 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.ROW),
np.concatenate([data1, data2, data3, data4], axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.concatenate(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.COL),
np.concatenate([data1, data2, data3, data4], axis=1))
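####################################################################################
# Illustrative sketch of the shape bookkeeping the concatenate tests rely on:
# Axis.NONE flattens every input, Axis.ROW (numpy axis=0) sums row counts and
# requires matching column counts, and Axis.COL (numpy axis=1) is the transpose
# of that. Assumes only numpy.
def _concatenate_shapes():
    a, b = np.ones([2, 3]), np.ones([4, 3])
    assert np.concatenate([a, b], axis=0).shape == (6, 3)  # rows add up
    c, d = np.ones([2, 3]), np.ones([2, 5])
    assert np.concatenate([c, d], axis=1).shape == (2, 8)  # cols add up
    assert np.concatenate([a.flatten(), b.flatten()]).shape == (18,)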
####################################################################################
def test_conj():
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.conjScaler(value), 9) == np.round(np.conj(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.conjArray(cArray), 9), np.round(np.conj(data), 9))
####################################################################################
def test_contains():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
value = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert NumCpp.contains(cArray, value, NumCpp.Axis.NONE).getNumpyArray().item() == (value in data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert NumCpp.contains(cArray, value, NumCpp.Axis.NONE).getNumpyArray().item() == (value in data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
value = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.COL).getNumpyArray().flatten(), np.asarray(truth))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
for row in data:
truth.append(value in row)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.COL).getNumpyArray().flatten(), np.asarray(truth))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
value = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
    for col in data.T:
        truth.append(value in col)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.asarray(truth))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
value = np.random.randint(0, 100, [1, ]).item() + 1j * np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
truth = list()
    for col in data.T:
        truth.append(value in col)
assert np.array_equal(NumCpp.contains(cArray, value, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.asarray(truth))
####################################################################################
def test_copy():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.copy(cArray), data)
####################################################################################
def test_copysign():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.copysign(cArray1, cArray2), np.copysign(data1, data2))
####################################################################################
def test_copyto():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray()
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
assert np.array_equal(NumCpp.copyto(cArray2, cArray1), data1)
####################################################################################
def test_cos():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.cosScaler(value), 9) == np.round(np.cos(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.cosScaler(value), 9) == np.round(np.cos(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cosArray(cArray), 9), np.round(np.cos(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cosArray(cArray), 9), np.round(np.cos(data), 9))
####################################################################################
def test_cosh():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.coshScaler(value), 9) == np.round(np.cosh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.coshScaler(value), 9) == np.round(np.cosh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.coshArray(cArray), 9), np.round(np.cosh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.coshArray(cArray), 9), np.round(np.cosh(data), 9))
####################################################################################
def test_count_nonzero():
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert NumCpp.count_nonzero(cArray, NumCpp.Axis.NONE) == np.count_nonzero(data)
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 3, [shape.rows, shape.cols])
imag = np.random.randint(1, 3, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.count_nonzero(cArray, NumCpp.Axis.NONE) == np.count_nonzero(data)
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.ROW).flatten(), np.count_nonzero(data, axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 3, [shape.rows, shape.cols])
imag = np.random.randint(1, 3, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.ROW).flatten(), np.count_nonzero(data, axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 3, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.COL).flatten(), np.count_nonzero(data, axis=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 3, [shape.rows, shape.cols])
imag = np.random.randint(1, 3, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.count_nonzero(cArray, NumCpp.Axis.COL).flatten(), np.count_nonzero(data, axis=1))
####################################################################################
def test_cross():
shape = NumCpp.Shape(1, 2)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).item() == np.cross(data1, data2).item()
shape = NumCpp.Shape(1, 2)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).item() == np.cross(data1, data2).item()
shape = NumCpp.Shape(2, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(2, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 2)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.cross(data1, data2, axis=1))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 2)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.cross(data1, data2, axis=1))
shape = NumCpp.Shape(1, 3)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.cross(data1, data2).flatten())
shape = NumCpp.Shape(1, 3)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.cross(data1, data2).flatten())
shape = NumCpp.Shape(3, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(3, np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.ROW).getNumpyArray(),
np.cross(data1, data2, axis=0))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 3)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
data2 = np.random.randint(1, 10, [shape.rows, shape.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.cross(data1, data2, axis=1))
shape = NumCpp.Shape(np.random.randint(1, 100, [1, ]).item(), 3)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.cross(cArray1, cArray2, NumCpp.Axis.COL).getNumpyArray(),
np.cross(data1, data2, axis=1))
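####################################################################################
# Illustrative sketch: for 2-element vectors np.cross returns the scalar
# z-component a0*b1 - a1*b0 (hence .item() in the Axis.NONE case above), while
# 3-element vectors produce a full 3-vector. Assumes only numpy.
def _cross_2d(a, b):
    return a[0] * b[1] - a[1] * b[0]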
####################################################################################
def test_cube():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cube(cArray), 9), np.round(data * data * data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.cube(cArray), 9), np.round(data * data * data, 9))
####################################################################################
def test_cumprod():
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.NONE).flatten(), data.cumprod())
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 4, [shape.rows, shape.cols])
imag = np.random.randint(1, 4, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.NONE).flatten(), data.cumprod())
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.ROW), data.cumprod(axis=0))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 4, [shape.rows, shape.cols])
imag = np.random.randint(1, 4, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.ROW), data.cumprod(axis=0))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.COL), data.cumprod(axis=1))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 4, [shape.rows, shape.cols])
imag = np.random.randint(1, 4, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumprod(cArray, NumCpp.Axis.COL), data.cumprod(axis=1))
####################################################################################
def test_cumsum():
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.NONE).flatten(), data.cumsum())
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.NONE).flatten(), data.cumsum())
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.ROW), data.cumsum(axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.ROW), data.cumsum(axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.COL), data.cumsum(axis=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.cumsum(cArray, NumCpp.Axis.COL), data.cumsum(axis=1))
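####################################################################################
# Illustrative sketch of the invariants behind the cumsum/cumprod tests: the
# final element of a flattened cumulative reduction equals the full reduction,
# and axis=0/axis=1 walk down rows/across columns respectively. Assumes numpy.
def _cumulative_invariants():
    x = np.random.randint(1, 4, [3, 4])
    assert x.cumsum()[-1] == x.sum()
    assert x.cumprod()[-1] == x.prod()
    assert np.array_equal(x.cumsum(axis=0)[-1, :], x.sum(axis=0))  # last row = column sums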
####################################################################################
def test_deg2rad():
value = np.abs(np.random.rand(1).item()) * 360
assert np.round(NumCpp.deg2radScaler(value), 9) == np.round(np.deg2rad(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 360
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.deg2radArray(cArray), 9), np.round(np.deg2rad(data), 9))
####################################################################################
def test_degrees():
value = np.abs(np.random.rand(1).item()) * 2 * np.pi
assert np.round(NumCpp.degreesScaler(value), 9) == np.round(np.degrees(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 2 * np.pi
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.degreesArray(cArray), 9), np.round(np.degrees(data), 9))
####################################################################################
def test_deleteIndices():
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
indices = NumCpp.Slice(0, 100, 4)
indicesPy = slice(0, 99, 4)
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesSlice(cArray, indices, NumCpp.Axis.NONE).flatten(),
np.delete(data, indicesPy, axis=None))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
indices = NumCpp.Slice(0, 100, 4)
indicesPy = slice(0, 99, 4)
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesSlice(cArray, indices, NumCpp.Axis.ROW),
np.delete(data, indicesPy, axis=0))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
indices = NumCpp.Slice(0, 100, 4)
indicesPy = slice(0, 99, 4)
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesSlice(cArray, indices, NumCpp.Axis.COL),
np.delete(data, indicesPy, axis=1))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
index = np.random.randint(0, shape.size(), [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesScaler(cArray, index, NumCpp.Axis.NONE).flatten(),
np.delete(data, index, axis=None))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
index = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesScaler(cArray, index, NumCpp.Axis.ROW), np.delete(data, index, axis=0))
shapeInput = np.asarray([100, 100])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
index = np.random.randint(0, 100, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.deleteIndicesScaler(cArray, index, NumCpp.Axis.COL), np.delete(data, index, axis=1))
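####################################################################################
# Illustrative note: NumCpp.Slice(0, 100, 4) and Python's slice(0, 99, 4) select
# the same indices above (0, 4, ..., 96), because the last stride hit is 96 under
# either stop value, so the differing stops are harmless. A minimal check,
# assuming only the standard library:
def _slice_equivalence():
    assert list(range(100))[slice(0, 99, 4)] == list(range(100))[slice(0, 100, 4)]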
####################################################################################
def test_diag():
shapeInput = np.random.randint(2, 25, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
k = np.random.randint(0, np.min(shapeInput), [1, ]).item()
elements = np.random.randint(1, 100, shapeInput)
cElements = NumCpp.NdArray(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diag(cElements, k).flatten(), np.diag(elements, k))
shapeInput = np.random.randint(2, 25, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
k = np.random.randint(0, np.min(shapeInput), [1, ]).item()
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
elements = real + 1j * imag
cElements = NumCpp.NdArrayComplexDouble(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diag(cElements, k).flatten(), np.diag(elements, k))
####################################################################################
def test_diagflat():
numElements = np.random.randint(2, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
elements = np.random.randint(1, 100, [numElements, ])
cElements = NumCpp.NdArray(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
numElements = np.random.randint(2, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
real = np.random.randint(1, 100, [numElements, ])
imag = np.random.randint(1, 100, [numElements, ])
elements = real + 1j * imag
cElements = NumCpp.NdArrayComplexDouble(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
numElements = np.random.randint(1, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
elements = np.random.randint(1, 100, [numElements, ])
cElements = NumCpp.NdArray(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
numElements = np.random.randint(1, 25, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
k = np.random.randint(0, 10, [1, ]).item()
real = np.random.randint(1, 100, [numElements, ])
imag = np.random.randint(1, 100, [numElements, ])
elements = real + 1j * imag
cElements = NumCpp.NdArrayComplexDouble(shape)
cElements.setArray(elements)
assert np.array_equal(NumCpp.diagflat(cElements, k), np.diagflat(elements, k))
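####################################################################################
# Illustrative sketch: np.diagflat(v, k) embeds the n input elements on the k-th
# diagonal of an (n + |k|) x (n + |k|) zero matrix, which pins down the expected
# output shape in the tests above. Assumes only numpy.
def _diagflat_shape(n, k):
    return np.diagflat(np.ones(n), k).shape  # == (n + abs(k), n + abs(k))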
####################################################################################
def test_diagonal():
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.ROW).flatten(),
np.diagonal(data, offset, axis1=0, axis2=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.ROW).flatten(),
np.diagonal(data, offset, axis1=0, axis2=1))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.COL).flatten(),
np.diagonal(data, offset, axis1=1, axis2=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
offset = np.random.randint(0, min(shape.rows, shape.cols), [1, ]).item()
assert np.array_equal(NumCpp.diagonal(cArray, offset, NumCpp.Axis.COL).flatten(),
np.diagonal(data, offset, axis1=1, axis2=0))
####################################################################################
def test_diff():
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.NONE).flatten(),
np.diff(data.flatten()))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.NONE).flatten(),
np.diff(data.flatten()))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.ROW), np.diff(data, axis=0))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.ROW), np.diff(data, axis=0))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.COL).astype(np.uint32), np.diff(data, axis=1))
shapeInput = np.random.randint(10, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 50, [shape.rows, shape.cols])
imag = np.random.randint(1, 50, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.diff(cArray, NumCpp.Axis.COL), np.diff(data, axis=1))
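####################################################################################
# Illustrative sketch: np.diff shortens the reduced axis by one element and is
# just the pairwise forward difference x[i + 1] - x[i]. Assumes only numpy.
def _diff_by_hand(x):
    x = np.asarray(x)
    return x[1:] - x[:-1]  # equivalent to np.diff(x) for 1-D input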
####################################################################################
def test_divide():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2[data2 == 0] = 1
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = 0
while value == 0:
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
data[data == 0] = 1
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
data2[data2 == complex(0)] = complex(1)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = 0
while value == complex(0):
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
data[data == complex(0)] = complex(1)
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2[data2 == 0] = 1
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
data2[data2 == complex(0)] = complex(1)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.divide(cArray1, cArray2), 9),
np.round(data1 / data2, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
    value = 0
    while value == complex(0):
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
data[data == 0] = 1
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = 0
while value == 0:
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(cArray, value), 9),
np.round(data / value, 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
data[data == complex(0)] = complex(1)
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(np.round(NumCpp.divide(value, cArray), 9),
np.round(value / data, 9))
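####################################################################################
# Illustrative note on the rejection-sampling pattern used above: scalar
# denominators are redrawn until nonzero, and array denominators have their
# zeros patched to 1, so the quotient comparison never divides by zero. A
# compact version of the array patch, assuming only numpy:
def _nonzero_denominator(shape):
    d = np.random.randint(-100, 100, shape)
    d[d == 0] = 1  # same patch the array cases use
    return d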
####################################################################################
def test_dot():
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 50, [shape.rows, shape.cols])
real2 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 50, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert NumCpp.dot(cArray1, cArray2).item() == np.dot(data1, data2.T).item()
shapeInput = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(1, 50, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 50, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.dot(cArray1, cArray2), np.dot(data1, data2))
shapeInput = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), np.random.randint(1, 100, [1, ]).item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
real1 = np.random.randint(1, 50, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 50, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 50, [shape2.rows, shape2.cols])
imag2 = np.random.randint(1, 50, [shape2.rows, shape2.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.dot(cArray1, cArray2), np.dot(data1, data2))
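####################################################################################
# Illustrative sketch: the 1 x n cases above reduce to the plain inner product
# sum_i a_i * b_i (hence .item() on the 1 x 1 result, and no conjugation for the
# complex inputs), while the 2-D cases are ordinary matrix multiplication with
# matching inner dimensions. Assumes only numpy.
def _dot_by_hand(a, b):
    return sum(x * y for x, y in zip(a.flatten(), b.flatten()))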
####################################################################################
def test_empty():
shapeInput = np.random.randint(1, 100, [2, ])
cArray = NumCpp.emptyRowCol(shapeInput[0].item(), shapeInput[1].item())
assert cArray.shape[0] == shapeInput[0]
assert cArray.shape[1] == shapeInput[1]
assert cArray.size == shapeInput.prod()
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.emptyShape(shape)
assert cArray.shape[0] == shape.rows
assert cArray.shape[1] == shape.cols
assert cArray.size == shapeInput.prod()
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.empty_like(cArray1)
assert cArray2.shape().rows == shape.rows
assert cArray2.shape().cols == shape.cols
assert cArray2.size() == shapeInput.prod()
####################################################################################
def test_endianess():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
assert NumCpp.endianess(cArray) == NumCpp.Endian.NATIVE
####################################################################################
def test_equal():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 10, [shape.rows, shape.cols])
data2 = np.random.randint(0, 10, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.equal(cArray1, cArray2), np.equal(data1, data2))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.equal(cArray1, cArray2), np.equal(data1, data2))
####################################################################################
def test_exp2():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.exp2Scaler(value), 9) == np.round(np.exp2(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.exp2Array(cArray), 9), np.round(np.exp2(data), 9))
####################################################################################
def test_exp():
    value = np.abs(np.random.rand(1).item())
    assert np.round(NumCpp.expScaler(value), 9) == np.round(np.exp(value), 9)
    components = np.random.rand(2).astype(np.double)
    value = complex(components[0], components[1])
    assert np.round(NumCpp.expScaler(value), 9) == np.round(np.exp(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expArray(cArray), 9), np.round(np.exp(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expArray(cArray), 9), np.round(np.exp(data), 9))
####################################################################################
def test_expm1():
value = np.abs(np.random.rand(1).item())
assert np.round(NumCpp.expm1Scaler(value), 9) == np.round(np.expm1(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.expm1Scaler(value), 9) == np.round(np.expm1(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expm1Array(cArray), 9), np.round(np.expm1(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.rand(shape.rows, shape.cols)
imag = np.random.rand(shape.rows, shape.cols)
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.expm1Array(cArray), 9), np.round(np.expm1(data), 9))
####################################################################################
def test_eye():
shapeInput = np.random.randint(1, 100, [1, ]).item()
randK = np.random.randint(0, shapeInput, [1, ]).item()
assert np.array_equal(NumCpp.eye1D(shapeInput, randK), np.eye(shapeInput, k=randK))
shapeInput = np.random.randint(1, 100, [1, ]).item()
randK = np.random.randint(0, shapeInput, [1, ]).item()
assert np.array_equal(NumCpp.eye1DComplex(shapeInput, randK),
np.eye(shapeInput, k=randK) + 1j * np.zeros([shapeInput, shapeInput]))
shapeInput = np.random.randint(10, 100, [2, ])
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eye2D(shapeInput[0].item(), shapeInput[1].item(), randK),
np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK))
shapeInput = np.random.randint(10, 100, [2, ])
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eye2DComplex(shapeInput[0].item(), shapeInput[1].item(), randK),
np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK) +
1j * np.zeros(shapeInput))
shapeInput = np.random.randint(10, 100, [2, ])
cShape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eyeShape(cShape, randK), np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK))
shapeInput = np.random.randint(10, 100, [2, ])
cShape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
randK = np.random.randint(0, np.min(shapeInput), [1, ]).item()
assert np.array_equal(NumCpp.eyeShapeComplex(cShape, randK),
np.eye(shapeInput[0].item(), shapeInput[1].item(), k=randK) +
1j * np.zeros(shapeInput))
####################################################################################
def test_fill_diagonal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
NumCpp.fillDiagonal(cArray, 666)
np.fill_diagonal(data, 666)
assert np.array_equal(cArray.getNumpyArray(), data)
####################################################################################
def test_find():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
value = data.mean()
cMask = NumCpp.operatorGreater(cArray, value)
cMaskArray = NumCpp.NdArrayBool(cMask.shape[0], cMask.shape[1])
cMaskArray.setArray(cMask)
idxs = NumCpp.find(cMaskArray).astype(np.int64)
idxsPy = np.nonzero((data > value).flatten())[0]
assert np.array_equal(idxs.flatten(), idxsPy)
####################################################################################
def test_findN():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
value = data.mean()
cMask = NumCpp.operatorGreater(cArray, value)
cMaskArray = NumCpp.NdArrayBool(cMask.shape[0], cMask.shape[1])
cMaskArray.setArray(cMask)
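    # findN returns only the first n flattened indices where the mask is true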
idxs = NumCpp.findN(cMaskArray, 8).astype(np.int64)
idxsPy = np.nonzero((data > value).flatten())[0]
assert np.array_equal(idxs.flatten(), idxsPy[:8])
####################################################################################
def test_fix():
value = np.random.randn(1).item() * 100
assert NumCpp.fixScaler(value) == np.fix(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
assert np.array_equal(NumCpp.fixArray(cArray), np.fix(data))
####################################################################################
def test_flatten():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
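    # flatten returns a 1 x size row vector, hence np.resize instead of np.ndarray.flatten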
assert np.array_equal(NumCpp.flatten(cArray).getNumpyArray(), np.resize(data, [1, data.size]))
####################################################################################
def test_flatnonzero():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flatnonzero(cArray).getNumpyArray().flatten(), np.flatnonzero(data))
####################################################################################
def test_flip():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
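    # Axis.NONE flips the flattened array, so mirror it with a reshape round trip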
assert np.array_equal(NumCpp.flip(cArray, NumCpp.Axis.NONE).getNumpyArray(),
np.flip(data.reshape(1, data.size), axis=1).reshape(shapeInput))
####################################################################################
def test_fliplr():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.fliplr(cArray).getNumpyArray(), np.fliplr(data))
####################################################################################
def test_flipud():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.flipud(cArray).getNumpyArray(), np.flipud(data))
####################################################################################
def test_floor():
value = np.random.randn(1).item() * 100
assert NumCpp.floorScaler(value) == np.floor(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
assert np.array_equal(NumCpp.floorArray(cArray), np.floor(data))
####################################################################################
def test_floor_divide():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.floor_divideScaler(value1, value2) == np.floor_divide(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.floor_divideArray(cArray1, cArray2), np.floor_divide(data1, data2))
####################################################################################
def test_fmax():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.fmaxScaler(value1, value2) == np.fmax(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.fmaxArray(cArray1, cArray2), np.fmax(data1, data2))
####################################################################################
def test_fmin():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.fminScaler(value1, value2) == np.fmin(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.fminArray(cArray1, cArray2), np.fmin(data1, data2))
####################################################################################
def test_fmod():
value1 = np.random.randint(1, 100, [1, ]).item() * 100 + 1000
value2 = np.random.randint(1, 100, [1, ]).item() * 100 + 1000
assert NumCpp.fmodScaler(value1, value2) == np.fmod(value1, value2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt32(shape)
cArray2 = NumCpp.NdArrayUInt32(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32) * 100 + 1000
data2 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.fmodArray(cArray1, cArray2), np.fmod(data1, data2))
####################################################################################
def test_fromfile():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
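    # binary round trip: dump to file, then read back with an empty separator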
    tempDir = tempfile.gettempdir()
tempFile = os.path.join(tempDir, 'NdArrayDump.bin')
NumCpp.dump(cArray, tempFile)
assert os.path.isfile(tempFile)
data2 = NumCpp.fromfile(tempFile, '').reshape(shape)
assert np.array_equal(data, data2)
os.remove(tempFile)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
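    # text round trip: tofile writes newline separated values, fromfile parses them back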
tempDir = tempfile.gettempdir()
tempFile = os.path.join(tempDir, 'NdArrayDump')
NumCpp.tofile(cArray, tempFile, '\n')
assert os.path.exists(tempFile + '.txt')
data2 = NumCpp.fromfile(tempFile + '.txt', '\n').reshape(shape)
assert np.array_equal(data, data2)
os.remove(tempFile + '.txt')
####################################################################################
def test_fromiter():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.fromiter(cArray).flatten(), data.flatten())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.fromiter(cArray).flatten(), data.flatten())
####################################################################################
def test_full():
shapeInput = np.random.randint(1, 100, [1, ]).item()
value = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullSquare(shapeInput, value)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput**2 and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [1, ]).item()
value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullSquareComplex(shapeInput, value)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput**2 and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [2, ])
value = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullRowCol(shapeInput[0].item(), shapeInput[1].item(), value)
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [2, ])
value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullRowColComplex(shapeInput[0].item(), shapeInput[1].item(), value)
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(cArray == value))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
value = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.fullShape(shape, value)
assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
cArray.size == shapeInput.prod() and np.all(cArray == value))
    shapeInput = np.random.randint(1, 100, [2, ])
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
    cArray = NumCpp.fullShapeComplex(shape, value)
    assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
            cArray.size == shapeInput.prod() and np.all(cArray == value))
####################################################################################
def test_full_like():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
value = np.random.randint(1, 100, [1, ]).item()
cArray2 = NumCpp.full_like(cArray1, value)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(cArray2.getNumpyArray() == value))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
cArray2 = NumCpp.full_likeComplex(cArray1, value)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(cArray2.getNumpyArray() == value))
####################################################################################
def test_gcd():
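    # scalar gcd needs boost or a C++17 <numeric> std::gcd; the array reduction needs boost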
if not NumCpp.NUMCPP_NO_USE_BOOST or NumCpp.STL_GCD_LCM:
value1 = np.random.randint(1, 1000, [1, ]).item()
value2 = np.random.randint(1, 1000, [1, ]).item()
assert NumCpp.gcdScaler(value1, value2) == np.gcd(value1, value2)
if not NumCpp.NUMCPP_NO_USE_BOOST:
size = np.random.randint(20, 100, [1, ]).item()
cArray = NumCpp.NdArrayUInt32(1, size)
data = np.random.randint(1, 1000, [size, ], dtype=np.uint32)
cArray.setArray(data)
assert NumCpp.gcdArray(cArray) == np.gcd.reduce(data) # noqa
####################################################################################
def test_gradient():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 1000, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.ROW), np.gradient(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 1000, [shape.rows, shape.cols])
imag = np.random.randint(1, 1000, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.ROW), np.gradient(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 1000, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.COL), np.gradient(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 1000, [shape.rows, shape.cols])
imag = np.random.randint(1, 1000, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.COL), np.gradient(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 1000, [shape.rows, shape.cols])
cArray.setArray(data)
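    # Axis.NONE computes the gradient over the flattened array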
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.NONE).flatten(),
np.gradient(data.flatten(), axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 1000, [shape.rows, shape.cols])
imag = np.random.randint(1, 1000, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.gradient(cArray, NumCpp.Axis.NONE).flatten(),
np.gradient(data.flatten(), axis=0))
####################################################################################
def test_greater():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater(cArray1, cArray2).getNumpyArray(),
np.greater(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater(cArray1, cArray2).getNumpyArray(),
np.greater(data1, data2))
####################################################################################
def test_greater_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater_equal(cArray1, cArray2).getNumpyArray(),
np.greater_equal(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.greater_equal(cArray1, cArray2).getNumpyArray(),
np.greater_equal(data1, data2))
####################################################################################
def test_histogram():
shape = NumCpp.Shape(1024, 1024)
cArray = NumCpp.NdArray(shape)
data = np.random.randn(1024, 1024) * np.random.randint(1, 10, [1, ]).item() + np.random.randint(1, 10, [1, ]).item()
cArray.setArray(data)
numBins = np.random.randint(10, 30, [1, ]).item()
histogram, bins = NumCpp.histogram(cArray, numBins)
h, b = np.histogram(data, numBins)
assert np.array_equal(histogram.getNumpyArray().flatten().astype(np.int32), h)
assert np.array_equal(np.round(bins.getNumpyArray().flatten(), 9), np.round(b, 9))
shape = NumCpp.Shape(1024, 1024)
cArray = NumCpp.NdArray(shape)
data = np.random.randn(1024, 1024) * np.random.randint(1, 10, [1, ]).item() + np.random.randint(1, 10, [1, ]).item()
cArray.setArray(data)
binEdges = np.linspace(data.min(), data.max(), 15, endpoint=True)
cBinEdges = NumCpp.NdArray(1, binEdges.size)
cBinEdges.setArray(binEdges)
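    # the bin-edge overload returns just the counts, not the edges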
histogram = NumCpp.histogram(cArray, cBinEdges)
h, _ = np.histogram(data, binEdges)
assert np.array_equal(histogram.flatten().astype(np.int32), h)
####################################################################################
def test_hstack():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape3 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
shape4 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item() + np.random.randint(1, 10, [1, ]).item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.hstack(cArray1, cArray2, cArray3, cArray4),
np.hstack([data1, data2, data3, data4]))
####################################################################################
def test_hypot():
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
assert NumCpp.hypotScaler(value1, value2) == np.hypot(value1, value2)
value1 = np.random.randn(1).item() * 100 + 1000
value2 = np.random.randn(1).item() * 100 + 1000
value3 = np.random.randn(1).item() * 100 + 1000
assert (np.round(NumCpp.hypotScalerTriple(value1, value2, value3), 9) ==
np.round(np.sqrt(value1**2 + value2**2 + value3**2), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
data2 = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.hypotArray(cArray1, cArray2), 9),
np.round(np.hypot(data1, data2), 9))
####################################################################################
def test_identity():
squareSize = np.random.randint(10, 100, [1, ]).item()
assert np.array_equal(NumCpp.identity(squareSize).getNumpyArray(), np.identity(squareSize))
squareSize = np.random.randint(10, 100, [1, ]).item()
assert np.array_equal(NumCpp.identityComplex(squareSize).getNumpyArray(),
np.identity(squareSize) + 1j * np.zeros([squareSize, squareSize]))
####################################################################################
def test_imag():
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.imagScaler(value), 9) == np.round(np.imag(value), 9) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.imagArray(cArray), 9), np.round(np.imag(data), 9))
####################################################################################
def test_interp():
endPoint = np.random.randint(10, 20, [1, ]).item()
numPoints = np.random.randint(50, 100, [1, ]).item()
resample = np.random.randint(2, 5, [1, ]).item()
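    # sample a sine curve, then interpolate back onto a finer grid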
xpData = np.linspace(0, endPoint, numPoints, endpoint=True)
fpData = np.sin(xpData)
xData = np.linspace(0, endPoint, numPoints * resample, endpoint=True)
cXp = NumCpp.NdArray(1, numPoints)
cFp = NumCpp.NdArray(1, numPoints)
cX = NumCpp.NdArray(1, numPoints * resample)
cXp.setArray(xpData)
cFp.setArray(fpData)
cX.setArray(xData)
assert np.array_equal(np.round(NumCpp.interp(cX, cXp, cFp).flatten(), 9),
np.round(np.interp(xData, xpData, fpData), 9))
####################################################################################
def test_intersect1d():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt32(shape)
cArray2 = NumCpp.NdArrayUInt32(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.uint32)
data2 = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.uint32)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.intersect1d(cArray1, cArray2).getNumpyArray().flatten(), np.intersect1d(data1, data2))
####################################################################################
def test_invert():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.invert(cArray).getNumpyArray(), np.invert(data))
####################################################################################
def test_isclose():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.rand(shape.rows, shape.cols)
data2 = data1 + np.random.randn(shape.rows, shape.cols) * 1e-5
cArray1.setArray(data1)
cArray2.setArray(data2)
rtol = 1e-5
atol = 1e-8
assert np.array_equal(NumCpp.isclose(cArray1, cArray2, rtol, atol).getNumpyArray(),
np.isclose(data1, data2, rtol=rtol, atol=atol))
####################################################################################
def test_isinf():
value = np.random.randn(1).item() * 100 + 1000
assert NumCpp.isinfScaler(value) == np.isinf(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
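    # seed some infs into the data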
data[data > 1000] = np.inf
cArray.setArray(data)
assert np.array_equal(NumCpp.isinfArray(cArray), np.isinf(data))
####################################################################################
def test_isnan():
value = np.random.randn(1).item() * 100 + 1000
assert NumCpp.isnanScaler(value) == np.isnan(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
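    # seed some NaNs into the data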
data[data > 1000] = np.nan
cArray.setArray(data)
assert np.array_equal(NumCpp.isnanArray(cArray), np.isnan(data))
####################################################################################
def test_lcm():
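    # scalar lcm needs boost or a C++17 <numeric> std::lcm; the array reduction needs boost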
if not NumCpp.NUMCPP_NO_USE_BOOST or NumCpp.STL_GCD_LCM:
value1 = np.random.randint(1, 1000, [1, ]).item()
value2 = np.random.randint(1, 1000, [1, ]).item()
assert NumCpp.lcmScaler(value1, value2) == np.lcm(value1, value2)
if not NumCpp.NUMCPP_NO_USE_BOOST:
size = np.random.randint(2, 10, [1, ]).item()
cArray = NumCpp.NdArrayUInt32(1, size)
data = np.random.randint(1, 100, [size, ], dtype=np.uint32)
cArray.setArray(data)
assert NumCpp.lcmArray(cArray) == np.lcm.reduce(data) # noqa
####################################################################################
def test_ldexp():
value1 = np.random.randn(1).item() * 100
value2 = np.random.randint(1, 20, [1, ]).item()
assert np.round(NumCpp.ldexpScaler(value1, value2), 9) == np.round(np.ldexp(value1, value2), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayUInt8(shape)
data1 = np.random.randn(shape.rows, shape.cols) * 100
data2 = np.random.randint(1, 20, [shape.rows, shape.cols], dtype=np.uint8)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(np.round(NumCpp.ldexpArray(cArray1, cArray2), 9), np.round(np.ldexp(data1, data2), 9))
####################################################################################
def test_left_shift():
shapeInput = np.random.randint(20, 100, [2, ])
    bitsToShift = np.random.randint(1, 32, [1, ]).item()
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayUInt32(shape)
    data = np.random.randint(1, np.iinfo(np.uint32).max, [shape.rows, shape.cols], dtype=np.uint32)
    cArray.setArray(data)
    assert np.array_equal(NumCpp.left_shift(cArray, bitsToShift).getNumpyArray(),
                          np.left_shift(data, bitsToShift))
####################################################################################
def test_less():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less(cArray1, cArray2).getNumpyArray(),
np.less(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less(cArray1, cArray2).getNumpyArray(),
np.less(data1, data2))
####################################################################################
def test_less_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less_equal(cArray1, cArray2).getNumpyArray(),
np.less_equal(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.less_equal(cArray1, cArray2).getNumpyArray(),
np.less_equal(data1, data2))
####################################################################################
def test_load():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
tempDir = tempfile.gettempdir()
tempFile = os.path.join(tempDir, 'NdArrayDump.bin')
NumCpp.dump(cArray, tempFile)
assert os.path.isfile(tempFile)
data2 = NumCpp.load(tempFile).reshape(shape)
assert np.array_equal(data, data2)
os.remove(tempFile)
####################################################################################
def test_linspace():
start = np.random.randint(1, 10, [1, ]).item()
end = np.random.randint(start + 10, 100, [1, ]).item()
numPoints = np.random.randint(1, 100, [1, ]).item()
assert np.array_equal(np.round(NumCpp.linspace(start, end, numPoints, True).getNumpyArray().flatten(), 9),
np.round(np.linspace(start, end, numPoints, endpoint=True), 9))
start = np.random.randint(1, 10, [1, ]).item()
end = np.random.randint(start + 10, 100, [1, ]).item()
numPoints = np.random.randint(1, 100, [1, ]).item()
assert np.array_equal(np.round(NumCpp.linspace(start, end, numPoints, False).getNumpyArray().flatten(), 9),
np.round(np.linspace(start, end, numPoints, endpoint=False), 9))
####################################################################################
def test_log():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.logScaler(value), 9) == np.round(np.log(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.logScaler(value), 9) == np.round(np.log(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.logArray(cArray), 9), np.round(np.log(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.logArray(cArray), 9), np.round(np.log(data), 9))
####################################################################################
def test_log10():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.log10Scaler(value), 9) == np.round(np.log10(value), 9)
components = np.random.randn(2).astype(np.double) * 100 + 100
value = complex(components[0], components[1])
assert np.round(NumCpp.log10Scaler(value), 9) == np.round(np.log10(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log10Array(cArray), 9), np.round(np.log10(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log10Array(cArray), 9), np.round(np.log10(data), 9))
####################################################################################
def test_log1p():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.log1pScaler(value), 9) == np.round(np.log1p(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log1pArray(cArray), 9), np.round(np.log1p(data), 9))
####################################################################################
def test_log2():
value = np.random.randn(1).item() * 100 + 1000
assert np.round(NumCpp.log2Scaler(value), 9) == np.round(np.log2(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100 + 1000
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.log2Array(cArray), 9), np.round(np.log2(data), 9))
####################################################################################
def test_logical_and():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 20, [shape.rows, shape.cols])
data2 = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.logical_and(cArray1, cArray2).getNumpyArray(), np.logical_and(data1, data2))
####################################################################################
def test_logical_not():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.logical_not(cArray).getNumpyArray(), np.logical_not(data))
####################################################################################
def test_logical_or():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 20, [shape.rows, shape.cols])
data2 = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.logical_or(cArray1, cArray2).getNumpyArray(), np.logical_or(data1, data2))
####################################################################################
def test_logical_xor():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 20, [shape.rows, shape.cols])
data2 = np.random.randint(0, 20, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.logical_xor(cArray1, cArray2).getNumpyArray(), np.logical_xor(data1, data2))
####################################################################################
def test_matmul():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 20, [shape1.rows, shape1.cols])
data2 = np.random.randint(0, 20, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.matmul(cArray1, cArray2), np.matmul(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
data1 = np.random.randint(0, 20, [shape1.rows, shape1.cols])
real2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
imag2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.matmul(cArray1, cArray2), np.matmul(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArrayComplexDouble(shape2)
real1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
imag2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.matmul(cArray1, cArray2), np.matmul(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape1)
cArray2 = NumCpp.NdArray(shape2)
real1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
imag1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(0, 20, [shape2.rows, shape2.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.matmul(cArray1, cArray2), np.matmul(data1, data2))
####################################################################################
def test_max():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.max(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
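    # numpy orders complex values lexicographically (real part, then imaginary part)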
assert NumCpp.max(cArray, NumCpp.Axis.NONE).item() == np.max(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.max(cArray, NumCpp.Axis.ROW).flatten(),
np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.max(cArray, NumCpp.Axis.ROW).flatten(),
np.max(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.max(cArray, NumCpp.Axis.COL).flatten(),
np.max(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.max(cArray, NumCpp.Axis.COL).flatten(),
np.max(data, axis=1))
####################################################################################
def test_maximum():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols])
data2 = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.maximum(cArray1, cArray2), np.maximum(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.maximum(cArray1, cArray2), np.maximum(data1, data2))
####################################################################################
def test_mean():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.round(NumCpp.mean(cArray, NumCpp.Axis.NONE).getNumpyArray().item(), 9) == \
np.round(np.mean(data, axis=None).item(), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.round(NumCpp.mean(cArray, NumCpp.Axis.NONE).getNumpyArray().item(), 9) == \
np.round(np.mean(data, axis=None).item(), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.mean(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.mean(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.mean(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.mean(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.mean(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.mean(data, axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.mean(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.mean(data, axis=1), 9))
####################################################################################
def test_median():
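    # re-roll the shape until the element count is odd so the median is an element of the data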
isEven = True
while isEven:
shapeInput = np.random.randint(20, 100, [2, ])
isEven = shapeInput.prod().item() % 2 == 0
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) # noqa
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.median(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten().item() == np.median(data, axis=None).item()
isEven = True
while isEven:
shapeInput = np.random.randint(20, 100, [2, ])
isEven = shapeInput.prod().item() % 2 == 0
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.median(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.median(data, axis=0))
isEven = True
while isEven:
shapeInput = np.random.randint(20, 100, [2, ])
isEven = shapeInput.prod().item() % 2 == 0
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.median(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), np.median(data, axis=1))
####################################################################################
def test_meshgrid():
start = np.random.randint(0, 20, [1, ]).item()
end = np.random.randint(30, 100, [1, ]).item()
step = np.random.randint(1, 5, [1, ]).item()
dataI = np.arange(start, end, step)
iSlice = NumCpp.Slice(start, end, step)
start = np.random.randint(0, 20, [1, ]).item()
end = np.random.randint(30, 100, [1, ]).item()
step = np.random.randint(1, 5, [1, ]).item()
dataJ = np.arange(start, end, step)
jSlice = NumCpp.Slice(start, end, step)
iMesh, jMesh = np.meshgrid(dataI, dataJ)
iMeshC, jMeshC = NumCpp.meshgrid(iSlice, jSlice)
assert np.array_equal(iMeshC.getNumpyArray(), iMesh)
assert np.array_equal(jMeshC.getNumpyArray(), jMesh)
####################################################################################
def test_min():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.min(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.min(cArray, NumCpp.Axis.NONE).item() == np.min(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.min(cArray, NumCpp.Axis.ROW).flatten(),
np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.min(cArray, NumCpp.Axis.ROW).flatten(),
np.min(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.min(cArray, NumCpp.Axis.COL).flatten(),
np.min(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.min(cArray, NumCpp.Axis.COL).flatten(),
np.min(data, axis=1))
####################################################################################
def test_minimum():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(0, 100, [shape.rows, shape.cols])
data2 = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.minimum(cArray1, cArray2), np.minimum(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.minimum(cArray1, cArray2), np.minimum(data1, data2))
####################################################################################
def test_mod():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt32(shape)
cArray2 = NumCpp.NdArrayUInt32(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
data2 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.mod(cArray1, cArray2).getNumpyArray(), np.mod(data1, data2))
####################################################################################
def test_multiply():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.multiply(cArray1, cArray2), data1 * data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(cArray, value), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(value, cArray), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.multiply(cArray1, cArray2), data1 * data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(cArray, value), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(value, cArray), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.multiply(cArray1, cArray2), data1 * data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.multiply(cArray1, cArray2), data1 * data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(cArray, value), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(value, cArray), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(cArray, value), data * value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.multiply(value, cArray), data * value)
####################################################################################
def test_nan_to_num():
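    # nan, inf, and -inf entries should each be replaced by the supplied substitution values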
shapeInput = np.random.randint(50, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.size(), ]).astype(np.double)
nan_idx = np.random.choice(range(data.size), 10, replace=False)
pos_inf_idx = np.random.choice(range(data.size), 10, replace=False)
neg_inf_idx = np.random.choice(range(data.size), 10, replace=False)
data[nan_idx] = np.nan
data[pos_inf_idx] = np.inf
data[neg_inf_idx] = -np.inf
data = data.reshape(shapeInput)
cArray.setArray(data)
nan_replace = float(np.random.randint(100))
pos_inf_replace = float(np.random.randint(100))
neg_inf_replace = float(np.random.randint(100))
assert np.array_equal(NumCpp.nan_to_num(cArray, nan_replace, pos_inf_replace, neg_inf_replace),
np.nan_to_num(data, nan=nan_replace, posinf=pos_inf_replace, neginf=neg_inf_replace))
####################################################################################
def test_nanargmax():
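    # argmax that ignores NaNs, checked flattened and along each axis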
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanargmax(cArray, NumCpp.Axis.NONE).item() == np.nanargmax(data)
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanargmax(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanargmax(data, axis=0))
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanargmax(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanargmax(data, axis=1))
####################################################################################
def test_nanargmin():
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanargmin(cArray, NumCpp.Axis.NONE).item() == np.nanargmin(data)
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanargmin(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanargmin(data, axis=0))
shapeInput = np.random.randint(10, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanargmin(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanargmin(data, axis=1))
####################################################################################
def test_nancumprod():
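    # cumulative product that skips NaNs, checked flattened and along each axis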
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumprod(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.nancumprod(data, axis=None))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumprod(cArray, NumCpp.Axis.ROW).getNumpyArray(), np.nancumprod(data, axis=0))
shapeInput = np.random.randint(1, 5, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 4, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumprod(cArray, NumCpp.Axis.COL).getNumpyArray(), np.nancumprod(data, axis=1))
####################################################################################
def test_nancumsum():
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumsum(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten(),
np.nancumsum(data, axis=None))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumsum(cArray, NumCpp.Axis.ROW).getNumpyArray(), np.nancumsum(data, axis=0))
shapeInput = np.random.randint(1, 50, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nancumsum(cArray, NumCpp.Axis.COL).getNumpyArray(), np.nancumsum(data, axis=1))
####################################################################################
def test_nanmax():
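    # max that ignores NaNs, checked flattened and along each axis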
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanmax(cArray, NumCpp.Axis.NONE).item() == np.nanmax(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmax(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanmax(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmax(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanmax(data, axis=1))
####################################################################################
def test_nanmean():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanmean(cArray, NumCpp.Axis.NONE).item() == np.nanmean(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmean(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanmean(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmean(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanmean(data, axis=1))
####################################################################################
def test_nanmedian():
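    # force an odd element count so the median is an exact array element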
isEven = True
while isEven:
shapeInput = np.random.randint(20, 100, [2, ])
isEven = shapeInput.prod().item() % 2 == 0
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item()) # noqa
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert (NumCpp.nanmedian(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten().item() ==
np.nanmedian(data, axis=None).item())
# isEven = True
# while isEven:
# shapeInput = np.random.randint(20, 100, [2, ])
# isEven = shapeInput[0].item() % 2 == 0
# shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
# cArray = NumCpp.NdArray(shape)
# data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
# data = data.flatten()
# data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
# data = data.reshape(shapeInput)
# cArray.setArray(data)
# assert np.array_equal(NumCpp.nanmedian(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
# np.nanmedian(data, axis=0))
#
# isEven = True
# while isEven:
# shapeInput = np.random.randint(20, 100, [2, ])
# isEven = shapeInput[1].item() % 2 == 0
# shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
# cArray = NumCpp.NdArray(shape)
# data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
# data = data.flatten()
# data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
# data = data.reshape(shapeInput)
# cArray.setArray(data)
# assert np.array_equal(NumCpp.nanmedian(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
# np.nanmedian(data, axis=1))
####################################################################################
def test_nanmin():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanmin(cArray, NumCpp.Axis.NONE).item() == np.nanmin(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmin(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(),
np.nanmin(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanmin(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(),
np.nanmin(data, axis=1))
####################################################################################
def test_nanpercentile():
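    # NaN-ignoring percentile for each interpolation method: lower, higher, nearest, midpoint, linear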
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.NONE, 'lower').item() ==
np.nanpercentile(data, percentile, axis=None, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.NONE, 'higher').item() ==
np.nanpercentile(data, percentile, axis=None, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.NONE, 'nearest').item() ==
np.nanpercentile(data, percentile, axis=None, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.NONE, 'midpoint').item() ==
np.nanpercentile(data, percentile, axis=None, interpolation='midpoint'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.NONE, 'linear').item() ==
np.nanpercentile(data, percentile, axis=None, interpolation='linear'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.ROW, 'lower').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=0, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.ROW, 'higher').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=0, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray,
percentile,
NumCpp.Axis.ROW,
'nearest').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=0, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(
np.round(NumCpp.nanpercentile(cArray,
percentile,
NumCpp.Axis.ROW,
'midpoint').getNumpyArray().flatten(), 9),
np.round(np.nanpercentile(data, percentile, axis=0, interpolation='midpoint'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(np.round(NumCpp.nanpercentile(cArray,
percentile,
NumCpp.Axis.ROW,
'linear').getNumpyArray().flatten(), 9),
np.round(np.nanpercentile(data, percentile, axis=0, interpolation='linear'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.COL, 'lower').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=1, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.COL, 'higher').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=1, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.nanpercentile(cArray,
percentile,
NumCpp.Axis.COL,
'nearest').getNumpyArray().flatten(),
np.nanpercentile(data, percentile, axis=1, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(
np.round(NumCpp.nanpercentile(cArray,
percentile,
NumCpp.Axis.COL,
'midpoint').getNumpyArray().flatten(), 9),
np.round(np.nanpercentile(data, percentile, axis=1, interpolation='midpoint'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(
np.round(NumCpp.nanpercentile(cArray, percentile, NumCpp.Axis.COL, 'linear').getNumpyArray().flatten(), 9),
np.round(np.nanpercentile(data, percentile, axis=1, interpolation='linear'), 9))
####################################################################################
def test_nanprod():
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nanprod(cArray, NumCpp.Axis.NONE).item() == np.nanprod(data, axis=None)
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanprod(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.nanprod(data, axis=0))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nanprod(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), np.nanprod(data, axis=1))
####################################################################################
def test_nans():
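    # NaN-filled array constructors: square, row/col, and Shape overloads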
shapeInput = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.nansSquare(shapeInput)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput ** 2 and np.all(np.isnan(cArray)))
shapeInput = np.random.randint(20, 100, [2, ])
cArray = NumCpp.nansRowCol(shapeInput[0].item(), shapeInput[1].item())
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(np.isnan(cArray)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.nansShape(shape)
assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
cArray.size == shapeInput.prod() and np.all(np.isnan(cArray)))
####################################################################################
def test_nans_like():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.nans_like(cArray1)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(np.isnan(cArray2.getNumpyArray())))
####################################################################################
def test_nanstd():
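    # NumCpp names this nanstdev; results should match np.nanstd to 9 decimal places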
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.round(NumCpp.nanstdev(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.nanstd(data), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.nanstdev(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.nanstd(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.nanstdev(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.nanstd(data, axis=1), 9))
####################################################################################
def test_nansum():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert NumCpp.nansum(cArray, NumCpp.Axis.NONE).item() == np.nansum(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nansum(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.nansum(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(NumCpp.nansum(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), np.nansum(data, axis=1))
####################################################################################
def test_nanvar():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.round(NumCpp.nanvar(cArray, NumCpp.Axis.NONE).item(), 8) == np.round(np.nanvar(data), 8)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.nanvar(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 8),
np.round(np.nanvar(data, axis=0), 8))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
data = data.flatten()
data[np.random.randint(0, shape.size(), [shape.size() // 10, ])] = np.nan
data = data.reshape(shapeInput)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.nanvar(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 8),
np.round(np.nanvar(data, axis=1), 8))
####################################################################################
def test_nbytes():
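    # 8 bytes per double element, 16 bytes per complex<double> element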
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.nbytes(cArray) == data.size * 8
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.nbytes(cArray) == data.size * 16
####################################################################################
def test_negative():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.negative(cArray).getNumpyArray(), 9),
np.round(np.negative(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.negative(cArray).getNumpyArray(), 9),
np.round(np.negative(data), 9))
####################################################################################
def test_newbyteorderArray():
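    # byte-swapped scalars and arrays should match numpy's newbyteorder for uint32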
value = np.random.randint(1, 100, [1, ]).item()
assert (NumCpp.newbyteorderScaler(value, NumCpp.Endian.BIG) ==
np.asarray([value], dtype=np.uint32).newbyteorder().item())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayUInt32(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.newbyteorderArray(cArray, NumCpp.Endian.BIG),
data.newbyteorder())
####################################################################################
def test_none():
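    # none() is the logical complement of any(), checked flattened and along each axis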
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.none(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.logical_not(np.any(data).item())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.none(cArray, NumCpp.Axis.NONE).astype(bool).item() == np.logical_not(np.any(data).item())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.none(cArray, NumCpp.Axis.ROW).flatten().astype(bool),
np.logical_not(np.any(data, axis=0)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.none(cArray, NumCpp.Axis.ROW).flatten().astype(bool),
np.logical_not(np.any(data, axis=0)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.none(cArray, NumCpp.Axis.COL).flatten().astype(bool),
np.logical_not(np.any(data, axis=1)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.none(cArray, NumCpp.Axis.COL).flatten().astype(bool),
np.logical_not(np.any(data, axis=1)))
####################################################################################
def test_nonzero():
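    # row and column indices of the nonzero elements should match np.nonzero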
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
row, col = np.nonzero(data)
rowC, colC = NumCpp.nonzero(cArray)
assert (np.array_equal(rowC.getNumpyArray().flatten(), row) and
np.array_equal(colC.getNumpyArray().flatten(), col))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
row, col = np.nonzero(data)
rowC, colC = NumCpp.nonzero(cArray)
assert (np.array_equal(rowC.getNumpyArray().flatten(), row) and
np.array_equal(colC.getNumpyArray().flatten(), col))
####################################################################################
def test_norm():
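    # Axis.ROW reduces down the rows, yielding one norm per column, hence the transpose below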
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
    assert NumCpp.norm(cArray, NumCpp.Axis.NONE).item() == np.linalg.norm(data.flatten())
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.norm(cArray, NumCpp.Axis.NONE).item() is not None
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
norms = NumCpp.norm(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten()
allPass = True
    for idx, col in enumerate(data.transpose()):
        if norms[idx] != np.linalg.norm(col):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
norms = NumCpp.norm(cArray, NumCpp.Axis.COL).getNumpyArray().flatten()
allPass = True
for idx, row in enumerate(data):
if norms[idx] != np.linalg.norm(row):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
norms = NumCpp.norm(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten()
assert norms is not None
####################################################################################
def test_not_equal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.not_equal(cArray1, cArray2).getNumpyArray(), np.not_equal(data1, data2))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.not_equal(cArray1, cArray2).getNumpyArray(), np.not_equal(data1, data2))
####################################################################################
def test_ones():
shapeInput = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.onesSquare(shapeInput)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput ** 2 and np.all(cArray == 1))
shapeInput = np.random.randint(1, 100, [1, ]).item()
cArray = NumCpp.onesSquareComplex(shapeInput)
assert (cArray.shape[0] == shapeInput and cArray.shape[1] == shapeInput and
cArray.size == shapeInput ** 2 and np.all(cArray == complex(1, 0)))
shapeInput = np.random.randint(20, 100, [2, ])
cArray = NumCpp.onesRowCol(shapeInput[0].item(), shapeInput[1].item())
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(cArray == 1))
shapeInput = np.random.randint(20, 100, [2, ])
cArray = NumCpp.onesRowColComplex(shapeInput[0].item(), shapeInput[1].item())
assert (cArray.shape[0] == shapeInput[0] and cArray.shape[1] == shapeInput[1] and
cArray.size == shapeInput.prod() and np.all(cArray == complex(1, 0)))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.onesShape(shape)
assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
cArray.size == shapeInput.prod() and np.all(cArray == 1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.onesShapeComplex(shape)
assert (cArray.shape[0] == shape.rows and cArray.shape[1] == shape.cols and
cArray.size == shapeInput.prod() and np.all(cArray == complex(1, 0)))
####################################################################################
def test_ones_like():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.ones_like(cArray1)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(cArray2.getNumpyArray() == 1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.ones_likeComplex(cArray1)
assert (cArray2.shape().rows == shape.rows and cArray2.shape().cols == shape.cols and
cArray2.size() == shapeInput.prod() and np.all(cArray2.getNumpyArray() == complex(1, 0)))
####################################################################################
def test_outer():
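    # outer product of two 1-D arrays, real and complex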
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 50, [shape.rows, shape.cols], dtype=np.uint32)
data2 = np.random.randint(1, 50, [shape.rows, shape.cols], dtype=np.uint32)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.outer(cArray1, cArray2), np.outer(data1, data2))
size = np.random.randint(1, 100, [1, ]).item()
shape = NumCpp.Shape(1, size)
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 50, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 50, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 50, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.outer(cArray1, cArray2), np.outer(data1, data2))
####################################################################################
def test_pad():
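    # constant padding on all sides, equivalent to np.pad with mode='constant'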
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
padWidth = np.random.randint(1, 10, [1, ]).item()
padValue = np.random.randint(1, 100, [1, ]).item()
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray = NumCpp.NdArray(shape)
cArray.setArray(data)
assert np.array_equal(NumCpp.pad(cArray, padWidth, padValue).getNumpyArray(),
np.pad(data, padWidth, mode='constant', constant_values=padValue))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
padWidth = np.random.randint(1, 10, [1, ]).item()
padValue = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray = NumCpp.NdArrayComplexDouble(shape)
cArray.setArray(data)
assert np.array_equal(NumCpp.pad(cArray, padWidth, padValue).getNumpyArray(),
np.pad(data, padWidth, mode='constant', constant_values=padValue))
####################################################################################
def test_partition():
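    # after partitioning, everything before the kth element is <= it and everything after is >= it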
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput.prod(), [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.NONE).getNumpyArray().flatten()
    assert (np.all(partitionedArray[:kthElement] <= partitionedArray[kthElement]) and
            np.all(partitionedArray[kthElement:] >= partitionedArray[kthElement]))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput.prod(), [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.NONE).getNumpyArray().flatten()
    assert (np.all(partitionedArray[:kthElement] <= partitionedArray[kthElement]) and
            np.all(partitionedArray[kthElement:] >= partitionedArray[kthElement]))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput[0], [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.ROW).getNumpyArray().transpose()
allPass = True
for row in partitionedArray:
        if not (np.all(row[:kthElement] <= row[kthElement]) and
                np.all(row[kthElement:] >= row[kthElement])):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput[0], [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.ROW).getNumpyArray().transpose()
allPass = True
for row in partitionedArray:
        if not (np.all(row[:kthElement] <= row[kthElement]) and
                np.all(row[kthElement:] >= row[kthElement])):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput[1], [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.COL).getNumpyArray()
allPass = True
for row in partitionedArray:
        if not (np.all(row[:kthElement] <= row[kthElement]) and
                np.all(row[kthElement:] >= row[kthElement])):
allPass = False
break
assert allPass
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
kthElement = np.random.randint(0, shapeInput[1], [1, ], dtype=np.uint32).item()
partitionedArray = NumCpp.partition(cArray, kthElement, NumCpp.Axis.COL).getNumpyArray()
allPass = True
for row in partitionedArray:
        if not (np.all(row[:kthElement] <= row[kthElement]) and
                np.all(row[kthElement:] >= row[kthElement])):
allPass = False
break
assert allPass
####################################################################################
def test_percentile():
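    # percentile for each interpolation method, checked flattened and along each axis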
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.percentile(cArray, percentile, NumCpp.Axis.NONE, 'lower').item() ==
np.percentile(data, percentile, axis=None, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.percentile(cArray, percentile, NumCpp.Axis.NONE, 'higher').item() ==
np.percentile(data, percentile, axis=None, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.percentile(cArray, percentile, NumCpp.Axis.NONE, 'nearest').item() ==
np.percentile(data, percentile, axis=None, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.percentile(cArray, percentile, NumCpp.Axis.NONE, 'midpoint').item() ==
np.percentile(data, percentile, axis=None, interpolation='midpoint'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert (NumCpp.percentile(cArray, percentile, NumCpp.Axis.NONE, 'linear').item() ==
np.percentile(data, percentile, axis=None, interpolation='linear'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.ROW, 'lower').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=0, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.ROW, 'higher').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=0, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.ROW, 'nearest').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=0, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(np.round(NumCpp.percentile(cArray,
percentile,
NumCpp.Axis.ROW,
'midpoint').getNumpyArray().flatten(), 9),
np.round(np.percentile(data, percentile, axis=0, interpolation='midpoint'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(np.round(NumCpp.percentile(cArray,
percentile,
NumCpp.Axis.ROW,
'linear').getNumpyArray().flatten(), 9),
np.round(np.percentile(data, percentile, axis=0, interpolation='linear'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.COL, 'lower').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=1, interpolation='lower'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.COL, 'higher').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=1, interpolation='higher'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(NumCpp.percentile(cArray, percentile, NumCpp.Axis.COL, 'nearest').getNumpyArray().flatten(),
np.percentile(data, percentile, axis=1, interpolation='nearest'))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(np.round(NumCpp.percentile(cArray,
percentile,
NumCpp.Axis.COL,
'midpoint').getNumpyArray().flatten(), 9),
np.round(np.percentile(data, percentile, axis=1, interpolation='midpoint'), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
percentile = np.random.rand(1).item() * 100
assert np.array_equal(np.round(NumCpp.percentile(cArray,
percentile,
NumCpp.Axis.COL,
'linear').getNumpyArray().flatten(), 9),
np.round(np.percentile(data, percentile, axis=1, interpolation='linear'), 9))
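####################################################################################
# Illustrative sketch, not collected by pytest: how the five interpolation
# strategies exercised above differ on a concrete input. Newer numpy releases
# rename the 'interpolation' keyword to 'method'; the classic name is kept here
# to match the tests above.
def _percentile_interpolation_demo():
    x = np.array([1.0, 2.0, 3.0, 4.0])
    # the 40th percentile falls at fractional index 1.2, between 2.0 and 3.0
    assert np.percentile(x, 40, interpolation='lower') == 2.0    # round index down
    assert np.percentile(x, 40, interpolation='higher') == 3.0   # round index up
    assert np.percentile(x, 40, interpolation='nearest') == 2.0  # closest index
    assert np.percentile(x, 40, interpolation='midpoint') == 2.5  # mean of lower/higher
    assert np.round(np.percentile(x, 40, interpolation='linear'), 9) == 2.2  # the default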
####################################################################################
def test_polar():
components = np.random.rand(2).astype(np.double)
    assert NumCpp.polarScaler(components[0], components[1]) is not None
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
magArray = NumCpp.NdArray(shape)
angleArray = NumCpp.NdArray(shape)
mag = np.random.rand(shape.rows, shape.cols)
angle = np.random.rand(shape.rows, shape.cols)
magArray.setArray(mag)
angleArray.setArray(angle)
assert NumCpp.polarArray(magArray, angleArray) is not None
####################################################################################
def test_power():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
exponent = np.random.randint(0, 5, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.powerArrayScaler(cArray, exponent), 9),
np.round(np.power(data, exponent), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
exponent = np.random.randint(0, 5, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.powerArrayScaler(cArray, exponent), 9),
np.round(np.power(data, exponent), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cExponents = NumCpp.NdArrayUInt8(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
exponents = np.random.randint(0, 5, [shape.rows, shape.cols]).astype(np.uint8)
cArray.setArray(data)
cExponents.setArray(exponents)
assert np.array_equal(np.round(NumCpp.powerArrayArray(cArray, cExponents), 9),
np.round(np.power(data, exponents), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cExponents = NumCpp.NdArrayUInt8(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
exponents = np.random.randint(0, 5, [shape.rows, shape.cols]).astype(np.uint8)
cArray.setArray(data)
cExponents.setArray(exponents)
assert np.array_equal(np.round(NumCpp.powerArrayArray(cArray, cExponents), 9),
np.round(np.power(data, exponents), 9))
####################################################################################
def test_powerf():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
exponent = np.random.rand(1).item() * 3
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.powerfArrayScaler(cArray, exponent), 9),
np.round(np.power(data, exponent), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
exponent = np.random.rand(1).item() * 3
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.powerfArrayScaler(cArray, exponent), 9),
np.round(np.power(data, exponent), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
cExponents = NumCpp.NdArray(shape)
data = np.random.randint(0, 20, [shape.rows, shape.cols])
exponents = np.random.rand(shape.rows, shape.cols) * 3
cArray.setArray(data)
cExponents.setArray(exponents)
assert np.array_equal(np.round(NumCpp.powerfArrayArray(cArray, cExponents), 9),
np.round(np.power(data, exponents), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
cExponents = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
exponents = np.random.rand(shape.rows, shape.cols) * 3 + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
cExponents.setArray(exponents)
assert np.array_equal(np.round(NumCpp.powerfArrayArray(cArray, cExponents), 9),
np.round(np.power(data, exponents), 9))
####################################################################################
def test_prod():
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert NumCpp.prod(cArray, NumCpp.Axis.NONE).item() == data.prod()
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 15, [shape.rows, shape.cols])
imag = np.random.randint(1, 15, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.prod(cArray, NumCpp.Axis.NONE).item() == data.prod()
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(NumCpp.prod(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), data.prod(axis=0))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 15, [shape.rows, shape.cols])
imag = np.random.randint(1, 15, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.prod(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), data.prod(axis=0))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(NumCpp.prod(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), data.prod(axis=1))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 15, [shape.rows, shape.cols])
imag = np.random.randint(1, 15, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.prod(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), data.prod(axis=1))
####################################################################################
def test_proj():
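    # std::proj returns finite inputs unchanged and maps infinities onto the
    # Riemann sphere's point at infinity; only smoke-tested here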
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
    assert NumCpp.projScaler(value) is not None
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cData = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cData.setArray(data)
assert NumCpp.projArray(cData) is not None
####################################################################################
def test_ptp():
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.ptp(cArray, NumCpp.Axis.NONE).getNumpyArray().item() == data.ptp()
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.ptp(cArray, NumCpp.Axis.NONE).getNumpyArray().item() == data.ptp()
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.ptp(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten().astype(np.uint32),
data.ptp(axis=0))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 15, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
assert np.array_equal(NumCpp.ptp(cArray, NumCpp.Axis.COL).getNumpyArray().flatten().astype(np.uint32),
data.ptp(axis=1))
####################################################################################
def test_put():
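    # put writes values into the flattened array at the given indices,
    # mirroring numpy's ndarray.put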
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
    numIndices = np.random.randint(1, shape.size())  # at least one index so the put is actually exercised
    indices = np.asarray(range(numIndices), dtype=np.uint32)
value = np.random.randint(1, 500)
cIndices = NumCpp.NdArrayUInt32(1, numIndices)
cIndices.setArray(indices)
NumCpp.put(cArray, cIndices, value)
data.put(indices, value)
assert np.array_equal(cArray.getNumpyArray(), data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 50, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
    numIndices = np.random.randint(1, shape.size())  # at least one index so the put is actually exercised
indices = np.asarray(range(numIndices), dtype=np.uint32)
values = np.random.randint(1, 500, [numIndices, ])
cIndices = NumCpp.NdArrayUInt32(1, numIndices)
cValues = NumCpp.NdArray(1, numIndices)
cIndices.setArray(indices)
cValues.setArray(values)
NumCpp.put(cArray, cIndices, cValues)
data.put(indices, values)
assert np.array_equal(cArray.getNumpyArray(), data)
####################################################################################
def test_rad2deg():
value = np.abs(np.random.rand(1).item()) * 2 * np.pi
assert np.round(NumCpp.rad2degScaler(value), 9) == np.round(np.rad2deg(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 2 * np.pi
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.rad2degArray(cArray), 9), np.round(np.rad2deg(data), 9))
####################################################################################
def test_radians():
value = np.abs(np.random.rand(1).item()) * 360
assert np.round(NumCpp.radiansScaler(value), 9) == np.round(np.radians(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 360
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.radiansArray(cArray), 9), np.round(np.radians(data), 9))
####################################################################################
def test_ravel():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
cArray2 = NumCpp.ravel(cArray)
assert np.array_equal(cArray2.getNumpyArray().flatten(), np.ravel(data))
####################################################################################
def test_real():
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.realScaler(value), 9) == np.round(np.real(value), 9) # noqa
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.realArray(cArray), 9), np.round(np.real(data), 9))
####################################################################################
def test_reciprocal():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.reciprocal(cArray), 9), np.round(np.reciprocal(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.double)
imag = np.random.randint(1, 100, [shape.rows, shape.cols]).astype(np.double)
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.reciprocal(cArray), 9), np.round(np.reciprocal(data), 9))
####################################################################################
def test_remainder():
    # numpy's remainder and C++ std::remainder use different conventions, so map the result into numpy's range for comparison
values = np.random.rand(2) * 100
values = np.sort(values)
res = NumCpp.remainderScaler(values[1].item(), values[0].item())
if res < 0:
res += values[0].item()
assert np.round(res, 9) == np.round(np.remainder(values[1], values[0]), 9)
    # same convention mapping for the array overload
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.rand(shape.rows, shape.cols) * 100 + 10
data2 = data1 - np.random.rand(shape.rows, shape.cols) * 10
cArray1.setArray(data1)
cArray2.setArray(data2)
res = NumCpp.remainderArray(cArray1, cArray2)
res[res < 0] = res[res < 0] + data2[res < 0]
assert np.array_equal(np.round(res, 9), np.round(np.remainder(data1, data2), 9))
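####################################################################################
# Background for the conversions in test_remainder, as a sketch (not collected
# by pytest): C++ std::remainder follows IEEE 754 and returns a value in
# [-y/2, y/2], while np.remainder returns a value in [0, y) for positive y;
# adding y to a negative IEEE-style result maps it onto numpy's convention.
def _remainder_convention_demo():
    import math
    assert math.remainder(5.0, 3.0) == -1.0  # IEEE 754: 5 = 2 * 3 - 1
    assert np.remainder(5.0, 3.0) == 2.0     # numpy:    5 = 1 * 3 + 2
    assert math.remainder(5.0, 3.0) + 3.0 == np.remainder(5.0, 3.0)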
####################################################################################
def test_replace():
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
oldValue = np.random.randint(1, 100, 1).item()
newValue = np.random.randint(1, 100, 1).item()
dataCopy = data.copy()
dataCopy[dataCopy == oldValue] = newValue
assert np.array_equal(NumCpp.replace(cArray, oldValue, newValue), dataCopy)
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
oldValue = np.random.randint(1, 100, 1).item() + 1j * np.random.randint(1, 100, 1).item()
newValue = np.random.randint(1, 100, 1).item() + 1j * np.random.randint(1, 100, 1).item()
dataCopy = data.copy()
dataCopy[dataCopy == oldValue] = newValue
assert np.array_equal(NumCpp.replace(cArray, oldValue, newValue), dataCopy)
####################################################################################
def test_reshape():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
newShape = data.size
NumCpp.reshape(cArray, newShape)
assert np.array_equal(cArray.getNumpyArray(), data.reshape(1, newShape))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
newShape = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
NumCpp.reshape(cArray, newShape)
assert np.array_equal(cArray.getNumpyArray(), data.reshape(shapeInput[::-1]))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
newShape = NumCpp.Shape(shapeInput[1].item(), shapeInput[0].item())
NumCpp.reshapeList(cArray, newShape)
assert np.array_equal(cArray.getNumpyArray(), data.reshape(shapeInput[::-1]))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
newNumCols = np.random.choice(np.array(list(factors(data.size))), 1).item()
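    # a dimension of -1 is inferred from the total size, as in numpy's reshape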
NumCpp.reshape(cArray, -1, newNumCols)
assert np.array_equal(cArray.getNumpyArray(), data.reshape(-1, newNumCols))
shapeInput = np.random.randint(1, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray.setArray(data)
newNumRows = np.random.choice(np.array(list(factors(data.size))), 1).item()
NumCpp.reshape(cArray, newNumRows, -1)
assert np.array_equal(cArray.getNumpyArray(), data.reshape(newNumRows, -1))
####################################################################################
def test_resize():
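    # only the resulting shape is asserted below; presumably resizeFast does not
    # preserve element values while resizeSlow does (an assumption, not tested here)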
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput2 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput2[0].item(), shapeInput2[1].item())
cArray = NumCpp.NdArray(shape1)
data = np.random.randint(1, 100, [shape1.rows, shape1.cols], dtype=np.uint32)
cArray.setArray(data)
NumCpp.resizeFast(cArray, shape2)
assert cArray.shape().rows == shape2.rows
assert cArray.shape().cols == shape2.cols
shapeInput1 = np.random.randint(1, 100, [2, ])
shapeInput2 = np.random.randint(1, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput2[0].item(), shapeInput2[1].item())
cArray = NumCpp.NdArray(shape1)
data = np.random.randint(1, 100, [shape1.rows, shape1.cols], dtype=np.uint32)
cArray.setArray(data)
NumCpp.resizeSlow(cArray, shape2)
assert cArray.shape().rows == shape2.rows
assert cArray.shape().cols == shape2.cols
####################################################################################
def test_right_shift():
    shapeInput = np.random.randint(20, 100, [2, ])
    bitsToShift = np.random.randint(1, 32, [1, ]).item()
    shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    cArray = NumCpp.NdArrayUInt32(shape)
    data = np.random.randint(1, np.iinfo(np.uint32).max, [shape.rows, shape.cols], dtype=np.uint32)
    cArray.setArray(data)
    assert np.array_equal(NumCpp.right_shift(cArray, bitsToShift).getNumpyArray(),
                          np.right_shift(data, bitsToShift))
####################################################################################
def test_rint():
value = np.abs(np.random.rand(1).item()) * 2 * np.pi
assert NumCpp.rintScaler(value) == np.rint(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 2 * np.pi
cArray.setArray(data)
assert np.array_equal(NumCpp.rintArray(cArray), np.rint(data))
####################################################################################
def test_rms():
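    # reference computation: rms(x) = sqrt(mean(x**2))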
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert (np.round(NumCpp.rms(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten().item(), 9) ==
np.round(np.sqrt(np.mean(np.square(data), axis=None)).item(), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert (np.round(NumCpp.rms(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten().item(), 9) ==
np.round(np.sqrt(np.mean(np.square(data), axis=None)).item(), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.rms(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.sqrt(np.mean(np.square(data), axis=0)), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.rms(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.sqrt(np.mean(np.square(data), axis=0)), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.rms(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.sqrt(np.mean(np.square(data), axis=1)), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.rms(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.sqrt(np.mean(np.square(data), axis=1)), 9))
####################################################################################
def test_roll():
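    # np.roll shifts elements and wraps them around to the other end, so the
    # shift amount is effectively taken modulo the axis length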
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
amount = np.random.randint(0, data.size, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.roll(cArray, amount, NumCpp.Axis.NONE).getNumpyArray(),
np.roll(data, amount, axis=None))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
    amount = np.random.randint(0, shape.rows, [1, ]).item()  # bound by the rolled axis length
cArray.setArray(data)
assert np.array_equal(NumCpp.roll(cArray, amount, NumCpp.Axis.ROW).getNumpyArray(),
np.roll(data, amount, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
    amount = np.random.randint(0, shape.cols, [1, ]).item()  # bound by the rolled axis length
cArray.setArray(data)
assert np.array_equal(NumCpp.roll(cArray, amount, NumCpp.Axis.COL).getNumpyArray(),
np.roll(data, amount, axis=1))
####################################################################################
def test_rot90():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
amount = np.random.randint(1, 4, [1, ]).item()
cArray.setArray(data)
assert np.array_equal(NumCpp.rot90(cArray, amount).getNumpyArray(), np.rot90(data, amount))
####################################################################################
def test_round():
value = np.abs(np.random.rand(1).item()) * 2 * np.pi
assert NumCpp.roundScaler(value, 10) == np.round(value, 10)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols) * 2 * np.pi
cArray.setArray(data)
assert np.array_equal(NumCpp.roundArray(cArray, 9), np.round(data, 9))
####################################################################################
def test_row_stack():
shapeInput = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shape2 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape3 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
shape4 = NumCpp.Shape(shapeInput[0].item() + np.random.randint(1, 10, [1, ]).item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
cArray3 = NumCpp.NdArray(shape3)
cArray4 = NumCpp.NdArray(shape4)
data1 = np.random.randint(1, 100, [shape1.rows, shape1.cols])
data2 = np.random.randint(1, 100, [shape2.rows, shape2.cols])
data3 = np.random.randint(1, 100, [shape3.rows, shape3.cols])
data4 = np.random.randint(1, 100, [shape4.rows, shape4.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.row_stack(cArray1, cArray2, cArray3, cArray4),
np.row_stack([data1, data2, data3, data4]))
####################################################################################
def test_setdiff1d():
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayUInt32(shape)
cArray2 = NumCpp.NdArrayUInt32(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
data2 = np.random.randint(1, 100, [shape.rows, shape.cols], dtype=np.uint32)
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.setdiff1d(cArray1, cArray2).getNumpyArray().flatten(),
np.setdiff1d(data1, data2))
shapeInput = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.setdiff1d(cArray1, cArray2).getNumpyArray().flatten(),
np.setdiff1d(data1, data2))
####################################################################################
def test_shape():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
assert cArray.shape().rows == shape.rows and cArray.shape().cols == shape.cols
####################################################################################
def test_sign():
value = np.random.randn(1).item() * 100
assert NumCpp.signScaler(value) == np.sign(value)
value = np.random.randn(1).item() * 100 + 1j * np.random.randn(1).item() * 100
assert NumCpp.signScaler(value) == np.sign(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
assert np.array_equal(NumCpp.signArray(cArray), np.sign(data))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.signArray(cArray), np.sign(data))
####################################################################################
def test_signbit():
value = np.random.randn(1).item() * 100
assert NumCpp.signbitScaler(value) == np.signbit(value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols) * 100
cArray.setArray(data)
assert np.array_equal(NumCpp.signbitArray(cArray), np.signbit(data))
####################################################################################
def test_sin():
value = np.random.randn(1).item()
assert np.round(NumCpp.sinScaler(value), 9) == np.round(np.sin(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.sinScaler(value), 9) == np.round(np.sin(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sinArray(cArray), 9), np.round(np.sin(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sinArray(cArray), 9), np.round(np.sin(data), 9))
####################################################################################
def test_sinc():
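    # np.sinc is the normalized sinc: sin(pi * x) / (pi * x), with sinc(0) == 1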
    value = np.random.randn(1).item()
    assert np.round(NumCpp.sincScaler(value), 9) == np.round(np.sinc(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sincArray(cArray), 9), np.round(np.sinc(data), 9))
####################################################################################
def test_sinh():
value = np.random.randn(1).item()
assert np.round(NumCpp.sinhScaler(value), 9) == np.round(np.sinh(value), 9)
value = np.random.randn(1).item() + 1j * np.random.randn(1).item()
assert np.round(NumCpp.sinhScaler(value), 9) == np.round(np.sinh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randn(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sinhArray(cArray), 9), np.round(np.sinh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.randn(shape.rows, shape.cols) + 1j * np.random.randn(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sinhArray(cArray), 9), np.round(np.sinh(data), 9))
####################################################################################
def test_size():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
assert cArray.size() == shapeInput.prod().item()
####################################################################################
def test_sort():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
d = data.flatten()
d.sort()
assert np.array_equal(NumCpp.sort(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten(), d)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
d = data.flatten()
d.sort()
assert np.array_equal(NumCpp.sort(cArray, NumCpp.Axis.NONE).getNumpyArray().flatten(), d)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pSorted = np.sort(data, axis=0)
cSorted = NumCpp.sort(cArray, NumCpp.Axis.ROW).getNumpyArray()
assert np.array_equal(cSorted, pSorted)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pSorted = np.sort(data, axis=0)
cSorted = NumCpp.sort(cArray, NumCpp.Axis.ROW).getNumpyArray()
assert np.array_equal(cSorted, pSorted)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
pSorted = np.sort(data, axis=1)
cSorted = NumCpp.sort(cArray, NumCpp.Axis.COL).getNumpyArray()
assert np.array_equal(cSorted, pSorted)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
pSorted = np.sort(data, axis=1)
cSorted = NumCpp.sort(cArray, NumCpp.Axis.COL).getNumpyArray()
assert np.array_equal(cSorted, pSorted)
####################################################################################
def test_sqrt():
value = np.random.randint(1, 100, [1, ]).item()
assert np.round(NumCpp.sqrtScaler(value), 9) == np.round(np.sqrt(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.sqrtScaler(value), 9) == np.round(np.sqrt(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sqrtArray(cArray), 9), np.round(np.sqrt(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.sqrtArray(cArray), 9), np.round(np.sqrt(data), 9))
####################################################################################
def test_square():
value = np.random.randint(1, 100, [1, ]).item()
assert np.round(NumCpp.squareScaler(value), 9) == np.round(np.square(value), 9)
value = np.random.randint(1, 100, [1, ]).item() + 1j * np.random.randint(1, 100, [1, ]).item()
assert np.round(NumCpp.squareScaler(value), 9) == np.round(np.square(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.squareArray(cArray), 9), np.round(np.square(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.squareArray(cArray), 9), np.round(np.square(data), 9))
####################################################################################
def test_stack():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
cArray4 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data3 = np.random.randint(1, 100, [shape.rows, shape.cols])
data4 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.stack(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.ROW),
np.vstack([data1, data2, data3, data4]))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
cArray3 = NumCpp.NdArray(shape)
cArray4 = NumCpp.NdArray(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data3 = np.random.randint(1, 100, [shape.rows, shape.cols])
data4 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
cArray3.setArray(data3)
cArray4.setArray(data4)
assert np.array_equal(NumCpp.stack(cArray1, cArray2, cArray3, cArray4, NumCpp.Axis.COL),
np.hstack([data1, data2, data3, data4]))
####################################################################################
def test_stdev():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.round(NumCpp.stdev(cArray, NumCpp.Axis.NONE).item(), 9) == np.round(np.std(data), 9)
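    # complex input is only smoke-tested below: np.std returns a real value for
    # complex data, which presumably differs from NumCpp's complex stdev definition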
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.stdev(cArray, NumCpp.Axis.NONE).item() is not None
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.stdev(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), 9),
np.round(np.std(data, axis=0), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.stdev(cArray, NumCpp.Axis.ROW) is not None
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.stdev(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), 9),
np.round(np.std(data, axis=1), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.stdev(cArray, NumCpp.Axis.COL) is not None
####################################################################################
def test_subtract():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArray(shape)
data1 = np.random.randint(-100, 100, [shape.rows, shape.cols])
data2 = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.subtract(cArray1, cArray2), data1 - data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(cArray, value), data - value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(value, cArray), value - data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.subtract(cArray1, cArray2), data1 - data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(cArray, value), data - value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(value, cArray), value - data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArrayComplexDouble(shape)
cArray2 = NumCpp.NdArray(shape)
real1 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag1 = np.random.randint(1, 100, [shape.rows, shape.cols])
data1 = real1 + 1j * imag1
data2 = np.random.randint(1, 100, [shape.rows, shape.cols])
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.subtract(cArray1, cArray2), data1 - data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray1 = NumCpp.NdArray(shape)
cArray2 = NumCpp.NdArrayComplexDouble(shape)
data1 = np.random.randint(1, 100, [shape.rows, shape.cols])
real2 = np.random.randint(1, 100, [shape.rows, shape.cols])
imag2 = np.random.randint(1, 100, [shape.rows, shape.cols])
data2 = real2 + 1j * imag2
cArray1.setArray(data1)
cArray2.setArray(data2)
assert np.array_equal(NumCpp.subtract(cArray1, cArray2), data1 - data2)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(cArray, value), data - value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(-100, 100, [shape.rows, shape.cols])
cArray.setArray(data)
value = np.random.randint(-100, 100) + 1j * np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(value, cArray), value - data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(cArray, value), data - value)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
value = np.random.randint(-100, 100)
assert np.array_equal(NumCpp.subtract(value, cArray), value - data)
####################################################################################
def test_sum():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert NumCpp.sum(cArray, NumCpp.Axis.NONE).item() == np.sum(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert NumCpp.sum(cArray, NumCpp.Axis.NONE).item() == np.sum(data)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(NumCpp.sum(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.sum(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.sum(cArray, NumCpp.Axis.ROW).getNumpyArray().flatten(), np.sum(data, axis=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(NumCpp.sum(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), np.sum(data, axis=1))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
assert np.array_equal(NumCpp.sum(cArray, NumCpp.Axis.COL).getNumpyArray().flatten(), np.sum(data, axis=1))
####################################################################################
def test_swap():
shapeInput1 = np.random.randint(20, 100, [2, ])
shapeInput2 = np.random.randint(20, 100, [2, ])
shape1 = NumCpp.Shape(shapeInput1[0].item(), shapeInput1[1].item())
shape2 = NumCpp.Shape(shapeInput2[0].item(), shapeInput2[1].item())
cArray1 = NumCpp.NdArray(shape1)
cArray2 = NumCpp.NdArray(shape2)
data1 = np.random.randint(0, 100, [shape1.rows, shape1.cols]).astype(np.double)
data2 = np.random.randint(0, 100, [shape2.rows, shape2.cols]).astype(np.double)
cArray1.setArray(data1)
cArray2.setArray(data2)
NumCpp.swap(cArray1, cArray2)
assert (np.array_equal(cArray1.getNumpyArray(), data2) and
np.array_equal(cArray2.getNumpyArray(), data1))
####################################################################################
def test_swapaxes():
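    # for a 2-D array, swapping the two axes is just a transpose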
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(NumCpp.swapaxes(cArray).getNumpyArray(), data.T)
####################################################################################
def test_tan():
value = np.random.rand(1).item() * np.pi
assert np.round(NumCpp.tanScaler(value), 9) == np.round(np.tan(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.tanScaler(value), 9) == np.round(np.tan(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.tanArray(cArray), 9), np.round(np.tan(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.tanArray(cArray), 9), np.round(np.tan(data), 9))
####################################################################################
def test_tanh():
value = np.random.rand(1).item() * np.pi
assert np.round(NumCpp.tanhScaler(value), 9) == np.round(np.tanh(value), 9)
components = np.random.rand(2).astype(np.double)
value = complex(components[0], components[1])
assert np.round(NumCpp.tanhScaler(value), 9) == np.round(np.tanh(value), 9)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.tanhArray(cArray), 9), np.round(np.tanh(data), 9))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
data = np.random.rand(shape.rows, shape.cols) + 1j * np.random.rand(shape.rows, shape.cols)
cArray.setArray(data)
assert np.array_equal(np.round(NumCpp.tanhArray(cArray), 9), np.round(np.tanh(data), 9))
####################################################################################
def test_tile():
shapeInput = np.random.randint(1, 10, [2, ])
shapeRepeat = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shapeR = NumCpp.Shape(shapeRepeat[0].item(), shapeRepeat[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.tileRectangle(cArray, shapeR.rows, shapeR.cols), np.tile(data, shapeRepeat))
shapeInput = np.random.randint(1, 10, [2, ])
shapeRepeat = np.random.randint(1, 10, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
shapeR = NumCpp.Shape(shapeRepeat[0].item(), shapeRepeat[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
assert np.array_equal(NumCpp.tileShape(cArray, shapeR), np.tile(data, shapeRepeat))
####################################################################################
def test_tofile():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
tempDir = tempfile.gettempdir()
filename = os.path.join(tempDir, 'temp.bin')
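    # an empty separator writes raw binary; the second case below writes
    # newline-separated text, and np.fromfile mirrors each mode when reading back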
NumCpp.tofile(cArray, filename, '')
assert os.path.exists(filename)
data2 = np.fromfile(filename, np.double).reshape(shapeInput)
assert np.array_equal(data, data2)
os.remove(filename)
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
tempDir = tempfile.gettempdir()
filename = os.path.join(tempDir, 'temp.txt')
NumCpp.tofile(cArray, filename, '\n')
assert os.path.exists(filename)
data2 = np.fromfile(filename, dtype=np.double, sep='\n').reshape(shapeInput)
assert np.array_equal(data, data2)
os.remove(filename)
####################################################################################
def test_toStlVector():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
out = np.asarray(NumCpp.toStlVector(cArray))
assert np.array_equal(out, data.flatten())
####################################################################################
def test_trace():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
offset = np.random.randint(0, shape.rows, [1, ]).item()
assert np.array_equal(NumCpp.trace(cArray, offset, NumCpp.Axis.ROW), data.trace(offset, axis1=1, axis2=0))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
offset = np.random.randint(0, shape.rows, [1, ]).item()
assert np.array_equal(NumCpp.trace(cArray, offset, NumCpp.Axis.COL), data.trace(offset, axis1=0, axis2=1))
####################################################################################
def test_transpose():
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
assert np.array_equal(NumCpp.transpose(cArray).getNumpyArray(), np.transpose(data))
####################################################################################
def test_trapz():
shape = NumCpp.Shape(np.random.randint(10, 20, [1, ]).item(), 1)
cArray = NumCpp.NdArray(shape)
coeffs = np.random.randint(0, 10, [2, ])
dx = np.random.rand(1).item()
data = np.array([x ** 2 - coeffs[0] * x + coeffs[1] for x in range(shape.size())])
cArray.setArray(data)
integralC = NumCpp.trapzDx(cArray, dx, NumCpp.Axis.NONE).item()
integralPy = np.trapz(data, dx=dx)
assert np.round(integralC, 8) == np.round(integralPy, 8)
shape = NumCpp.Shape(np.random.randint(10, 20, [1, ]).item(), np.random.randint(10, 20, [1, ]).item())
cArray = NumCpp.NdArray(shape)
coeffs = np.random.randint(0, 10, [2, ])
dx = np.random.rand(1).item()
data = np.array([x ** 2 - coeffs[0] * x - coeffs[1] for x in range(shape.size())]).reshape(shape.rows, shape.cols)
cArray.setArray(data)
integralC = NumCpp.trapzDx(cArray, dx, NumCpp.Axis.ROW).flatten()
integralPy = np.trapz(data, dx=dx, axis=0)
assert np.array_equal(np.round(integralC, 8), np.round(integralPy, 8))
shape = NumCpp.Shape(np.random.randint(10, 20, [1, ]).item(), np.random.randint(10, 20, [1, ]).item())
cArray = NumCpp.NdArray(shape)
coeffs = np.random.randint(0, 10, [2, ])
dx = np.random.rand(1).item()
data = np.array([x ** 2 - coeffs[0] * x - coeffs[1] for x in range(shape.size())]).reshape(shape.rows, shape.cols)
cArray.setArray(data)
integralC = NumCpp.trapzDx(cArray, dx, NumCpp.Axis.COL).flatten()
integralPy = np.trapz(data, dx=dx, axis=1)
assert np.array_equal(np.round(integralC, 8), np.round(integralPy, 8))
shape = NumCpp.Shape(1, np.random.randint(10, 20, [1, ]).item())
cArrayY = NumCpp.NdArray(shape)
cArrayX = NumCpp.NdArray(shape)
coeffs = np.random.randint(0, 10, [2, ])
dx = np.random.rand(shape.rows, shape.cols)
data = np.array([x ** 2 - coeffs[0] * x + coeffs[1] for x in range(shape.size())])
cArrayY.setArray(data)
cArrayX.setArray(dx)
integralC = NumCpp.trapz(cArrayY, cArrayX, NumCpp.Axis.NONE).item()
integralPy = np.trapz(data, x=dx)
assert np.round(integralC, 8) == np.round(integralPy, 8)
shape = NumCpp.Shape(np.random.randint(10, 20, [1, ]).item(), np.random.randint(10, 20, [1, ]).item())
cArrayY = NumCpp.NdArray(shape)
cArrayX = NumCpp.NdArray(shape)
coeffs = np.random.randint(0, 10, [2, ])
dx = np.random.rand(shape.rows, shape.cols)
data = np.array([x ** 2 - coeffs[0] * x + coeffs[1] for x in range(shape.size())]).reshape(shape.rows, shape.cols)
cArrayY.setArray(data)
cArrayX.setArray(dx)
integralC = NumCpp.trapz(cArrayY, cArrayX, NumCpp.Axis.ROW).flatten()
integralPy = np.trapz(data, x=dx, axis=0)
assert np.array_equal(np.round(integralC, 8), np.round(integralPy, 8))
shape = NumCpp.Shape(np.random.randint(10, 20, [1, ]).item(), np.random.randint(10, 20, [1, ]).item())
cArrayY = NumCpp.NdArray(shape)
cArrayX = NumCpp.NdArray(shape)
coeffs = np.random.randint(0, 10, [2, ])
dx = np.random.rand(shape.rows, shape.cols)
data = np.array([x ** 2 - coeffs[0] * x + coeffs[1] for x in range(shape.size())]).reshape(shape.rows, shape.cols)
cArrayY.setArray(data)
cArrayX.setArray(dx)
integralC = NumCpp.trapz(cArrayY, cArrayX, NumCpp.Axis.COL).flatten()
integralPy = np.trapz(data, x=dx, axis=1)
assert np.array_equal(np.round(integralC, 8), np.round(integralPy, 8))
####################################################################################
def test_tril():
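    # np.tri(N, M, k) is ones on and below the k-th diagonal, i.e. the expected
    # lower-triangular mask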
squareSize = np.random.randint(10, 100, [1, ]).item()
offset = np.random.randint(0, squareSize // 2, [1, ]).item()
assert np.array_equal(NumCpp.trilSquare(squareSize, offset),
np.tri(squareSize, k=offset))
squareSize = np.random.randint(10, 100, [1, ]).item()
offset = np.random.randint(0, squareSize // 2, [1, ]).item()
assert np.array_equal(NumCpp.trilSquareComplex(squareSize, offset),
np.tri(squareSize, k=offset) + 1j * np.zeros([squareSize, squareSize]))
shapeInput = np.random.randint(10, 100, [2, ])
offset = np.random.randint(0, np.min(shapeInput) // 2, [1, ]).item()
assert np.array_equal(NumCpp.trilRect(shapeInput[0].item(), shapeInput[1].item(), offset),
np.tri(shapeInput[0].item(), shapeInput[1].item(), k=offset))
shapeInput = np.random.randint(10, 100, [2, ])
offset = np.random.randint(0, np.min(shapeInput) // 2, [1, ]).item()
assert np.array_equal(NumCpp.trilRectComplex(shapeInput[0].item(), shapeInput[1].item(), offset),
np.tri(shapeInput[0].item(), shapeInput[1].item(), k=offset) + 1j * np.zeros(shapeInput))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols])
cArray.setArray(data)
offset = np.random.randint(0, shape.rows // 2, [1, ]).item()
assert np.array_equal(NumCpp.trilArray(cArray, offset),
np.tril(data, k=offset))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
offset = np.random.randint(0, shape.rows // 2, [1, ]).item()
assert np.array_equal(NumCpp.trilArray(cArray, offset),
np.tril(data, k=offset))
####################################################################################
def test_triu():
squareSize = np.random.randint(10, 100, [1, ]).item()
offset = np.random.randint(0, squareSize // 2, [1, ]).item()
assert np.array_equal(NumCpp.triuSquare(squareSize, offset),
np.tri(squareSize, k=-offset).T)
squareSize = np.random.randint(10, 100, [1, ]).item()
offset = np.random.randint(0, squareSize // 2, [1, ]).item()
assert np.array_equal(NumCpp.triuSquareComplex(squareSize, offset),
np.tri(squareSize, k=-offset).T + 1j * np.zeros([squareSize, squareSize]))
shapeInput = np.random.randint(10, 100, [2, ])
offset = np.random.randint(0, np.min(shapeInput), [1, ]).item()
# NOTE: numpy triu appears to have a bug... just check that NumCpp runs without error
assert NumCpp.triuRect(shapeInput[0].item(), shapeInput[1].item(), offset) is not None
shapeInput = np.random.randint(10, 100, [2, ])
offset = np.random.randint(0, np.min(shapeInput), [1, ]).item()
# NOTE: numpy triu appears to have a bug... just check that NumCpp runs without error
assert NumCpp.triuRectComplex(shapeInput[0].item(), shapeInput[1].item(), offset) is not None
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArray(shape)
data = np.random.randint(0, 100, [shape.rows, shape.cols]).astype(np.double)
cArray.setArray(data)
offset = np.random.randint(0, shape.rows // 2, [1, ]).item()
assert np.array_equal(NumCpp.triuArray(cArray, offset), np.triu(data, k=offset))
shapeInput = np.random.randint(20, 100, [2, ])
shape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
cArray.setArray(data)
offset = np.random.randint(0, shape.rows // 2, [1, ]).item()
assert np.array_equal(NumCpp.triuArray(cArray, offset), np.triu(data, k=offset))
####################################################################################
def test_trim_zeros():
numElements = np.random.randint(50, 100, [1, ]).item()
offsetBeg = np.random.randint(0, 10, [1, ]).item()
offsetEnd = np.random.randint(10, numElements, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
cArray = NumCpp.NdArray(shape)
data = np.random.randint(1, 100, [shape.rows, shape.cols])
data[0, :offsetBeg] = 0
data[0, -offsetEnd:] = 0
cArray.setArray(data)
assert np.array_equal(NumCpp.trim_zeros(cArray, 'f').getNumpyArray().flatten(),
np.trim_zeros(data.flatten(), 'f'))
numElements = np.random.randint(50, 100, [1, ]).item()
offsetBeg = np.random.randint(0, 10, [1, ]).item()
offsetEnd = np.random.randint(10, numElements, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
cArray = NumCpp.NdArrayComplexDouble(shape)
real = np.random.randint(1, 100, [shape.rows, shape.cols])
imag = np.random.randint(1, 100, [shape.rows, shape.cols])
data = real + 1j * imag
data[0, :offsetBeg] = complex(0, 0)
data[0, -offsetEnd:] = complex(0, 0)
cArray.setArray(data)
assert np.array_equal(NumCpp.trim_zeros(cArray, 'f').getNumpyArray().flatten(),
np.trim_zeros(data.flatten(), 'f'))
numElements = np.random.randint(50, 100, [1, ]).item()
offsetBeg = np.random.randint(0, 10, [1, ]).item()
offsetEnd = np.random.randint(10, numElements, [1, ]).item()
shape = NumCpp.Shape(1, numElements)
cArray = NumCpp.NdArray(shape)
    data = np.random.randint(1, 100, [shape.rows, shape.cols])
import pytest
from itertools import product
import numpy as np
from referenceqvm.unitary_generator import tensor_up
from pyquil.paulis import PauliTerm, PauliSum
from forestopenfermion.pyquil_connector import qubitop_to_pyquilpauli
from representability.fermions.density.symm_sz_density import SymmOrbitalDensity
from representability.fermions.utils import get_molecule_openfermion
from representability.config import *
from openfermion.transforms import jordan_wigner
from openfermion.hamiltonians import MolecularData
from openfermion.ops import FermionOperator
from openfermionpsi4 import run_psi4
# probably want to upgrade this to a yield fixture; it will need to be an object
def system():
print('Running System Setup')
basis = 'sto-3g'
multiplicity = 1
charge = 1
geometry = [('He', [0.0, 0.0, 0.0]), ('H', [0, 0, 0.75])]
molecule = MolecularData(geometry, basis, multiplicity, charge)
# Run Psi4.
molecule = run_psi4(molecule,
run_scf=True,
run_mp2=False,
run_cisd=False,
run_ccsd=False,
run_fci=True,
delete_input=False)
molecule, gs_wf, n_density, eigen_val = get_molecule_openfermion(molecule, eigen_index=2)
rdm_generator = SymmOrbitalDensity(n_density, molecule.n_qubits)
transform = jordan_wigner
return n_density, rdm_generator, transform, molecule
def test_construct_opdm():
"""
Test the construction of one-particle density matrix
<psi|a_{p}^{\dagger}a_{q}|psi>
"""
rho, rdm_generator, transform, molecule = system()
dim = molecule.n_qubits
opdm_a = np.zeros((int(dim/2), int(dim/2)), dtype=complex)
opdm_b = np.zeros((int(dim/2), int(dim/2)), dtype=complex)
for p, q in product(range(int(dim/2)), repeat=2):
pauli_proj_op = transform(FermionOperator(((2 * p, 1), (2 * q, 0))))
pauli_proj_op = qubitop_to_pyquilpauli(pauli_proj_op)
if isinstance(pauli_proj_op, PauliTerm):
pauli_proj_op = PauliSum([pauli_proj_op])
lifted_op = tensor_up(pauli_proj_op, molecule.n_qubits)
opdm_element = np.trace(lifted_op.dot(rho))
opdm_a[p, q] = opdm_element
pauli_proj_op = transform(FermionOperator(((2 * p + 1, 1), (2 * q + 1, 0))))
pauli_proj_op = qubitop_to_pyquilpauli(pauli_proj_op)
if isinstance(pauli_proj_op, PauliTerm):
pauli_proj_op = PauliSum([pauli_proj_op])
lifted_op = tensor_up(pauli_proj_op, molecule.n_qubits)
opdm_element = np.trace(lifted_op.dot(rho))
opdm_b[p, q] = opdm_element
opdm_a_test, opdm_b_test = rdm_generator.construct_opdm()
assert np.allclose(opdm_b, opdm_b_test)
assert np.allclose(opdm_a, opdm_a_test)
opdm_b_test = rdm_generator._tensor_construct(2, [-1, 1], [1, 1])
assert np.allclose(opdm_b_test, opdm_b)
opdm_a_test = rdm_generator._tensor_construct(2, [-1, 1], [0, 0])
assert np.allclose(opdm_a_test, opdm_a)
def test_construct_ohdm():
"""
Test the construction of the one-hole density matrix
<psi|a_{p}a_{q}^{\dagger}|psi>
"""
rho, rdm_generator, transform, molecule = system()
dim = molecule.n_qubits
ohdm_a = np.zeros((int(dim/2), int(dim/2)), dtype=complex)
ohdm_b = np.zeros((int(dim/2), int(dim/2)), dtype=complex)
for p, q in product(range(int(dim/2)), repeat=2):
pauli_proj_op = transform(FermionOperator(((2 * p, 0), (2 * q, 1))))
pauli_proj_op = qubitop_to_pyquilpauli(pauli_proj_op)
if isinstance(pauli_proj_op, PauliTerm):
pauli_proj_op = PauliSum([pauli_proj_op])
lifted_op = tensor_up(pauli_proj_op, molecule.n_qubits)
ohdm_element = np.trace(lifted_op.dot(rho))
ohdm_a[p, q] = ohdm_element
pauli_proj_op = transform(FermionOperator(((2 * p + 1, 0), (2 * q + 1, 1))))
pauli_proj_op = qubitop_to_pyquilpauli(pauli_proj_op)
if isinstance(pauli_proj_op, PauliTerm):
pauli_proj_op = PauliSum([pauli_proj_op])
lifted_op = tensor_up(pauli_proj_op, molecule.n_qubits)
ohdm_element = np.trace(lifted_op.dot(rho))
ohdm_b[p, q] = ohdm_element
ohdm_a_test, ohdm_b_test = rdm_generator.construct_ohdm()
assert np.allclose(ohdm_a_test, ohdm_a)
assert np.allclose(ohdm_b_test, ohdm_b)
ohdm_a_test = rdm_generator._tensor_construct(2, [1, -1], [0, 0])
assert np.allclose(ohdm_a_test, ohdm_a)
ohdm_b_test = rdm_generator._tensor_construct(2, [1, -1], [1, 1])
    assert np.allclose(ohdm_b_test, ohdm_b)
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 21 18:09:14 2017
@author: tkoller
"""
from experiments.journal_experiment_configs.defaultconfig_exploration import DefaultConfigExploration
from .defaultconfig_episode import DefaultConfigEpisode
import numpy as np
class Config(DefaultConfigEpisode):
"""
Pendulum in the episodic setting, using CEM MPC (rather than Casadi).
Use the DefaultConfigExploration rather than DefaultConfigEpisode, because the former is set up for pendulum and the
latter is set up for cart pole. This means we have to override a load of exploration config values.
"""
verbose = 0
obs_frequency = 1 # Only take an observation every k-th time step (k = obs_frequency)
# environment
env_name = "LunarLander"
solver_type = "safempc_cem"
lqr_wx_cost = np.diag([1., 2., 2., 2.])
lqr_wu_cost = 1 * np.eye(2)
lin_prior = True
prior_model = dict()
prior_m = .1
prior_b = 0.0
prior_model["m"] = prior_m
prior_model["b"] = prior_b
env_options = dict()
init_std = np.array([.05, .05])
env_options["init_std"] = init_std
init_std_initial_data = np.array([1., 1., 1., 1.])
    init_m_initial_data = np.array([0., 0., 0., 0.])
# import numpy as np
# import os
# from sklearn import *
# from sklearn.metrics import classification_report, confusion_matrix
# from sklearn.neighbors import KNeighborsClassifier
# from sklearn.cluster import MiniBatchKMeans
# import cv2
# import matplotlib.mlab as mlab
# import matplotlib.pyplot as plt
# import pickle
# from sklearn import preprocessing
# from sklearn.cluster import KMeans
# # defining feature extractor that we want to use
# extractor = cv2.xfeatures2d.SIFT_create()
# def features(image, extractor):
# keypoints, descriptors = extractor.detectAndCompute(image, None)
# #print (keypoints)
# return keypoints, descriptors
# def sift_kmeans():
# labels=['coast','highway','forest','inside_city','mountain','opencountry','street','tallbuilding']
# sift_keypoints=[]
# for label in labels:
# path='train/'+label
# for imgfile in os.listdir(path):
# img = cv2.imread(os.path.join(path,imgfile),1)
# kp,des = features(img,extractor)
# sift_keypoints.append(des)
# sift_keypoints=np.asarray(sift_keypoints)
# sift_keypoints=np.concatenate(sift_keypoints, axis=0)
# #with the descriptors detected, lets clusterize them
# print("Training kmeans")
# for num_cluster in range(100,500,100):
# print("No. of cluster = "+str(num_cluster))
# kmeans = MiniBatchKMeans(n_clusters=num_cluster,random_state=0,init_size=int(num_cluster*1.2)).fit(sift_keypoints)
# print("Done Kmeans")
# pkl_filename = "pickle_model"+str(num_cluster)+".pkl"
# with open(pkl_filename, 'wb') as pkl_file:
# pickle.dump(kmeans,pkl_file)
# #return the learned model
# def histogram_test(model,num_cluster):
# feature_vectors=[]
# class_vectors=[]
# labels=['coast','highway','forest','inside_city','mountain','opencountry','street','tallbuilding']
# for label in labels:
# print("Testing")
# path='test/'+label
# print(label)
# # dir_hist=os.path.join('hist',label)
# # if os.path.isdir(dir_hist)==False:
# # os.makedirs(dir_hist)
# for imgfile in os.listdir(path):
# img = cv2.imread(os.path.join(path,imgfile),1)
# kp,des = features(img,extractor)
# predict_kmeans=model.predict(des)
# # print(predict_kmeans)
# #calculates the histogram
# hist=[0 for m in range(0,num_cluster)]
# for f in predict_kmeans:
# hist[f]+=1
# # hist, bin_edges=np.histogram(predict_kmeans,bins=num_cluster)
# # n, bins, patches = plt.hist(hist, bin_edges, facecolor='blue', alpha=0.5)
# # print(dir_hist+'/'+imgfile[:-3]+'png')
# # plt.savefig(dir_hist+'/'+imgfile[:-3]+'png')
# feature_vectors.append(hist)
# class_vectors.append(label)
# feature_vectors=np.asarray(feature_vectors)
# class_vectors=np.asarray(class_vectors)
# return feature_vectors,class_vectors
# def histogram(model,num_cluster):
# feature_vectors=[]
# class_vectors=[]
# labels=['coast','highway','forest','inside_city','mountain','opencountry','street','tallbuilding']
# for label in labels:
# path='train/'+label
# print(label)
# # dir_hist=os.path.join('hist',label)
# # if os.path.isdir(dir_hist)==False:
# # os.makedirs(dir_hist)
# for imgfile in os.listdir(path):
# img = cv2.imread(os.path.join(path,imgfile),1)
# kp,des = features(img,extractor)
# predict_kmeans=model.predict(des)
# # print(predict_kmeans)
# # print(predict_kmeans)
# #calculates the histogram
# hist=[0 for m in range(0,num_cluster)]
# for f in predict_kmeans:
# hist[f]+=1
# # hist, bin_edges=np.histogram(np.array(predict_kmeans),bins=num_cluster)
# # print(hist)
# # print(bin_edges)
# # n, bins, patches = plt.hist(hist, bin_edges, facecolor='blue', alpha=0.5)
# # print(dir_hist+'/'+imgfile[:-3]+'png')
# # plt.savefig(dir_hist+'/'+imgfile[:-3]+'png')
# feature_vectors.append(hist)
# class_vectors.append(label)
# feature_vectors=np.asarray(feature_vectors)
# class_vectors=np.asarray(class_vectors)
# return feature_vectors,class_vectors
# #print(desc)
# #img2 = cv2.drawKeypoints(img,kp,None)
# #img3 = cv2.drawKeypoints(img1,kp1,None)
# #cv2.imshow('photo',img2)
# #cv2.imshow('photo1',img3)
# #cv2.waitKey(0)
# #cv2.destroyAllWindows()
# if __name__ == "__main__":
# sift_kmeans()
# # for i in range(100,500,100):
# # filename="pickle_model"+str(i)+".pkl"
# # model=pickle.load(open(filename, 'rb'))
# # # print (len(model.cluster_centers_))
# # # for m in model.cluster_centers_:
# # # print(len(m))
# # # # print(len(model))
# # # break
# # train_ft,train_label=histogram(model,i)
# # le = preprocessing.LabelEncoder()
# # train_enc_label=le.fit_transform(list(train_label))
# # # print(enc_label)
# # test_ft,test_label=histogram_test(model,i)
# # le1 = preprocessing.LabelEncoder()
# # test_enc_label=le1.fit_transform(list(test_label))
# # error=[]
# # for j in range(5, 45):
# # knn = KNeighborsClassifier(n_neighbors=j)
# # knn.fit(list(train_ft), train_enc_label)
# # pred_i = knn.predict(list(test_ft))
# # print(confusion_matrix(test_enc_label, pred_i))
# # print(classification_report(test_enc_label, pred_i))
# # error.append(np.mean(pred_i != test_enc_label))
# # plt.figure(figsize=(12, 6))
# # plt.plot(range(5, 45), error, color='red', linestyle='dashed', marker='o',
# # markerfacecolor='blue', markersize=10)
# # plt.title('Error Rate K Value')
# # plt.xlabel('K Value')
# # plt.ylabel('Mean Error')
# # plt.savefig("Error_for_"+str(i)+"words.png")
import numpy as np
import os
from sklearn import *
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cluster import MiniBatchKMeans
import cv2
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import pickle
from sklearn import preprocessing
from sklearn.cluster import KMeans
# defining feature extractor that we want to use
extractor = cv2.xfeatures2d.SIFT_create()
def features(image, extractor):
keypoints, descriptors = extractor.detectAndCompute(image, None)
#print (keypoints)
return keypoints, descriptors
def sift_kmeans():
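    # bag-of-visual-words vocabulary: pool SIFT descriptors from every training
    # image, then cluster them with MiniBatchKMeans for several vocabulary sizes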
labels=['coast','highway','forest','inside_city','mountain','opencountry','street','tallbuilding']
sift_keypoints=[]
for label in labels:
path='train/'+label
for imgfile in os.listdir(path):
img = cv2.imread(os.path.join(path,imgfile),1)
kp,des = features(img,extractor)
sift_keypoints.append(des)
sift_keypoints=np.asarray(sift_keypoints)
sift_keypoints=np.concatenate(sift_keypoints, axis=0)
    # with the descriptors detected, let's cluster them
print("Training kmeans")
for num_cluster in range(100,500,100):
print("No. of cluster = "+str(num_cluster))
kmeans = MiniBatchKMeans(n_clusters=num_cluster,random_state=0,init_size=int(num_cluster*1.2)).fit(sift_keypoints)
print("Done Kmeans")
pkl_filename = "pickle_model"+str(num_cluster)+".pkl"
with open(pkl_filename, 'wb') as pkl_file:
pickle.dump(kmeans,pkl_file)
#return the learned model
def histogram_test(model,num_cluster):
feature_vectors=[]
class_vectors=[]
labels=['coast','highway','forest','inside_city','mountain','opencountry','street','tallbuilding']
for label in labels:
print("Testing")
path='test/'+label
print(label)
# dir_hist=os.path.join('hist',label)
# if os.path.isdir(dir_hist)==False:
# os.makedirs(dir_hist)
for imgfile in os.listdir(path):
img = cv2.imread(os.path.join(path,imgfile),1)
kp,des = features(img,extractor)
predict_kmeans=model.predict(des)
# print(predict_kmeans)
#calculates the histogram
hist=[0 for m in range(0,num_cluster)]
for f in predict_kmeans:
hist[f]+=1
# hist, bin_edges=np.histogram(predict_kmeans,bins=num_cluster)
# n, bins, patches = plt.hist(hist, bin_edges, facecolor='blue', alpha=0.5)
# print(dir_hist+'/'+imgfile[:-3]+'png')
# plt.savefig(dir_hist+'/'+imgfile[:-3]+'png')
feature_vectors.append(hist)
class_vectors.append(label)
feature_vectors=np.asarray(feature_vectors)
    class_vectors=np.asarray(class_vectors)
    return feature_vectors,class_vectors
"""Compare free Green's function solvers in imaginary time,
DLR, and Matsubara space.
Copyright 2021 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied. See the License for the specific language governing
permissions and limitations under the License."""
import itertools
import numpy as np
from pydlr import dlr, kernel
def test_free_greens_function_scalar_bosonic(verbose=False):
xi = 1
lamb = 30.
beta = 10.234
H_aa = np.array([[0.3]])
S_aa = np.array([[1.]])
N = 100
w_n = 1.j*np.pi/beta * 2*np.arange(-N+1, N)
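    # bosonic Matsubara frequencies i*w_n = 2*pi*n*i/beta, n = -N+1, ..., N-1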
# -- DLR
d = dlr(lamb=lamb, python_impl=True, xi=xi)
tau_l = d.get_tau(beta)
G_laa_free = d.free_greens_function_tau(H_aa, beta, S_aa=S_aa)
G_xaa = d.dlr_from_tau(G_laa_free)
G_laa_dlr = d.tau_from_dlr(G_xaa)
G_laa_eval = d.eval_dlr_tau(G_xaa, tau_l, beta)
w_q = d.get_matsubara_frequencies(beta)
G_qaa_dlr = d.matsubara_from_dlr(G_xaa, beta)
G_qaa_exact = (1./(w_q - H_aa[0,0])).reshape((len(d), 1, 1))
G_qaa_free = d.free_greens_function_matsubara(H_aa, beta, S_aa=S_aa)
G_naa_exact = (1./(w_n - H_aa[0,0])).reshape((len(w_n), 1, 1))
G_naa_eval = d.eval_dlr_freq(G_xaa, w_n, beta)
G_xaa_matfree = d.dlr_from_matsubara(G_qaa_free, beta)
G_laa_matfree = d.tau_from_dlr(G_xaa_matfree)
G_xaa_free = d.free_greens_function_dlr(H_aa, beta)
G_laa_dlrfree = d.tau_from_dlr(G_xaa_free)
if verbose:
triqs_ref = False
if triqs_ref:
# -- TRIQS ref calc
from triqs.gf import Gf, MeshImFreq, inverse, iOmega_n, make_gf_from_fourier
mesh = MeshImFreq(beta, 'Boson', N)
g_n = Gf(mesh=mesh, target_shape=[])
g_n << inverse(iOmega_n - H_aa[0,0])
g_t = make_gf_from_fourier(g_n)
#t_t = np.array([ complex(x) for x in g_t.mesh ])
w_n_ref = np.array([ complex(x) for x in mesh ])
np.testing.assert_array_almost_equal(w_n, w_n_ref)
from triqs.plot.mpl_interface import oplot, oploti, oplotr, plt
sidx = np.argsort(tau_l)
        tau_l, G_laa_free, G_laa_dlr, G_laa_eval = (
            v[sidx] for v in (tau_l, G_laa_free, G_laa_dlr, G_laa_eval))
# -- Viz
import matplotlib.pyplot as plt
plt.figure(figsize=(5, 8))
subp = [2, 1, 1]
plt.subplot(*subp); subp[-1] += 1
if triqs_ref: oplotr(g_t)
plt.plot(tau_l, G_laa_free[:, 0, 0], '+', label='free')
plt.plot(tau_l, G_laa_dlr[:, 0, 0], 'x', label='dlr')
plt.plot(tau_l, G_laa_matfree[:, 0, 0], '.', label='mat free')
plt.plot(tau_l, G_laa_dlrfree[:, 0, 0], '.', label='dlr free')
plt.plot(tau_l, G_laa_eval[:, 0, 0], 'o', alpha=0.5, label='eval')
plt.legend(loc='upper right')
plt.xlabel(r'$\tau$')
subp = [2, 2, 3]
plt.subplot(*subp); subp[-1] += 1
if triqs_ref: oplotr(g_n)
plt.plot(w_q.imag, G_qaa_free[:, 0, 0].real, '+', label='free')
plt.plot(w_q.imag, G_qaa_dlr[:, 0, 0].real, 'x', label='dlr')
plt.plot(w_n.imag, G_naa_eval[:, 0, 0].real, '.', label='eval', alpha=0.2)
plt.legend(loc='upper right')
plt.xlabel(r'$i\omega_n$')
plt.ylabel(r'Re[$G(i\omega_n)$]')
plt.subplot(*subp); subp[-1] += 1
if triqs_ref: oploti(g_n)
plt.plot(w_q.imag, G_qaa_free[:, 0, 0].imag, '+', label=f'free')
plt.plot(w_q.imag, G_qaa_dlr[:, 0, 0].imag, 'x', label='dlr')
plt.plot(w_n.imag, G_naa_eval[:, 0, 0].imag, '.', label='eval', alpha=0.2)
plt.legend(loc='upper right')
plt.xlabel(r'$i\omega_n$')
plt.ylabel(r'Im[$G(i\omega_n)$]')
plt.tight_layout()
plt.show()
np.testing.assert_array_almost_equal(G_laa_free, G_laa_dlr)
    np.testing.assert_array_almost_equal(G_laa_free, G_laa_matfree)
import lasagne.layers as L
import lasagne.nonlinearities as NL
import lasagne.init
import theano.tensor as TT
import theano
import lasagne
from rllab.core.lasagne_powered import LasagnePowered
from rllab.core.serializable import Serializable
from rllab.core.network import MLP
from rllab.misc import ext
from rllab.misc import logger
import scipy.io as sio
import numpy as np
import pickle
class Mlp_Discriminator(LasagnePowered, Serializable):
def __init__(
self,
iteration,
disc_window=16,
disc_joints_dim=20,
learning_rate=0.005,
train_threshold=0.25, # train when average_disc_loss > train_threshold
a_max=1.0,
a_min=1.0,
batch_size = 64,
iter_per_train = 1,
decent_portion=0.8,
hidden_sizes=(32, 32),
hidden_nonlinearity=NL.tanh,
output_nonlinearity=None,
downsample_factor=1,
disc_network=None,
reg=0.08,
mocap_framerate=120,
mujoco_apirate=20,
exper_spec='none'
):
Serializable.quick_init(self, locals())
self.batch_size=64
self.iter_per_train=iter_per_train
self.disc_window = disc_window
self.downsample_factor = downsample_factor
self.disc_joints_dim = disc_joints_dim
self.disc_window_downsampled = (self.disc_window-1)//self.downsample_factor + 1
self.disc_dim = self.disc_window_downsampled*self.disc_joints_dim
self.end_iter = int(iteration*decent_portion)
self.iter_count = 0
self.learning_rate = learning_rate
self.train_threshold=train_threshold
self.reg =reg
self.rate_factor=mocap_framerate//mujoco_apirate
self.disc_data = {'avg_loss_data':[],
'avg_loss_gen':[]}
self.exper_spec=exper_spec
out_dim = 1
target_var = TT.imatrix('targets')
# create network
if disc_network is None:
disc_network = MLP(
input_shape=(self.disc_dim,),
output_dim=out_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
output_nonlinearity=output_nonlinearity,
)
self._disc_network = disc_network
disc_score = disc_network.output_layer
self.disc_score = disc_network.output_layer
obs_var = disc_network.input_layer.input_var
disc_var, = L.get_output([disc_score])
self._disc_var = disc_var
exp_reward = TT.nnet.sigmoid(disc_var)
LasagnePowered.__init__(self, [disc_score])
self._f_disc = ext.compile_function(
inputs=[obs_var],
outputs=[exp_reward],
log_name="f_discriminate_forward",
)
params = L.get_all_params(disc_network, trainable=True)
batch_loss = TT.nnet.binary_crossentropy(TT.nnet.sigmoid(disc_var), target_var)
batch_entropy = self.logit_bernoulli_entropy(disc_var)
loss = (batch_loss-self.reg * batch_entropy).mean()
updates = lasagne.updates.adam(loss, params, learning_rate=self.learning_rate)
self._f_disc_train = ext.compile_function(
inputs=[obs_var, target_var],
outputs=[loss],
updates=updates,
log_name="f_discriminate_train"
)
self._f_disc_loss = ext.compile_function(
inputs=[obs_var, target_var],
outputs=[loss],
log_name="f_discriminate_loss"
)
self.data = self.load_data()
self.a = np.linspace(a_min, a_max, self.end_iter)
def get_reward(self, observation):
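        # reward is sigmoid(D(window)): closer to 1 when the discriminator
        # scores the observation window as mocap-like (mocap is labeled 1)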
if len(observation.shape)==1:
observation = observation.reshape((1, observation.shape[0]))
disc_ob = self.get_disc_obs(observation)
# print(self.disc_dim)
# print(disc_ob.shape)
assert(disc_ob.shape[1] == self.disc_dim)
reward = self._f_disc(disc_ob)[0]
return reward[0][0]
def train(self, observations):
'''
observations: length trj_num list of np.array with shape (trj_length, dim)
'''
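        # one discriminator update per call: policy windows are labeled 0 and
        # mocap windows 1; training is skipped once the loss on policy data
        # drops below train_threshold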
#print("state len: ", len(observations))
logger.log("fitting discriminator...")
loss={"obs":[], "mocap":[]}
for i in range(self.iter_per_train):
batch_obs = self.get_batch_obs(observations, self.batch_size)
#print(batch_obs[10]/3.14*180)
batch_mocap = self.get_batch_mocap(self.batch_size)
disc_obs = self.get_disc_obs(batch_obs)
disc_mocap = batch_mocap
#print("\n\n\n")
#print(disc_obs[10])
#print("\n\n")
#print(disc_mocap[10])
#print("\n\n\n")
X = np.vstack((disc_obs, disc_mocap))
targets = np.zeros([2*self.batch_size, 1])
targets[self.batch_size :]=1
obs_loss = self._f_disc_loss(disc_obs, np.zeros([self.batch_size, 1]))
mocap_loss = self._f_disc_loss(disc_mocap, np.ones([self.batch_size, 1]))
if np.mean(obs_loss) > self.train_threshold:
self._f_disc_train(X, targets)
logger.log("fitted!")
else:
logger.log("yield training: avg_loss under threshold")
loss["obs"].append(obs_loss)
loss["mocap"].append(mocap_loss)
avg_disc_loss_obs = np.mean(loss["obs"])
        avg_disc_loss_mocap = np.mean(loss["mocap"])
import sys
from scipy import special, stats
from numpy import array as A
def compoundPartitioning(agents):
"""Compute and return sections with compound criteria
agents is a dict with keys "d", "id", "od", "s", "is", "os"
with sectorialized_agents__ with each of these criteria
"""
exc_h=set( agents["d"][-1][2]) & \
set(agents["id"][-1][2]) & \
set(agents["od"][-1][2]) & \
set( agents["s"][-1][2]) & \
set(agents["is"][-1][2]) & \
set(agents["os"][-1][2])
exc_i=set( agents["d"][-1][1]) & \
set(agents["id"][-1][1]) & \
set(agents["od"][-1][1]) & \
set( agents["s"][-1][1]) & \
set(agents["is"][-1][1]) & \
set(agents["os"][-1][1])
exc_p=set( agents["d"][-1][0]) & \
set(agents["id"][-1][0]) & \
set(agents["od"][-1][0]) & \
set( agents["s"][-1][0]) & \
set(agents["is"][-1][0]) & \
set(agents["os"][-1][0])
exc=exc_p,exc_i,exc_h
inc_h=set( agents["d"][-1][2]) | \
set(agents["id"][-1][2]) | \
set(agents["od"][-1][2]) | \
set( agents["s"][-1][2]) | \
set(agents["is"][-1][2]) | \
set(agents["os"][-1][2])
inc_i=set( agents["d"][-1][1]) | \
set(agents["id"][-1][1]) | \
set(agents["od"][-1][1]) | \
set( agents["s"][-1][1]) | \
set(agents["is"][-1][1]) | \
set(agents["os"][-1][1])
inc_p=set( agents["d"][-1][0]) | \
set(agents["id"][-1][0]) | \
set(agents["od"][-1][0]) | \
set( agents["s"][-1][0]) | \
set(agents["is"][-1][0]) | \
set(agents["os"][-1][0])
inc=inc_p, inc_i, inc_h
total=set(agents["d"][-1][0]+agents["d"][-1][1]+agents["d"][-1][2])
excc_h=exc[2]
excc_p=inc[0]
#excc_i=total - (exc[2] & inc[0])
excc_i=total - (exc[2] | inc[0])
excc=excc_p,excc_i,excc_h
incc_h=inc[2]
incc_p=excc[0]
incc_i=total-(incc_h | incc_p)
incc=incc_p,incc_i,incc_h
exce_h=exc[2]
exce_i=inc[1]
exce_p=total-(exce_h | exce_i)
exce=exce_p,exce_i,exce_h
ince_h=inc[2]
ince_i=exc[1]
ince_p=total-(ince_h | ince_i)
ince=ince_p,ince_i,ince_h
return dict(total=total, exc=exc, inc=inc, excc=excc, incc=incc, exce=exce, ince=ince)
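# Usage sketch (data is hypothetical): each agents[criterion] is a sequence whose
# last element is a (periphery, intermediary, hubs) triple of agent lists:
#   agents = {k: [(["p0"], ["i0"], ["h0"])] for k in ("d", "id", "od", "s", "is", "os")}
#   sections = compoundPartitioning(agents)
#   sections["exc"]  # sectors agreed on by all six criteria
#   sections["inc"]  # sectors under any single criterion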
class NetworkPartitioning:
network_count=0
def __init__(self,networkMeasures=None, minimum_incidence=1,metric="strength"):
if not networkMeasures:
networkMeasures=g.NetworkMeasures()
self.metric=metric
metric_=self.standardizeName(metric)
prob, max_degree_empirical, max_degree_possible = \
self.basicMeasures( networkMeasures , metric_)
incident_degrees, incident_degrees_, agent_degrees = \
self.makeDegreeLists( networkMeasures, metric_)
empirical_distribution = self.makeEmpiricalDistribution(
incident_degrees, incident_degrees_, networkMeasures.N )
binomial_distribution=self.makeBinomialDistribution(
prob, max_degree_possible, incident_degrees_)
binomial=stats.binom(max_degree_possible,prob)
#sectorialized_degrees= self.sectorializeDegrees(
# empirical_distribution, binomial_distribution, incident_degrees_)
#sectorialized_degrees_= self.newSectorializeDegrees(
# empirical_distribution, binomial_distribution, incident_degrees_)
sectorialized_degrees__= self.newerSectorializeDegrees(
empirical_distribution, binomial, incident_degrees_,
max_degree_empirical,minimum_incidence,networkMeasures.N )
#sectorialized_agents= self.sectorializeAgents(
# sectorialized_degrees, networkMeasures.degrees)
#sectorialized_agents_= self.sectorializeAgents(
# sectorialized_degrees_, networkMeasures.degrees)
sectorialized_agents__= self.sectorializeAgents(
sectorialized_degrees__, agent_degrees)
        NetworkPartitioning.network_count += 1  # to keep track of how many partitions have been done
self.makeSelf("incident_degrees_ ",incident_degrees_ ,
"incident_degrees ",incident_degrees ,
#"sectorialized_agents ",sectorialized_agents ,
#"sectorialized_agents_ ",sectorialized_agents_ ,
"sectorialized_agents__ ",sectorialized_agents__ ,
#"sectorialized_degrees ",sectorialized_degrees ,
#"sectorialized_degrees_ ",sectorialized_degrees_ ,
"sectorialized_degrees__ ",sectorialized_degrees__ ,
"binomial_distribution ",binomial_distribution ,
"prob" ,prob,
"max" ,(max_degree_possible, max_degree_empirical),
"empirical_distribution",empirical_distribution,
"binomial",binomial,
"metric_",metric_,
"minimum_incidence",minimum_incidence,
"binomial_distribution" ,binomial_distribution)
def makeSelf(self, *args):
for signifier, signified in zip(args[::2], args[1::2]):
#try:
exec("self.{} = signified".format(signifier))
#thing=signified
#exec("self.{} = thing".format(signifier))
#exec("self.{} = {}".format(signifier, signified))
#exec("self.{} = ".format(signifier), signified)
#except:
# self.binomial=signified
def standardizeName(self,name):
if name in (["s","strength","st"]+["f","força","forca","fo"]):
name_="s"
elif name in (["is","in_strength","ist"]+["fe","força_e","forca_e","fe"]):
name_="is"
elif name in (["os","out_strength","ost"]+["fs","força_s","forca_s","fs"]):
name_="os"
elif name in (["d","degree","dg"]+["g","grau","gr"]):
name_="d"
elif name in (["id","in_degree","idg"]+["ge","grau_e","gre"]):
name_="id"
elif name in (["od","out_degree","odg"]+["gs","grau_s","grs"]):
name_="od"
return name_
def basicMeasures(self,networkMeasures,metric_):
nm=networkMeasures
if metric_ in ("s","is","os"):
edge_weights=[i[2]["weight"] for i in nm.edges]
average_edge_weight=sum(edge_weights)/nm.E
self.average_edge_weight=average_edge_weight
if metric_=="s":
max_degree_empirical=round(max(nm.strengths_) / average_edge_weight)
elif metric_=="is":
max_degree_empirical=round(2*max(nm.in_strengths_) / average_edge_weight)
elif metric_=="os":
max_degree_empirical=round(2*max(nm.out_strengths_) / average_edge_weight)
elif metric_=="d":
max_degree_empirical=max(nm.degrees_)
elif metric_=="id":
max_degree_empirical=2*max(nm.in_degrees_)
elif metric_=="od":
max_degree_empirical=2*max(nm.out_degrees_)
prob=nm.E/(nm.N*(nm.N-1)) # edge probability
max_degree_possible=2*(nm.N-1) # max d given N
return prob, max_degree_empirical, max_degree_possible
def makeDegreeLists(self, networkMeasures,metric_):
if metric_=="s":
agent_degrees={i:round(j/self.average_edge_weight) for i,j in networkMeasures.strengths.items()}
incident_degrees=list(agent_degrees.values())
elif metric_=="is":
agent_degrees={i:round((2*j)/self.average_edge_weight) for i,j in networkMeasures.in_strengths.items()}
incident_degrees=list(agent_degrees.values())
elif metric_=="os":
agent_degrees={i:round((2*j)/self.average_edge_weight) for i,j in networkMeasures.out_strengths.items()}
incident_degrees=list(agent_degrees.values())
elif metric_=="d":
agent_degrees=networkMeasures.degrees
incident_degrees=networkMeasures.degrees_
elif metric_=="id":
agent_degrees={i:(2*j) for i,j in networkMeasures.in_degrees.items()}
incident_degrees=list(agent_degrees.values())
elif metric_=="od":
agent_degrees={i:(2*j) for i,j in networkMeasures.out_degrees.items()}
incident_degrees=list(agent_degrees.values())
incident_degrees_=list(set(incident_degrees))
incident_degrees_.sort()
return incident_degrees, incident_degrees_, agent_degrees
def makeEmpiricalDistribution(self, incident_degrees, incident_degrees_, N):
empirical_distribution=[]
for degree in incident_degrees_:
empirical_distribution.append(incident_degrees.count(degree)/N)
return empirical_distribution
def makeBinomialDistribution(self,prob,max_degree_possible,incident_degrees_):
"""If max_degree_possible == max_degree_empirical, makeBinomial ==1"""
binomial_distribution=[] # occurance probability of degrees
for degree in incident_degrees_:
if len(binomial_distribution) and binomial_distribution[-1]==0.0:
binomial_distribution.append(0.0)
else:
n_occurrences=special.binom(max_degree_possible,degree)
prob_degree=n_occurrences * (prob**degree)*((1-prob)**(max_degree_possible-degree))
binomial_distribution.append(prob_degree)
return binomial_distribution
def sectorializeAgents(self,sectorialized_degrees,agent_degrees):
periphery=[x for x in agent_degrees
if agent_degrees[x] in sectorialized_degrees[0]]
intermediary=[x for x in agent_degrees
if agent_degrees[x] in sectorialized_degrees[1]]
hubs=[x for x in agent_degrees
if agent_degrees[x] in sectorialized_degrees[2]]
return periphery, intermediary, hubs
def newerSectorializeDegrees(self,empirical_distribution,binomial,incident_degrees_,max_degree_empirical,minimum_count,num_agents):
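        # greedily bin the empirical degree distribution: grow each bin until it
        # holds at least minimum_count/num_agents probability mass, then compare
        # each bin's empirical mass against the binomial CDF mass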
# compute bins [start, end]
prob_min=minimum_count/num_agents
llimit=0
rlimit=0
self.bins=bins=[]
self.empirical_probs=empirical_probs=[]
while (rlimit < len(incident_degrees_)):
if (sum(empirical_distribution[llimit:])>prob_min):
prob_empirical=0
while True:
prob_empirical=sum(
empirical_distribution[llimit:rlimit+1] )
if prob_empirical >= prob_min:
break
else:
rlimit+=1
bins.append((llimit,rlimit))
empirical_probs.append(prob_empirical)
rlimit+=1
llimit=rlimit
else: # last bin
print("last bin less probable than prob_min")
rlimit=len(incident_degrees_)-1
bins.append((llimit,rlimit))
prob_empirical=sum(
empirical_distribution[llimit:rlimit+1] )
empirical_probs.append(prob_empirical)
rlimit+=1
binomial_probs=[]
for i, bin_ in enumerate(bins):
llimit=bin_[0]
rlimit=bin_[1]
ldegree=incident_degrees_[llimit]-1
rdegree=incident_degrees_[rlimit]
binomial_prob=binomial.cdf(rdegree)-binomial.cdf(ldegree)
binomial_probs.append(binomial_prob)
        # compute the probability mass in each bin
        # compare the probabilities
        distribution_compare = list(A(empirical_probs) < A(binomial_probs))
from pydantic import BaseModel, Field
from ROAR.control_module.controller import Controller
from ROAR.utilities_module.vehicle_models import VehicleControl, Vehicle
from ROAR.utilities_module.data_structures_models import Transform, Location
from collections import deque
import numpy as np
import math
import logging
from ROAR.agent_module.agent import Agent
from typing import Tuple
import json
from pathlib import Path
class Stanley_controller(Controller):
def __init__(self, agent, steering_boundary: Tuple[float, float],
throttle_boundary: Tuple[float, float], **kwargs):
super().__init__(agent, **kwargs)
self.max_speed = self.agent.agent_settings.max_speed
self.throttle_boundary = throttle_boundary
self.steering_boundary = steering_boundary
self.config = json.load(Path(agent.agent_settings.pid_config_file_path).open(mode='r'))
self.long_pid_controller = LongPIDController(agent=agent,
throttle_boundary=throttle_boundary,
max_speed=self.max_speed,
config=self.config["longitudinal_controller"])
self.lat_stanley_controller = LatStanley_controller(
agent=agent,
config=self.config["latitudinal_controller"],
steering_boundary=steering_boundary
)
self.logger = logging.getLogger(__name__)
def run_in_series(self, next_waypoint: Transform, **kwargs) -> VehicleControl:
throttle = self.long_pid_controller.run_in_series(next_waypoint=next_waypoint,
target_speed=kwargs.get("target_speed", self.max_speed))
steering = self.lat_stanley_controller.run_in_series(next_waypoint=next_waypoint)
return VehicleControl(throttle=throttle, steering=steering)
@staticmethod
def find_k_values(vehicle: Vehicle, config: dict) -> np.array:
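        # gain-schedule the PID constants by speed: walk the config's speed
        # buckets and take the first upper bound that exceeds the current speed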
current_speed = Vehicle.get_speed(vehicle=vehicle)
k_p, k_d, k_i = 1, 0, 0
for speed_upper_bound, kvalues in config.items():
speed_upper_bound = float(speed_upper_bound)
if current_speed < speed_upper_bound:
k_p, k_d, k_i = kvalues["Kp"], kvalues["Kd"], kvalues["Ki"]
break
return np.clip([k_p, k_d, k_i], a_min=0, a_max=1)
class LongPIDController(Controller):
def __init__(self, agent, config: dict, throttle_boundary: Tuple[float, float], max_speed: float,
dt: float = 0.03, **kwargs):
super().__init__(agent, **kwargs)
self.config = config
self.max_speed = max_speed
self.throttle_boundary = throttle_boundary
self._error_buffer = deque(maxlen=10)
self._dt = dt
def run_in_series(self, next_waypoint: Transform, **kwargs) -> float:
target_speed = min(self.max_speed, kwargs.get("target_speed", self.max_speed))
current_speed = Vehicle.get_speed(self.agent.vehicle)
k_p, k_d, k_i = Stanley_controller.find_k_values(vehicle=self.agent.vehicle, config=self.config)
error = target_speed - current_speed
self._error_buffer.append(error)
if len(self._error_buffer) >= 2:
# print(self._error_buffer[-1], self._error_buffer[-2])
_de = (self._error_buffer[-2] - self._error_buffer[-1]) / self._dt
_ie = sum(self._error_buffer) * self._dt
else:
_de = 0.0
_ie = 0.0
output = float(np.clip((k_p * error) + (k_d * _de) + (k_i * _ie), self.throttle_boundary[0],
self.throttle_boundary[1]))
# self.logger.debug(f"curr_speed: {round(current_speed, 2)} | kp: {round(k_p, 2)} | kd: {k_d} | ki = {k_i} | "
# f"err = {round(error, 2)} | de = {round(_de, 2)} | ie = {round(_ie, 2)}")
# f"self._error_buffer[-1] {self._error_buffer[-1]} | self._error_buffer[-2] = {self._error_buffer[-2]}")
return output
class LatStanley_controller(Controller):
def __init__(self, agent, config: dict, steering_boundary: Tuple[float, float],
dt: float = 0.03, **kwargs):
super().__init__(agent, **kwargs)
self.config = config
self.steering_boundary = steering_boundary
self._error_buffer = deque(maxlen=10)
self._dt = dt
def run_in_series(self, next_waypoint: Transform, **kwargs) -> float:
        # calculate a vector that represents where the vehicle is heading
v_begin = self.agent.vehicle.transform.location
v_end = v_begin + Location(
x=math.cos(math.radians(self.agent.vehicle.transform.rotation.pitch)),
y=v_begin.y,
z=math.sin(math.radians(self.agent.vehicle.transform.rotation.pitch)),
)
v_vec = np.array([v_end.x - v_begin.x,v_end.y - v_begin.y, v_end.z - v_begin.z])
# calculate error projection
w_vec = np.array(
[
next_waypoint.location.x - v_begin.x,
next_waypoint.location.y - v_begin.y,
next_waypoint.location.z - v_begin.z,
]
)
_dot = math.acos(
np.clip(
                np.dot(w_vec, v_vec) / (np.linalg.norm(w_vec) * np.linalg.norm(v_vec)),
                -1.0, 1.0))
import sys
import glob
import copy
import math
import time
from pathlib import Path
import numpy as np
from scipy.spatial.transform import Rotation
import open3d as o3d
import matplotlib.pyplot as plt
import cv2 as cv
from . image_depth import ImageDepth
from cpp.pose_graph import optimize_pose_graph_with_matches, optimize_pose_graph_with_odometry
def process3d(args):
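    # load per-frame image/depth pairs with the shared calibration and build a
    # point cloud per frame; optional uniform per-frame colors help tell the
    # frames apart when viewing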
if args.method >= 4:
sys.exit("Unsupported registration method")
image_files = sorted(glob.glob(f"{args.folder}/video*.bin"))#[0:3]
depth_files = sorted(glob.glob(f"{args.folder}/depth*.bin"))#[0:3]
calibration_file =f"{args.folder}/calibration.json"
if len(image_files) == 0:
print("No image files found")
sys.exit(0)
if len(depth_files) == 0:
print("No depth files found")
sys.exit(0)
    # generate some colors for the point cloud
if args.uniform_color:
val = np.arange(len(depth_files)) / len(depth_files)
colors = plt.cm.jet(val)
colors = colors[:, 0:3]
point_clouds = []
# load data
for i, (image_file, depth_file) in enumerate(zip(image_files, depth_files)):
obj = ImageDepth(
calibration_file,
image_file,
depth_file,
args.width,
args.height,
args.min_depth,
args.max_depth,
args.normal_radius)
if args.uniform_color:
obj.pcd.paint_uniform_color(colors[i])
point_clouds.append(obj)
if args.view_only:
img = cv.cvtColor(obj.img, cv.COLOR_RGB2BGR)
depth = np.maximum(0.0, obj.depth_map)
        depth = depth / np.max(depth)  # scale depth to [0, 1] for display
import numpy as np
from IPython.core.debugger import Pdb as pdb
import sklearn.neighbors as nn
import util
import caffe
class NNEncode():
# Encode points as a linear combination of unordered points
# using NN search and RBF kernel
def __init__(self,NN,sigma,km_filepath='./data/color_bins/pts_in_hull.npy',cc=-1):
if(util.check_value(cc,-1)):
self.cc = np.load(km_filepath)
else:
self.cc = cc
self.K = self.cc.shape[0]
self.NN = int(NN)
self.sigma = sigma
self.nbrs = nn.NearestNeighbors(n_neighbors=self.NN, algorithm='auto').fit(self.cc)
def encode_points_mtx_nd(self,pts_nd,axis=1,returnSparse=False):
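        # soft-encode each point as a Gaussian (RBF) weighting of its NN nearest
        # cluster centers with bandwidth sigma, giving a (P, K) assignment matrix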
t = util.Timer()
pts_flt = util.flatten_nd_array(pts_nd,axis=axis)
P = pts_flt.shape[0]
(dists,inds) = self.nbrs.kneighbors(pts_flt)
pts_enc_flt = np.zeros((P,self.K))
        wts = np.exp(-dists**2/(2*self.sigma**2))
import numpy as np
from shutil import copyfile
import os
correct = np.array([0, 3, 9, 10, 12, 13, 15, 18, 21, 26, 38, 53, 58, 60, 62])
wrong = np.array([33, 35, 36, 41, 42, 45, 59, 61, 65, 72, 90, 93, 95, 101, 107])
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import pickle
import os
import glob
########## Helper functions
############### Calibration / Undistortion
def compute_calib_from_chessboards(nx, ny, filename_pattern):
"""
This function calculates the objectpoints and imagepoints given calibration images containing a chessboard.
Copied/adapted from: https://github.com/udacity/CarND-Camera-Calibration/blob/master/camera_calibration.ipynb
:param nx: chessboard dimension in x
:param ny: chessboard dimension in y
:param filename_pattern: calibration images to take into account
:return: camera matrix and distortion coefficients
"""
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((ny * nx, 3), np.float32)
objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
# Make a list of calibration images
images = glob.glob(filename_pattern)
print("get_objpoints_imgpoints:filename:" + filename_pattern)
# Step through the list and search for chessboard corners
for idx, fname in enumerate(images):
img = cv2.imread(fname)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
# If found, add object points, image points
if ret == True:
objpoints.append(objp)
imgpoints.append(corners)
else:
print("warning: chessboard corners not found in file " + fname)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
return mtx, dist, rvecs, tvecs
def chessboard_calibration():
#correction coefficients
nx = 9
ny = 6
filename_pattern = 'camera_cal/calibration*.jpg'
mtx, dist, rvecs, tvecs = compute_calib_from_chessboards(nx, ny, filename_pattern)
return mtx, dist, rvecs, tvecs
def correct_imgs_in_folder(mtx, dist, rvecs, tvec, folder):
"""
This functions iterates through a folder and undistorts all images into <folder>_undistorted
:param mtx:
:param dist:
:param rvecs:
:param tvec:
:param folder: the folder where the images to be undistorted are in
:return:
"""
# iterate through all files in the folder and apply the pipeline functions
for filename in os.listdir(folder + "/"):
#image = mpimg.imread('camera_cal/' + filename)
image = cv2.imread(folder + "/" + filename)
undistorted = cv2.undistort(image, mtx, dist, None, mtx)
cv2.imwrite(folder + '_undistorted/' + filename, undistorted)
return
############### Creation of Binary Image
def white_yellow_mask(img):
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# For HSV, Hue range is [0,179], Saturation range is [0,255] and Value range is [0,255].
white_mask = cv2.inRange(hsv, np.array([0, 0, 150]), np.array([179, 25, 255]))
white_image = cv2.bitwise_and(img, img, mask=white_mask)
#yellow_mask = cv2.inRange(hsv, np.array([90, 120, 0]), np.array([120, 255, 255]))
yellow_mask = cv2.inRange(hsv, np.array([90, 100, 0]), np.array([120, 255, 255]))
yellow_image = cv2.bitwise_and(img, img, mask=yellow_mask)
#combined_mask = cv2.bitwise_or(yellow_mask, white_mask);
final_image = cv2.add(white_image, yellow_image)
return final_image
def grayscale(img):
"""
Applies the Grayscale transform
This will return an image with only one color channel
but NOTE: to see the returned image as grayscale
(assuming your grayscaled image is called 'gray')
you should call plt.imshow(gray, cmap='gray')
"""
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Or use BGR2GRAY if you read an image with cv2.imread()
def canny(img, low_threshold, high_threshold):
"""Applies the Canny transform"""
return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
"""Applies a Gaussian Noise kernel"""
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
"""
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
`vertices` should be a numpy array of integer points.
"""
# defining a blank mask to start with
mask = np.zeros_like(img)
# defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
# filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
# returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def cut_area(img):
"""
Makes black everything outside of the area of interest.
:param img: the image that will be cut
:return: the cut image
"""
vertices = np.array([[(100, 700), (650, 400), (1200, 700)]], dtype=np.int32)
masked_image = region_of_interest(img, vertices)
return masked_image
def create_binary_image(initial_image):
"""
Applies various filters on the given image
:param initial_image: the image to apply the filters on
:return: the filtered images
"""
white_yellow_masked = white_yellow_mask(initial_image)
gray_image = grayscale(white_yellow_masked)
cut_image = cut_area(gray_image)
s_thresh_min = 100
s_thresh_max = 255
binary_img = np.zeros_like(cut_image)
binary_img[(cut_image >= s_thresh_min) & (cut_image <= s_thresh_max)] = 255
plt.figure()
plt.imshow(binary_img, cmap='gray')
plt.title('binary_img')
return white_yellow_masked, gray_image, gray_image, gray_image, cut_image, binary_img
################### Perspective Transform
def determine_perspective_transform_matrix():
"""
Determines the perspective transform matrix.
Figures with the original and transformed image are opened in order
to easily change the transformation parameters.
:return: The perspective transformation matrix
"""
img = mpimg.imread("test_images_undistorted/straight_lines1.jpg")
plt.imshow(img)
plt.show()
img_size = (img.shape[1], img.shape[0])
src = np.float32([[203, 719], [580, 460], [1091, 717], [702, 460]])
dst = np.float32([[203, 719], [203, 100], [1091, 717], [1091, 100]])
# Given src and dst points, calculate the perspective transform matrix
M = cv2.getPerspectiveTransform(src, dst)
warped = cv2.warpPerspective(img, M, img_size)
#cv2.imwrite('output_images/straight_lines1_transformed2.png', warped)
plt.imshow(warped)
plt.show()
return M
def perspective_transform(img):
"""
Applies the perspective transformation to the image. If the pickle file
does not exist, the transformation is determined first and saved in the
pickle file.
:param img: The image to be transformed
:return: The warped/transformed image and the transformation Matrix
"""
if not os.path.isfile('M_pickle.p'):
M = determine_perspective_transform_matrix()
pickle.dump(M, open( "M_pickle.p", "wb" ))
else:
M = pickle.load(open("M_pickle.p", "rb"))
img_size = (img.shape[1], img.shape[0])
warped = cv2.warpPerspective(img, M, img_size)
return warped, M
################# Detection of Lane Pixels And Polygon Generation
def find_lane_pixels(binary_warped):
"""
Find the pixels that are part of the image.
:param binary_warped: the binary warped image
:return: The x and y coordinates of the lane pixels and a visualization image
"""
# Take a histogram of the bottom half of the image
histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)
# Create an output image to draw on and visualize the result
out_img = np.dstack((binary_warped, binary_warped, binary_warped))
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = np.int(histogram.shape[0] // 2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# HYPERPARAMETERS
# Choose the number of sliding windows
nwindows = 9
# Set the width of the windows +/- margin
margin = 100
# Set minimum number of pixels found to recenter window
minpix = 50
# Set height of windows - based on nwindows above and image shape
window_height = np.int(binary_warped.shape[0] // nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
import collections
from math import ceil
import torch
import torch.nn as nn
import numpy as np
import pygsp
import scipy
from tqdm import tqdm
from .utils import get_device
class logistic_regression(nn.Module):
def __init__(self, n_feature, n_way):
super(logistic_regression, self).__init__()
self.linear = nn.Linear(n_feature, n_way)
def forward(self, inputs):
outputs = self.linear(inputs) # softmax computed via CrossEntropyLoss
return outputs
def get_optimizer_myriam(classifier, epoch, n_epoch):
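    # step-decay schedule: lr starts at 10 and is cut 10x at 1/3 and 2/3 of training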
lr = 10
if epoch >= (1/3)*n_epoch:
lr *= 0.1
if epoch >= (2/3)*n_epoch:
lr *= 0.1
optimizer = torch.optim.SGD(classifier.parameters(), lr=lr, momentum=0.9)
return optimizer
def get_optimizer_xuqing(classifier):
lr = 0.01
return torch.optim.Adam(classifier.parameters(), lr=lr, weight_decay=5e-6)
def train_logistic_regression(data, labels, n_way, device):
""" Return a trained logistic regression"""
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
classifier = logistic_regression(data.shape[1], n_way)
classifier.to(device)
criterion = nn.CrossEntropyLoss()
n_steps = 100
batch_size = 5
loss_history = []
steps_per_epoch = int(ceil(data.shape[0] / batch_size))
n_epoch = n_steps // steps_per_epoch
for epoch in tqdm(range(n_epoch), leave=False):
# optimizer = get_optimizer_myriam(classifier, epoch, n_epoch)
optimizer = get_optimizer_xuqing(classifier)
        permut = np.random.permutation(data.shape[0])
"""Testing GeneExpressionDataset with eager backend."""
import os
import unittest
import numpy as np
import pandas as pd
from torch.utils.data import DataLoader
from pytoda.datasets import GeneExpressionDataset
from pytoda.tests.utils import TestFileContent
CONTENT = os.linesep.join(
[
'genes,A,C,B,D',
'sample_3,9.45,4.984,7.016,8.336',
'sample_2,7.188,0.695,10.34,6.047',
'sample_1,9.25,6.133,5.047,5.6',
]
)
MORE_CONTENT = os.linesep.join(
[
'genes,B,C,D,E,F',
'sample_10,4.918,0.0794,1.605,3.463,10.18',
'sample_11,3.043,8.56,1.961,0.6226,5.027',
'sample_12,4.76,1.124,6.06,0.3743,11.05',
'sample_13,0.626,5.164,4.277,4.414,2.7',
]
)
class TestGeneExpressionDatasetEagerBackend(unittest.TestCase):
"""Testing GeneExpressionDataset with eager backend."""
def setUp(self):
self.backend = 'eager'
print(f'backend is {self.backend}')
self.content = CONTENT
self.other_content = MORE_CONTENT
def test___len__(self) -> None:
"""Test __len__."""
with TestFileContent(self.content) as a_test_file:
with TestFileContent(self.other_content) as another_test_file:
gene_expression_dataset = GeneExpressionDataset(
a_test_file.filename,
another_test_file.filename,
backend=self.backend,
index_col=0,
)
self.assertEqual(len(gene_expression_dataset), 7)
def test___getitem__(self) -> None:
"""Test __getitem__."""
with TestFileContent(self.content) as a_test_file:
with TestFileContent(self.other_content) as another_test_file:
df = pd.concat(
[
pd.read_csv(a_test_file.filename, index_col=0),
pd.read_csv(another_test_file.filename, index_col=0),
],
sort=False,
)
gene_expression_dataset = GeneExpressionDataset(
a_test_file.filename,
another_test_file.filename,
backend=self.backend,
index_col=0,
)
gene_list = gene_expression_dataset.gene_list
mean = df.mean()[gene_list].values
std = df.std(ddof=0)[gene_list].values
for i, (key, row) in enumerate(df[gene_list].iterrows()):
np.testing.assert_almost_equal(
gene_expression_dataset[i].numpy(), (row.values - mean) / std, 5
)
                np.testing.assert_almost_equal(gene_expression_dataset.mean, mean, 5)
import numpy as np
import torch
from torch import nn
from termcolor import cprint
def split_data(x, y, rand_seed, ratio):
"""
split data into two portions.
:param x: numpy arr. data, the 1st dimension is number of data.
:param y: numpy arr. labels of data x. Could be None for unlabeled data
:param rand_seed: random seed for data shuffling. if None then don't shuffle
:param ratio: the ratio of the first portion.
:return (x1, y1), (x2, y2): numpy arr. split data
"""
if rand_seed is not None:
rng = np.random.RandomState(rand_seed)
shuffle_idx = rng.permutation(len(x))
x, y = x[shuffle_idx], y[shuffle_idx]
# detect and handle exceptions for ratio
if isinstance(ratio, float):
if ratio < 0.0:
cprint(f'[Warning] float ratio = {ratio:.5f} < 0.0, set it to 0.0', color='yellow', attrs=['bold'])
ratio = 0.0
elif ratio > 1.0:
cprint(f'[Warning] float ratio = {ratio:.5f} > 1.0, set it to 1.0', color='yellow', attrs=['bold'])
ratio = 1.0
else:
if ratio < 0:
cprint(f'[Warning] int ratio = {ratio} < 0, set it to 0', color='yellow', attrs=['bold'])
ratio = 0
elif ratio > len(x):
cprint(f'[Warning] int ratio = {ratio} > Nx = {len(x)}, set it to {len(x)}', color='yellow', attrs=['bold'])
ratio = len(x)
if y is not None:
r = ratio if isinstance(ratio, float) else ratio/len(x)
x1, x2 = [], []
y1, y2 = [], []
for ci in np.unique(y):
idx_ci = np.where(y == ci)[0]
i = int(round(len(idx_ci)*r))
x1.append(x[idx_ci[:i]])
y1.append(y[idx_ci[:i]])
x2.append(x[idx_ci[i:]])
y2.append(y[idx_ci[i:]])
x1, y1 = np.concatenate(x1), np.concatenate(y1)
        x2, y2 = np.concatenate(x2), np.concatenate(y2)
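    else:
        # hedged completion of the truncated tail: a plain index split for unlabeled data
        i = int(round(len(x) * ratio)) if isinstance(ratio, float) else ratio
        x1, x2 = x[:i], x[i:]
        y1, y2 = None, None
    return (x1, y1), (x2, y2)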
import argparse
import datetime
import models.ProbabilityDistGAN
import numpy as np
import os
import shutil
import torch
from utils.sampling import sample, uniform_noise
from utils.plot import Plot
from torch.autograd import Variable
parser = argparse.ArgumentParser(description="Train ProbabilityDistGAN")
parser.add_argument('pth_to_train_data', help='the path to the data used to train the GAN')
parser.add_argument('output_dir', help='the directory to save training results')
args = parser.parse_args()
# ========== OUTPUT DIRECTORIES ==========
shutil.rmtree(args.output_dir, ignore_errors=True)
os.makedirs(args.output_dir)
# ========== Hyperparameters ==========
NUM_TRAINING_STEPS = 200000
BATCH_SIZE = 64
MODEL_DIMENSIONALITY = 64
NOISE_SAMPLE_LENGTH = 64
CRITIC_UPDATES_PER_GENERATOR_UPDATE = 5
LAMBDA = 1
description_f = open(args.output_dir + 'description.txt', 'w')
description_f.write('NUM_TRAINING_STEPS: {0}\n'.format(NUM_TRAINING_STEPS))
description_f.write('DATE: {0}\n\n'.format(datetime.datetime.now().strftime('%b-%d-%I%M%p-%G')))
description_f.write('BATCH_SIZE: {0}\n'.format(BATCH_SIZE))
description_f.write('MODEL_DIMENSIONALITY: {0}\n'.format(MODEL_DIMENSIONALITY))
description_f.write('NOISE_SAMPLE_LENGTH: {0}\n'.format(NOISE_SAMPLE_LENGTH))
description_f.write('CRITIC_UPDATES_PER_GENERATOR_UPDATE: {0}\n'.format(CRITIC_UPDATES_PER_GENERATOR_UPDATE))
description_f.write('LAMBDA: {0}\n'.format(LAMBDA))
description_f.close()
# ========== HOUSEKEEPING ==========
CUDA = torch.cuda.is_available()
np.random.seed(1)
import os
import threading
import json
import numpy as np
import pytest
from skimage import io
from skimage._shared._tempfile import temporary_file
from scipy import ndimage as ndi
from gala import features, serve, evaluate as ev
D = os.path.dirname(os.path.abspath(__file__))
os.chdir(os.path.join(D, 'example-data/snemi-mini'))
import zmq
pytest.skip(
'Server is hanging - ports issue?',
allow_module_level=True,
)
def id_serve(port=5555, curr_id=1):
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind('tcp://*:%s' % port)
while True:
count = socket.recv_json()['count']
ids_rep = {'begin': curr_id, 'end': curr_id + count}
curr_id += count
socket.send_json(ids_rep)
@pytest.fixture
def dummy_data():
frag0 = np.arange(1, 17, dtype=int).reshape((4, 4))
gt0 = np.array([[1, 1, 2, 2], [1, 1, 2, 2], [3] * 4, [3] * 4], dtype=int)
frag, gt = (ndi.zoom(image, 4, order=0, mode='reflect')
for image in [frag0, gt0])
fman = features.base.Mock(frag, gt)
return frag, gt, fman
@pytest.fixture
def dummy_data2(dummy_data):
frag, gt, _ = dummy_data
frag[7, 7:9] = 17
frag[7:10, -1] = 18
fman = features.base.Mock(frag, gt)
return frag, gt, fman
def test_server(dummy_data):
frag, gt, fman = dummy_data
host, port = 'tcp://localhost', 5588
solver = serve.Solver(frag, feature_manager=fman,
address='tcp://*:' + str(port))
thread = threading.Thread(target=solver.listen, name='solver')
thread.start()
_, dst = serve.proofread(frag, gt, host=host, port=port, num_operations=2,
stop_when_finished=True, random_state=0)
result = np.array(dst)[frag]
# test: resulting segmentation should be improvement over fragments alone
assert (ev.vi(result, gt, ignore_x=[], ignore_y=[]) <
ev.vi(frag, gt, ignore_x=[], ignore_y=[]))
thread.join()
def test_server_imperfect_fragments(dummy_data2):
frag, gt, fman = dummy_data2
host, port = 'tcp://localhost', 5589
solver = serve.Solver(frag, feature_manager=fman,
address='tcp://*:' + str(port))
thread = threading.Thread(target=solver.listen, name='solver')
thread.start()
_, dst = serve.proofread(frag, gt, host=host, port=port, num_operations=2,
stop_when_finished=True, random_state=0)
result = np.array(dst)[frag]
# test: resulting segmentation should be improvement over fragments alone
assert (ev.vi(result, gt, ignore_x=[], ignore_y=[]) <
ev.vi(frag, gt, ignore_x=[], ignore_y=[]))
thread.join()
def test_server_with_id_service(dummy_data):
frag, gt, fman = dummy_data
id_service_port = 5600
config = {'client_url': 'tcp://*:5590',
'id_service_url': 'tcp://localhost:%i' % id_service_port,
'solver_url': 'tcp://localhost:5590'}
with temporary_file('.json') as config_filename:
with open(config_filename, 'w') as fout:
json.dump(config, fout)
solver = serve.Solver(frag, feature_manager=fman,
config_file=config_filename)
starting_id = 23461
id_thread = threading.Thread(target=id_serve, name='id-service',
daemon=True,
kwargs=dict(port=id_service_port,
curr_id=starting_id))
id_thread.start()
thread = threading.Thread(target=solver.listen, name='solver')
thread.start()
host, port = config['solver_url'].rsplit(':', maxsplit=1)
_, dst = serve.proofread(frag, gt, host=host, port=int(port),
num_operations=2, stop_when_finished=True,
random_state=0)
result = np.array(dst)[frag]
# test: resulting segmentation should be improvement over fragments alone
assert (ev.vi(result, gt, ignore_x=[], ignore_y=[]) <
ev.vi(frag, gt, ignore_x=[], ignore_y=[]))
# test 2: make sure ID service worked: starting ID should be as above
# should be equal but boundary ID messes things up
        assert np.min(result) >= starting_id
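        # hedged addition mirroring the sibling tests: wait for the solver thread
        thread.join()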
__author__ = "<NAME> <<EMAIL>>"
__date__ = "2021/01/09 19:34:07"
import pickle
import numpy as np
from functions import *
from sys import exit
import argparse
from scipy.interpolate import BSpline
from scipy import optimize
import matplotlib as mpl
from matplotlib import cm
import sys
sys.path.append("/home/gridsan/dingxq/my_package_on_github/CLCG")
from utils.functions import *
argparser = argparse.ArgumentParser()
argparser.add_argument("--alpha", type = float)
args = argparser.parse_args()
alpha = args.alpha
with open("./output/range.pkl", 'rb') as file_handle:
data = pickle.load(file_handle)
x1_min, x1_max = data['x1_min'], data['x1_max']
x2_min, x2_max = data['x2_min'], data['x2_max']
num_samples = 30
x1 = np.random.rand(num_samples)*(x1_max - x1_min) + x1_min
x2 = np.random.rand(num_samples)*(x2_max - x2_min) + x2_min
x = np.vstack([x1, x2]).T
y = compute_cubic_spline_basis(x)
## samples from p
with open("./output/TREMC/x_record_alpha_{:.3f}.pkl".format(alpha), 'rb') as file_handle:
data = pickle.load(file_handle)
xp = data['x_record'][:, -1, :]
num_samples_p = xp.shape[0]
## samples from q
num_samples_q = num_samples_p
x1_q = np.random.rand(num_samples_q)*(x1_max - x1_min) + x1_min
x2_q = np.random.rand(num_samples_q)*(x2_max - x2_min) + x2_min
xq = np.vstack([x1_q, x2_q]).T
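# hedged next step (the file is truncated here), mirroring the earlier pattern for x
yq = compute_cubic_spline_basis(xq)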
############################################################################
# Generic routines for doing things on graphs #
# #
############################################################################
import numpy as np
from .lattice import Lattice, INVALID
from typing import Tuple
def plaquette_spanning_tree(lattice: Lattice, shortest_edges_only = True):
"""Given a lattice this returns a list of edges that form a spanning tree over all the plaquettes (aka a spanning tree of the dual lattice!)
The optional argument shortest_edge_only automatically sorts the edges to ensure that only the shortest connections are used
(which is kind of a fudgey way of stopping the algorithm from picking edges that connect over the periodic boundaries). If you're hungry for
speed you might want to turn it off. The algorith is basically prim's algorithm - so it should run in linear time.
:param lattice: the lattice you want the tree on
:type lattice: Lattice
:param shortest_edges_only: do you want a minimum spanning tree - distance wise, defaults to True
:type shortest_edges_only: bool, optional
:return: a list of the edges that form the tree
:rtype: np.ndarray
"""
plaquettes_in = np.full(lattice.n_plaquettes, -1)
edges_in = np.full(lattice.n_plaquettes-1, -1)
plaquettes_in[0] = 0
boundary_edges = np.copy(lattice.plaquettes[0].edges)
for n in range(lattice.n_plaquettes-1):
# if we want to keep the edges short - sort the available boundaries
if shortest_edges_only:
def find_plaq_distance(edge):
p1,p2 = lattice.edges.adjacent_plaquettes[edge]
c1 = 10 if p1 == INVALID else lattice.plaquettes[p1].center
c2 = 10 if p2 == INVALID else lattice.plaquettes[p2].center
distance = np.sum((c1 - c2)**2)
return distance
distances = np.vectorize(find_plaq_distance)( boundary_edges )
order = np.argsort(distances)
else:
order = np.arange(len(boundary_edges))
for edge_index in boundary_edges[order]:
edge_plaq = lattice.edges.adjacent_plaquettes[edge_index]
if INVALID in edge_plaq:
continue
            outside_plaquette_present = [x not in plaquettes_in for x in edge_plaq]
            inside_plaquette_present = [x in plaquettes_in for x in edge_plaq]
            # if this edge links an inside and an outside plaquette
            if np.any(outside_plaquette_present) and np.any(inside_plaquette_present):
                # add the new plaquette to the list of inside ones
                position = np.where(outside_plaquette_present)[0][0]
new_plaquette = edge_plaq[position]
plaquettes_in[n+1] = new_plaquette
edges_in[n] = edge_index
# add the new edges to the boundary edges
boundary_edges = np.append(boundary_edges, lattice.plaquettes[new_plaquette].edges)
# remove any doubled edges - these will be internal
a, c = np.unique(boundary_edges, return_counts=True)
boundary_edges = a[c == 1]
break
return edges_in
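# Hedged usage sketch (assumes a `Lattice` instance `lat` built elsewhere):
#   tree_edges = plaquette_spanning_tree(lat)
#   assert len(tree_edges) == lat.n_plaquettes - 1  # a spanning tree has P-1 edges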
# FIXME: change function signature to take lattice object instead of adjacency list
def vertex_neighbours(lattice, vertex_i):
"""
Return the neighbouring nodes of a point
Args:
vertex_i: int the index into vertices of the node we want the neighbours of
adjacency: (M, 2) A list of pairs of vertices representing edges
Returns:
vertex_indices: (k), the indices into vertices of the neighbours
edge_indices: (k), the indices into adjacency of the edges that link vertex_i to its neighbours
Note that previous version of this function broke the expectation that edge_indices[i] is the edge that links
vertex_i to vertex_indices[i], make sure to preserve this property.
"""
adjacency = lattice.edges.indices
edge_indices = np.where(np.any(vertex_i == adjacency, axis=-1))[0]
edges = adjacency[edge_indices]
# print(f"{edges = }, {edge_indices = }")
#the next two lines replaces the simpler vertex_indices = edges[edges != vertex_i] because the allow points to neighbour themselves
start_or_end = (edges != vertex_i)[:, 1] #this is true if vertex_i starts the edge and false if it ends it
vertex_indices = np.take_along_axis(edges, start_or_end[:, None].astype(int), axis = 1).flatten() #this gets the index of the other end of each edge
# print(f"{start_or_end = }, {vertex_indices = }")
#vertex_indices = edges[edges != vertex_i]
# print(vertex_indices.shape, edge_indices.shape)
assert(vertex_indices.shape == edge_indices.shape)
return vertex_indices, edge_indices
def edge_neighbours(lattice, edge_i):
"""
Return the neighbouring edges of an edge (the edges connected to the same nodes as this edge)
:param lattice: The lattice
:type lattice: Lattice
:param edge_i: the index of the edge we want the neighbours of
:type edge_i: integer
:return: edge_indices: (k), the indices into adjacency of the edges that link vertex_i to its neighbours
:rtype: np.ndarray (k,)
"""
edge = lattice.edges.indices[edge_i]
v1 = edge[0]
v2 = edge[1]
mask = np.any(v1 == lattice.edges.indices, axis = -1) | np.any(v2 == lattice.edges.indices, axis=-1)
mask[edge_i] = False #not a neighbour of itself
return np.where(mask)[0]
def clockwise_about(vertex_i : int, g : Lattice) -> np.ndarray:
"""
Finds the vertices/edges that border vertex_i, order them clockwise starting from the positive x axis
and returns those indices in order.
Args:
vertex_i (int): int the index into g.vertices.positions of the node we want to use. Generally use 0
g (Lattice): a graph object with keys vertices, adjacency, adjacency_crossing
Returns:
ordered_edge_indices: np.ndarray (n_neighbours_of_vertex_i) ordered indices of the edges.
"""
#get the edges and vertices around vertex 0
vertex_indices, edge_indices = vertex_neighbours(g, vertex_i)
edge_vectors = get_edge_vectors(vertex_i, edge_indices, g)
#order them clockwise from the positive x axis
angles = np.arctan2(edge_vectors[:, 1], edge_vectors[:,0])
angles = np.where(angles > 0, angles, 2*np.pi + angles) #move from [-pi, pi] to [0, 2*pi]
ordering = np.argsort(angles)
ordered_edge_indices = edge_indices[ordering]
ordered_vertex_indices = vertex_indices[ordering]
return ordered_vertex_indices, ordered_edge_indices
def clockwise_edges_about(vertex_i : int, g : Lattice) -> np.ndarray:
"""
Finds the edges that border vertex_i, orders them clockwise starting from the positive x axis
and returns those indices in order. Use this to break the degeneracy of graph coloring.
Args:
vertex_i (int): int the index into g.vertices.positions of the node we want to use. Generally use 0
g (Lattice): a graph object with keys vertices, adjacency, adjacency_crossing
Returns:
ordered_edge_indices: np.ndarray (n_neighbours_of_vertex_i) ordered indices of the edges.
"""
return clockwise_about(vertex_i, g)[1]
def get_edge_vectors(vertex_i : int, edge_indices : np.ndarray, l : Lattice) -> np.ndarray:
"""
Get the vector starting from vertex_i along edge_i, taking into account boundary conditions
Args:
vertex_i (int): the index of the vertex
edge_i (int): the index of the edge
lattice (Lattice): the lattice to use
Returns:
np.ndarray (2,):
"""
#this is a bit nontrivial, g.adjacency_crossing tells us if the edge crossed into another unit cell but
#it is directional, hence we need to check for each edge if vertex_i was the first of second vertex stored
#the next few lines do that so we can add g.edges.indices_crossing with the right sign
edges = l.edges.indices[edge_indices]
start_or_end = (edges != vertex_i)[:, 1] #this is true if vertex_i starts the edge and false if it ends it
other_vertex_indices = np.take_along_axis(edges, start_or_end[:, None].astype(int), axis = 1).squeeze() #this gets the index of the other end of each edge
offset_sign = (2*start_or_end - 1) #now it's +/-1
#get the vectors along the edges
return l.vertices.positions[other_vertex_indices] - l.vertices.positions[vertex_i][None, :] + offset_sign[:, None] * l.edges.crossing[edge_indices]
def adjacent_plaquettes(l : Lattice, p_index : int) -> Tuple[np.ndarray, np.ndarray]:
"""For a given lattice, compute the plaquettes that share an edge with lattice.plaquettes[p_index] and the shared edge.
Returns a list of plaquettes indices and a matching list of edge indices.
:param l: The lattice.
:type l: Lattice
:param p_index: The index of the plaquette to find the neighbours of.
:type p_index: int
:return: (plaque_indices, edge_indices)
:rtype: Tuple[np.ndarray, np.ndarray]
"""
p = l.plaquettes[p_index]
edges = p.edges
neighbouring_plaquettes = l.edges.adjacent_plaquettes[edges]
#remove edges that are only part of this plaquette
valid = ~np.any(neighbouring_plaquettes == INVALID, axis = -1)
edges, neighbouring_plaquettes = edges[valid], neighbouring_plaquettes[valid, :]
# get just the other plaquette of each set
p_index_location = neighbouring_plaquettes[:, 1] == p_index
other_index = 1 - p_index_location.astype(int)[:, None]
neighbouring_plaquettes = np.take_along_axis(neighbouring_plaquettes, other_index, axis = 1).squeeze(axis = -1)
return neighbouring_plaquettes, edges
def rotate(vector, angle):
    # standard 2-D rotation matrix; the return line reconstructs the truncated tail
    rm = np.array([[np.cos(angle), -np.sin(angle)],
                   [np.sin(angle), np.cos(angle)]])
    return rm @ vector
import numpy as np
from scipy.interpolate import interp1d
class NormalizedPulseTemplate:
"""
Class for handling the template for the pulsed response of the pixels
of the camera to a single photo-electron in high and low gain.
"""
def __init__(self, amplitude_HG, amplitude_LG, time, amplitude_HG_err=None,
amplitude_LG_err=None):
"""
Save the pulse template and optional error
and create an interpolation.
Parameters
----------
amplitude_HG/LG: array
Amplitude of the signal produced in a pixel by a photo-electron
in high gain (HG) and low gain (LG) for successive time samples
time: array
Times of the samples
amplitude_HG/LG_err: array
Error on the pulse template amplitude
"""
self.time = np.array(time)
self.amplitude_HG = np.array(amplitude_HG)
self.amplitude_LG = np.array(amplitude_LG)
if amplitude_HG_err is not None:
assert np.array(amplitude_HG_err).shape == self.amplitude_HG.shape
self.amplitude_HG_err = np.array(amplitude_HG_err)
else:
self.amplitude_HG_err = np.zeros(self.amplitude_HG.shape)
if amplitude_LG_err is not None:
assert np.array(amplitude_LG_err).shape == self.amplitude_LG.shape
self.amplitude_LG_err = np.array(amplitude_LG_err)
else:
self.amplitude_LG_err = self.amplitude_LG * 0
self._template = self._interpolate()
self._template_err = self._interpolate_err()
def __call__(self, time, gain, amplitude=1, t_0=0, baseline=0):
"""
Use the interpolated template to access the value of the pulse at
time = time in gain regime = gain. Additionally, an alternative
normalisation, origin of time and baseline can be used.
Parameters
----------
time: float array
Time after the origin to estimate the value of the pulse
gain: string array
Identifier of the gain channel used for each pixel
Either "HG" or "LG"
amplitude: float
Normalisation factor to apply to the template
t_0: float
Shift in the origin of time
baseline: float array
Baseline to be subtracted for each pixel
Return
----------
y: array
Value of the template in each pixel at the requested times
"""
y = amplitude * self._template[gain](time - t_0) + baseline
return np.array(y)
def get_error(self, time, gain, amplitude=1, t_0=0):
"""
Use the interpolated error on the template to access the value
of the pulse at time = time in gain regime = gain.
Additionally, an alternative normalisation and origin of time
can be used.
Parameters
----------
time: float array
Time after the origin to estimate the value of the error
gain: string array
Identifier of the gain channel used for each pixel
Either "HG" or "LG"
amplitude: float
Normalisation factor to apply to the error
t_0: float
Shift in the origin of time
Return
----------
y: array
Value of the template in each pixel at the requested times
"""
y = amplitude * self._template_err[gain](time - t_0)
return np.array(y)
def save(self, filename):
"""
Save a loaded template to a text file.
Parameters
----------
filename: string
Location of the output text file
"""
data = np.vstack([self.time, self.amplitude_HG, self.amplitude_HG_err,
self.amplitude_LG, self.amplitude_LG_err])
np.savetxt(filename, data.T)
@classmethod
def load_from_file(cls, filename):
"""
Load a pulse template from a text file.
Allows for only one gain channel and no errors,
two gain channels and no errors or two gain channels with errors.
Parameters
----------
cls: This class
filename: string
Location of the template file
Return
----------
cls(): Instance of NormalizedPulseTemplate receiving the information
from the input file
"""
data = np.loadtxt(filename).T
assert len(data) in [2, 3, 5]
if len(data) == 2: # one shape in file
t, x = data
return cls(amplitude_HG=x, amplitude_LG=x, time=t)
if len(data) == 3: # no error in file
t, hg, lg = data
return cls(amplitude_HG=hg, amplitude_LG=lg, time=t)
elif len(data) == 5: # two gains and errors
t, hg, lg, dhg, dlg = data
return cls(amplitude_HG=hg, amplitude_LG=lg, time=t,
amplitude_HG_err=dhg, amplitude_LG_err=dlg)
@classmethod
def load_from_eventsource(cls, eventsource_camera_readout):
"""
Load a pulse template from an event source camera readout.
Read the sampling rate to create a time variable reaching
9 ns at the HG maximum
Parameters
----------
cls: This class
eventsource_camera_readout: `CameraReadout`
CameraReadout object obtained from the LST event source
Return
----------
cls(): Instance of NormalizedPulseTemplate receiving the information
from the input file
"""
t = eventsource_camera_readout.reference_pulse_sample_time.to_value('ns')
hg, lg = eventsource_camera_readout.reference_pulse_shape
i = np.argmax(hg)
t = t - t[i] + 9.0
return cls(amplitude_HG=hg, amplitude_LG=lg, time=t)
@staticmethod
def _normalize(time, amplitude, error):
"""
Normalize the pulse template in p.e/ns.
"""
normalization = np.sum(amplitude) * (np.max(time) - np.min(time)) / (len(time)-1)
return amplitude / normalization, error / normalization
def _interpolate(self):
"""
Creates a normalised interpolation of the pulse template from a
discrete and non-normalised input. Also normalises the error.
Return
----------
A dictionary containing a 1d cubic interpolation of the normalised
amplitude of the template versus time,
for the high and low gain channels.
"""
self.amplitude_HG, self.amplitude_HG_err = self._normalize(self.time,
self.amplitude_HG,
self.amplitude_HG_err)
self.amplitude_LG, self.amplitude_LG_err = self._normalize(self.time,
self.amplitude_LG,
self.amplitude_LG_err)
return {"HG": interp1d(self.time, self.amplitude_HG, kind='cubic',
bounds_error=False, fill_value=0.,
assume_sorted=True),
"LG": interp1d(self.time, self.amplitude_LG, kind='cubic',
bounds_error=False, fill_value=0.,
assume_sorted=True)}
def _interpolate_err(self):
"""
Creates an interpolation of the error on the pulse template
from a discrete and normalised input.
Return
----------
A dictionary containing a 1d cubic interpolation of the error on the
normalised amplitude of the template versus time,
for the high and low gain channels.
"""
return {"HG": interp1d(self.time, self.amplitude_HG_err, kind='cubic',
bounds_error=False, fill_value=np.inf,
assume_sorted=True),
"LG": interp1d(self.time, self.amplitude_LG_err, kind='cubic',
bounds_error=False, fill_value=np.inf,
assume_sorted=True)}
def compute_time_of_max(self):
"""
Find the average of the times of maximum
of the high and low gain pulse shapes.
Returns
-------
t_max: float
Time of maximum of the pulse shapes (averaged)
"""
        t_max = (self.time[np.argmax(self.amplitude_HG)]
                 + self.time[np.argmax(self.amplitude_LG)]) / 2
        return t_max
import unittest
import numpy as np
from op_test import OpTest
class TestCosSimOp(OpTest):
def setUp(self):
self.op_type = "cos_sim"
self.inputs = {
'X': np.random.random((6, 5)).astype("float32"),
'Y': np.random.random((6, 5)).astype("float32")
}
expect_x_norm = np.linalg.norm(self.inputs['X'], axis=1)
expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=1)
expect_out = (self.inputs['X'] * self.inputs['Y']).sum(axis=1) / \
expect_x_norm / expect_y_norm
self.outputs = {
'XNorm': np.expand_dims(expect_x_norm, 1),
'YNorm': np.expand_dims(expect_y_norm, 1),
'Out': np.expand_dims(expect_out, 1)
}
def test_check_output(self):
self.check_output()
def test_check_grad_normal(self):
self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.06)
def test_check_grad_ingore_x(self):
self.check_grad(
['Y'], 'Out', max_relative_error=0.06, no_grad_set=set("X"))
def test_check_grad_ingore_y(self):
self.check_grad(
['X'], 'Out', max_relative_error=0.06, no_grad_set=set('Y'))
class TestCosSimOp2(TestCosSimOp):
def setUp(self):
self.op_type = "cos_sim"
self.inputs = {
'X': np.random.random((6, 5)).astype("float32"),
'Y': np.random.random((1, 5)).astype("float32")
}
expect_x_norm = np.linalg.norm(self.inputs['X'], axis=1)
expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=1)
expect_out = (self.inputs['X'] * self.inputs['Y']).sum(axis=1) / \
expect_x_norm / expect_y_norm
self.outputs = {
'XNorm': np.expand_dims(expect_x_norm, 1),
'YNorm': np.expand_dims(expect_y_norm, 1),
'Out': np.expand_dims(expect_out, 1)
}
class TestCosSimOp3(TestCosSimOp):
def setUp(self):
self.op_type = "cos_sim"
self.inputs = {
'X': np.random.random((6, 5, 2)).astype("float32"),
'Y': np.random.random((6, 5, 2)).astype("float32")
}
expect_x_norm = np.linalg.norm(self.inputs['X'], axis=(1, 2))
expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=(1, 2))
expect_out = (self.inputs['X'] * self.inputs['Y']).sum(axis=(1, 2)) / \
expect_x_norm / expect_y_norm
self.outputs = {
'XNorm': np.expand_dims(expect_x_norm, 1),
'YNorm': np.expand_dims(expect_y_norm, 1),
'Out': np.expand_dims(expect_out, 1)
}
class TestCosSimOp4(TestCosSimOp):
def setUp(self):
self.op_type = "cos_sim"
self.inputs = {
'X': np.random.random((6, 5, 2)).astype("float32"),
'Y': np.random.random((1, 5, 2)).astype("float32")
}
expect_x_norm = np.linalg.norm(self.inputs['X'], axis=(1, 2))
expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=(1, 2))
expect_out = (self.inputs['X'] * self.inputs['Y']).sum(axis=(1, 2)) / \
expect_x_norm / expect_y_norm
self.outputs = {
'XNorm': np.expand_dims(expect_x_norm, 1),
'YNorm': np.expand_dims(expect_y_norm, 1),
            'Out': np.expand_dims(expect_out, 1)
        }
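# hedged tail (the original file is truncated here); the standard op_test entry point would be:
if __name__ == '__main__':
    unittest.main()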
from matplotlib import pyplot as plt
from matplotlib.patches import ConnectionPatch
import collections.abc
import collections
import pandas as pd
import numpy as np
from sklearn.neighbors import NearestNeighbors
from skimage.transform import rescale  # scikit-image also provides resize, downscale_local_mean
class PixelMap():
def __init__(self,
pixel_data,
name=None,
upscale=1,
pixel_data_original=None):
self.data = pixel_data
        if pixel_data_original is not None:
            self.pixel_data_original = pixel_data_original
        else:
            self.pixel_data_original = pixel_data
self.n_channels = 1 if len(
pixel_data.shape) == 2 else pixel_data.shape[-1]
        if not isinstance(upscale, collections.abc.Iterable) or len(upscale) == 1:
self.scale = (upscale, upscale)
else:
self.scale = upscale
self.extent = (0, pixel_data.shape[0] / self.scale[0], 0,
pixel_data.shape[1] / self.scale[1])
@property
def shape(self):
return self.extent[1], self.extent[3]
# def rescale(self, factor):
# if not isinstance(factor, collections.Iterable) or len(factor)==1:
# factor = (factor,factor)
# origin = (self.extent[0],self.extent[2])
# self.extent = (origin[0], origin[0] + self.pixel_data.shape[1] * factor[0],
# origin[1], origin[1] + self.pixel_data.shape[0] * factor[1])
def imshow(self, **kwargs):
extent = np.array(self.extent)
# print(extent)
plt.imshow(self.data**0.2, extent=extent[[0, 3, 1, 2]], **kwargs)
def __getitem__(self, indices):
# print(indices)
        if not isinstance(indices, collections.abc.Iterable):
index_x = indices
index_y = slice(0, None, None)
else:
index_x = indices[0]
if len(indices) > 1:
index_y = indices[1]
else:
index_y = slice(0, None, None)
if (index_x.start is None): start_x = 0 #self.extent[0]
else: start_x = index_x.start
if (index_x.stop is None): stop_x = self.extent[1]
else: stop_x = index_x.stop
if (index_y.start is None): start_y = 0 #self.extent[2]
else: start_y = index_y.start
if (index_y.stop is None): stop_y = self.extent[3]
else: stop_y = index_y.stop
data = self.data[int(start_y * self.scale[1]):int(stop_y *
self.scale[1]),
int(start_x * self.scale[0]):int(stop_x *
self.scale[0]), ]
return PixelMap(
data,
upscale=self.scale,
)
class SpatialGraph():
def __init__(self, df, n_neighbors=10) -> None:
self.df = df
self.n_neighbors = n_neighbors
self._neighbors = None
self._neighbor_types = None
self._distances = None
@property
def neighbors(self):
if self._neighbors is None:
self._distances, self._neighbors, self._neighbor_types = self.update_knn(
self.n_neighbors)
return self._neighbors
@property
def distances(self):
if self._distances is None:
self._distances, self._neighbors, self._neighbor_types = self.update_knn(
self.n_neighbors)
return self._distances
@property
def neighbor_types(self):
if self._neighbor_types is None:
self._distances, self._neighbors, self._neighbor_types = self.update_knn(
self.n_neighbors)
return self._neighbor_types
def update_knn(self, n_neighbors, re_run=False):
        if not re_run and self._neighbors is not None and n_neighbors <= self._neighbors.shape[1]:
            return (self._distances[:, :n_neighbors],
                    self._neighbors[:, :n_neighbors],
                    self._neighbor_types[:, :n_neighbors])
else:
coordinates = np.stack([self.df.X, self.df.Y]).T
knn = NearestNeighbors(n_neighbors=n_neighbors)
knn.fit(coordinates)
distances, neighbors = knn.kneighbors(coordinates)
neighbor_types = np.array(self.df.gene_ids)[neighbors]
self.n_neighbors = n_neighbors
return distances, neighbors, neighbor_types
class SpatialIndexer():
def __init__(self, df):
self.df = df
@property
def shape(self):
if self.df.background is None:
return np.ceil(self.df.X.max() - self.df.X.min()).astype(
int), np.ceil(self.df.Y.max() - self.df.Y.min()).astype(int)
def create_cropping_mask(self, start, stop, series):
# print(start, stop)
if start is None:
start = 0
if stop is None:
stop = series.max()
return ((series > start) & (series < stop))
def join_cropping_mask(self, xlims, ylims):
return self.create_cropping_mask(
*xlims, self.df.X) & self.create_cropping_mask(*ylims, self.df.Y)
def crop(self, xlims, ylims):
mask = self.join_cropping_mask(xlims, ylims)
pixel_maps = []
if xlims[0] is None:
start_x = 0
else:
start_x = xlims[0]
if ylims[0] is None:
start_y = 0
else:
start_y = ylims[0]
for pm in self.df.pixel_maps:
pixel_maps.append(pm[xlims[0]:xlims[1], ylims[0]:ylims[1]])
return SpatialData(self.df.gene_annotations[mask],
self.df.X[mask] - start_x,
self.df.Y[mask] - start_y, pixel_maps)
def __getitem__(self, indices):
        if not isinstance(indices, collections.abc.Iterable):
indices = (indices, )
if len(indices) == 1:
ylims = (0, None)
else:
ylims = (indices[1].start, indices[1].stop)
xlims = (indices[0].start, indices[0].stop)
return self.crop(xlims, ylims)
class SpatialData(pd.DataFrame):
def __init__(self,
gene_annotations,
x_coordinates,
y_coordinates,
pixel_maps=[]):
super(SpatialData, self).__init__({
'gene_annotations': gene_annotations,
'X': x_coordinates,
'Y': y_coordinates
})
self._metadata = ['uns', 'stats', 'pixel_maps']
self.uns = {'background': None}
self.pixel_maps = []
self.graph = SpatialGraph(self)
for pm in pixel_maps:
if not type(pm) == PixelMap:
self.pixel_maps.append(PixelMap(pm))
else:
self.pixel_maps.append(pm)
self.update_stats()
# @property
# def count_idcs(self):
# return self.stats.count_indices
@property
def gene_ids(self):
return self.gene_id
@property
def counts(self):
return self.stats['counts']
@property
def counts_sorted(self):
return self.stats.counts[self.stats.count_indices]
@property
def gene_classes_sorted(self):
return self.gene_classes[self.stats.count_indices]
@property
def gene_classes(self):
return self.stats.index
# @property
# def count_ranks(self):
# return self.stats['count_ranks']
@property
def spatial(self):
return SpatialIndexer(self)
@property
def background(self):
if len(self.pixel_maps):
return self.pixel_maps[0]
#.__getitem__(self, arg):
def __getitem__(self, *arg):
if (len(arg) == 1):
if type(arg[0]) == str:
return super().__getitem__(arg[0])
if (type(arg[0]) == slice):
new_data = super().iloc.__getitem__(arg)
elif (type(arg[0]) == int):
new_data = super().iloc.__getitem__(slice(arg[0], arg[0] + 1))
elif isinstance(arg[0], pd.Series):
# print(arg[0].values)
new_data = super().iloc.__getitem__(arg[0].values)
elif isinstance(arg[0], np.ndarray):
new_data = super().iloc.__getitem__(arg[0])
            elif isinstance(arg[0], collections.abc.Sequence):
if all([a in self.keys() for a in arg[0]]):
return super().__getitem__(*arg)
new_data = super().iloc.__getitem__(arg[0])
new_frame = SpatialData(new_data.gene_annotations, new_data.X,
new_data.Y, self.pixel_maps)
new_frame.update_stats()
return (new_frame)
print('Converting to generic Pandas.')
return super().__getitem__(*arg)
def update_stats(self):
gene_classes, indicers, inverse, counts = np.unique(
super().__getitem__(['gene_annotations']),
return_index=True,
return_inverse=True,
return_counts=True,
)
count_idcs = np.argsort(counts)
count_ranks = np.argsort(count_idcs)
self.stats = pd.DataFrame(
{
# 'gene_classes': gene_classes,
'counts': counts,
'count_ranks': count_ranks,
'count_indices': count_idcs,
'gene_ids': np.arange(len(gene_classes))
},
index=gene_classes)
self['gene_id'] = inverse
self.graph = SpatialGraph(self)
# if 'knn_distances' in self.uns:
# del self.uns['knn_distances']
# if 'knn_indices' in self.uns:
# del self.uns['knn_indices']
def get_count(self, gene):
if gene in self.gene_classes.values:
return int(self.stats.counts[self.gene_classes == gene])
def get_id(self, gene_name):
return int(self.stats.gene_ids[self.gene_classes == gene_name])
def get_count_rank(self, gene):
if gene in self.gene_classes.values:
return int(self.stats.count_ranks[self.gene_classes == gene])
# def knn(self, n_neighbors=4, re_run=False):
# if not all(
# (k in self.uns for k in ['knn_distances', 'knn_indices'])) or (
# n_neighbors > self.uns['knn_distances'].shape[1]) or re_run:
# knn = NearestNeighbors(n_neighbors=n_neighbors)
# coordinates = np.stack([self.X, self.Y]).T
# knn.fit(coordinates)
# distances, indices = knn.kneighbors(coordinates)
# self.uns['knn_distances'] = distances
# self.uns['knn_indices'] = indices
# self.uns['knn_types'] = np.array(self.gene_ids)[indices]
# return self.uns['knn_distances'], self.uns['knn_indices'], self.uns[
# 'knn_types']
def knn_entropy(self, n_neighbors=4):
# _, indices, _ = self.knn(n_neighbors=n_neighbors)
self.graph.update_knn(n_neighbors=n_neighbors)
indices = self.graph.neighbors#(n_neighbors=n_neighbors)
knn_cells = np.zeros_like(indices)
for i in range(indices.shape[1]):
knn_cells[:, i] = self['gene_id'].iloc[indices[:, i]]
H = np.zeros((len(self.gene_classes), ))
for i, gene in enumerate(self.gene_classes):
x = knn_cells[self['gene_id'] == i]
_, n_x = np.unique(x[:, 1:], return_counts=True)
p_x = n_x / n_x.sum()
h_x = -(p_x * np.log2(p_x)).sum()
H[i] = h_x
return (H)
def plot_entropy(self, n_neighbors=4):
H = self.knn_entropy(n_neighbors)
idcs = np.argsort(H)
plt.figure(figsize=(25, 25))
fig, axd = plt.subplot_mosaic([
['scatter_1', 'scatter_2', 'scatter_3', 'scatter_4'],
['bar', 'bar', 'bar', 'bar'],
['scatter_5', 'scatter_6', 'scatter_7', 'scatter_8'],
],
figsize=(11, 7),
constrained_layout=True)
dem_plots = np.array([
0,
2,
len(H) - 3,
len(H) - 1,
1,
int(len(H) / 2),
int(len(H) / 2) + 1,
len(H) - 2,
])
colors = ('royalblue', 'goldenrod', 'red', 'purple', 'lime',
'turquoise', 'green', 'yellow')
axd['bar'].bar(
range(len(H)),
H[idcs],
color=[
colors[np.where(
dem_plots == i)[0][0]] if i in dem_plots else 'grey'
for i in range(len(self.stats.counts))
])
axd['bar'].set_xticks(range(len(H)),
[self.gene_classes[h] for h in idcs],
rotation=90)
axd['bar'].set_ylabel('knn entropy, k=' + str(n_neighbors))
for i in range(8):
idx = idcs[dem_plots[i]]
gene = self.gene_classes[idx]
plot_name = 'scatter_' + str(i + 1)
axd[plot_name].set_title(gene)
axd[plot_name].scatter(self.X, self.Y, color=(0.5, 0.5, 0.5, 0.1))
axd[plot_name].scatter(self.X[self['gene_id'] == idx],
self.Y[self['gene_id'] == idx],
color=colors[i],
marker='.')
axd[plot_name].set_xticks([], [])
# if i>0:
axd[plot_name].set_yticks([], [])
if i < 4:
y_ = (H[idcs])[i]
_y = 0
else:
y_ = 0
_y = 1
con = ConnectionPatch(xyA=(dem_plots[i], y_),
coordsA=axd['bar'].transData,
xyB=(np.mean(axd[plot_name].get_xlim()),
axd[plot_name].get_ylim()[_y]),
coordsB=axd[plot_name].transData,
color='white',
linewidth=1,
linestyle='dotted')
fig.add_artist(con)
def scatter(self,
c=None,
color=None,
gene=None,
axd=None,
plot_bg=True,
**kwargs):
if axd is None:
axd = plt.subplot(111)
if self.background and plot_bg:
self.background.imshow(cmap='Greys', alpha=0.3)
if c is None and color is None:
c = self.gene_ids
# axd.set_title(gene)
axd.scatter(self.X,
self.Y,
c=c,
color=color,
cmap='nipy_spectral',
**kwargs)
def plot_bars(self, axis=None, **kwargs):
if axis is None:
axis = plt.subplot(111)
axis.bar(np.arange(len(self.stats.counts)), self.counts_sorted,
**kwargs)
axis.set_yscale('log')
axis.set_xticks(
np.arange(len(self.gene_classes_sorted)),
self.gene_classes_sorted,
# fontsize=12,
rotation=90)
axis.set_ylabel('molecule count')
def plot_overview(self):
plt.style.use('dark_background')
colors = ('royalblue', 'goldenrod', 'red', 'lime')
scatter_idcs = np.round(np.linspace(0,
len(self.stats.counts) - 1,
4)).astype(int)
fig, axd = plt.subplot_mosaic(
[['scatter_1', 'scatter_2', 'scatter_3', 'scatter_4'],
['bar', 'bar', 'bar', 'bar']],
figsize=(11, 7),
constrained_layout=True)
self.plot_bars(
axd['bar'],
color=[
colors[np.where(
scatter_idcs == i)[0][0]] if i in scatter_idcs else 'grey'
for i in range(len(self.stats.counts))
])
for i in range(4):
idx = self.stats.count_indices[scatter_idcs[i]]
gene = self.gene_classes[idx]
plot_name = 'scatter_' + str(i + 1)
axd[plot_name].set_title(gene)
axd[plot_name].scatter(self.X, self.Y, color=(0.5, 0.5, 0.5, 0.1))
axd[plot_name].scatter(self.X[self['gene_id'] == idx],
self.Y[self['gene_id'] == idx],
color=colors[i],
marker='.')
axd[plot_name].set_xticks([], [])
# if i>0:
axd[plot_name].set_yticks([], [])
con = ConnectionPatch(xyA=(scatter_idcs[i],
self.stats.counts[idx]),
coordsA=axd['bar'].transData,
xyB=(np.mean(axd[plot_name].get_xlim()),
axd[plot_name].get_ylim()[0]),
coordsB=axd[plot_name].transData,
color='white',
linewidth=1,
linestyle='dotted')
fig.add_artist(con)
plt.suptitle('Selected Expression Densities:', fontsize=18)
def plot_radial_distribution(self, n_neighbors=30, **kwargs):
# distances, _, _ = self.knn(n_neighbors=n_neighbors)
self.graph.update_knn(n_neighbors=n_neighbors)
distances = self.graph.distances
plt.hist(distances[:, 1:n_neighbors].flatten(), **kwargs)
def spatial_decomposition(
self,
mRNAs_center=None,
mRNAs_neighbor=None,
n_neighbors=10,
):
if mRNAs_center is None:
mRNAs_center = self.gene_classes
if mRNAs_neighbor is None:
mRNAs_neighbor = self.gene_classes
# _, neighbors, _ = self.knn(n_neighbors=n_neighbors)
self.graph.update_knn(n_neighbors=n_neighbors)
neighbors = self.graph.neighbors
        neighbor_classes = np.array(self.gene_ids)[neighbors]
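        # Hedged sketch of the truncated remainder: tally how often each
        # neighbor class occurs around each center class (the names below are
        # assumptions, not the original implementation).
        gene_ids = np.array(self.gene_ids)
        composition = np.zeros((len(mRNAs_center), len(mRNAs_neighbor)))
        for i, center in enumerate(mRNAs_center):
            rows = neighbor_classes[gene_ids == self.get_id(center)][:, 1:]
            for j, neighbor in enumerate(mRNAs_neighbor):
                composition[i, j] = np.sum(rows == self.get_id(neighbor))
        return composition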
import os
import sys
import platform
import os.path
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import (QCoreApplication, QPropertyAnimation, QDate, QDateTime, QMetaObject, QObject, QPoint, QRect, QSize, QTime, QUrl, Qt, QEvent)
from PyQt5.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont, QFontDatabase, QIcon, QKeySequence, QLinearGradient, QPalette, QPainter, QPixmap, QRadialGradient)
from PyQt5.QtWidgets import *
import sympy as sp
from scipy import integrate
from scipy.optimize import fsolve
import math
from math import sin, cos, tan, exp, log, log10
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(900, 760)
MainWindow.setMaximumSize(QtCore.QSize(900, 760))
self.Main = QtWidgets.QWidget(MainWindow)
self.Main.setMinimumSize(QtCore.QSize(900, 760))
font = QtGui.QFont()
font.setFamily("Microsoft JhengHei")
self.Main.setFont(font)
self.Main.setObjectName("Main")
self.verticalLayout = QtWidgets.QVBoxLayout(self.Main)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName("verticalLayout")
self.Top_bar = QtWidgets.QFrame(self.Main)
self.Top_bar.setMaximumSize(QtCore.QSize(16777215, 64))
self.Top_bar.setFrameShape(QtWidgets.QFrame.NoFrame)
self.Top_bar.setFrameShadow(QtWidgets.QFrame.Raised)
self.Top_bar.setObjectName("Top_bar")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.Top_bar)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setSpacing(0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.Top_Left_menu = QtWidgets.QFrame(self.Top_bar)
self.Top_Left_menu.setMinimumSize(QtCore.QSize(0, 0))
self.Top_Left_menu.setMaximumSize(QtCore.QSize(128, 16777215))
self.Top_Left_menu.setStyleSheet("background-color: rgb(40,40,40);\n"
"border:0px solid;")
self.Top_Left_menu.setFrameShape(QtWidgets.QFrame.NoFrame)
self.Top_Left_menu.setFrameShadow(QtWidgets.QFrame.Raised)
self.Top_Left_menu.setObjectName("Top_Left_menu")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.Top_Left_menu)
self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_2.setSpacing(0)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.Menu_button = QtWidgets.QPushButton(self.Top_Left_menu)
self.Menu_button.setMinimumSize(QtCore.QSize(128, 64))
self.Menu_button.setStyleSheet("\n"
"\n"
"QPushButton {\n"
" border-style: outset;\n"
"border: 0px solid; \n"
"color:white;\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background: qradialgradient(\n"
" cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #bbb\n"
" );\n"
" }\n"
"\n"
"QPushButton:pressed {\n"
"\n"
" background: qradialgradient(\n"
" cx: 0.4, cy: -0.1, fx: 0.4, fy: -0.1,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #ddd);\n"
"}\n"
"")
self.Menu_button.setText("")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("icons/list-white-g.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.Menu_button.setIcon(icon)
self.Menu_button.setIconSize(QtCore.QSize(26, 26))
self.Menu_button.setObjectName("Menu_button")
self.horizontalLayout_2.addWidget(self.Menu_button)
self.horizontalLayout.addWidget(self.Top_Left_menu)
self.Top_Right_menu = QtWidgets.QFrame(self.Top_bar)
font.setFamily("Microsoft JhengHei")
self.Top_Right_menu.setFont(font)
self.Top_Right_menu.setStyleSheet("background-color: rgb(40,40,40);")
self.Top_Right_menu.setFrameShape(QtWidgets.QFrame.NoFrame)
self.Top_Right_menu.setFrameShadow(QtWidgets.QFrame.Raised)
self.Top_Right_menu.setObjectName("Top_Right_menu")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.Top_Right_menu)
self.verticalLayout_2.setContentsMargins(32, 12, 32, 12)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.Top_right_title = QtWidgets.QFrame(self.Top_Right_menu)
self.Top_right_title.setMaximumSize(QtCore.QSize(700, 16777215))
self.Top_right_title.setStyleSheet("")
self.Top_right_title.setFrameShape(QtWidgets.QFrame.NoFrame)
self.Top_right_title.setFrameShadow(QtWidgets.QFrame.Raised)
self.Top_right_title.setObjectName("Top_right_title")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.Top_right_title)
self.horizontalLayout_3.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_3.setSpacing(0)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.stackedWidget_2 = QtWidgets.QStackedWidget(self.Top_right_title)
self.stackedWidget_2.setObjectName("stackedWidget_2")
self.Home_title = QtWidgets.QWidget()
self.Home_title.setObjectName("Home_title")
self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.Home_title)
self.verticalLayout_7.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_7.setSpacing(0)
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.Home_title_label = QtWidgets.QLabel(self.Home_title)
self.Home_title_label.setFont(font)
self.Home_title_label.setStyleSheet("color: rgb(255, 255, 255);\n"
"font-size: 26px;")
self.Home_title_label.setAlignment(QtCore.Qt.AlignCenter)
self.Home_title_label.setObjectName("Home_title_label")
self.verticalLayout_7.addWidget(self.Home_title_label)
self.stackedWidget_2.addWidget(self.Home_title)
self.Derivative_title = QtWidgets.QWidget()
self.Derivative_title.setObjectName("Derivative_title")
self.verticalLayout_8 = QtWidgets.QVBoxLayout(self.Derivative_title)
self.verticalLayout_8.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_8.setSpacing(0)
self.verticalLayout_8.setObjectName("verticalLayout_8")
self.Derivative_label = QtWidgets.QLabel(self.Derivative_title)
self.Derivative_label.setFont(font)
self.Derivative_label.setStyleSheet("color: rgb(255, 255, 255);\n"
"font-size: 26px;")
self.Derivative_label.setAlignment(QtCore.Qt.AlignCenter)
self.Derivative_label.setObjectName("Derivative_label")
self.verticalLayout_8.addWidget(self.Derivative_label)
self.stackedWidget_2.addWidget(self.Derivative_title)
self.Integral = QtWidgets.QWidget()
self.Integral.setObjectName("Integral")
self.verticalLayout_10 = QtWidgets.QVBoxLayout(self.Integral)
self.verticalLayout_10.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_10.setSpacing(0)
self.verticalLayout_10.setObjectName("verticalLayout_10")
self.Integral_label = QtWidgets.QLabel(self.Integral)
self.Integral_label.setFont(font)
self.Integral_label.setStyleSheet("color: rgb(255, 255, 255);\n"
"font-size: 26px;")
self.Integral_label.setMidLineWidth(0)
self.Integral_label.setAlignment(QtCore.Qt.AlignCenter)
self.Integral_label.setObjectName("Integral_label")
self.verticalLayout_10.addWidget(self.Integral_label)
self.stackedWidget_2.addWidget(self.Integral)
self.d_Integral_title = QtWidgets.QWidget()
self.d_Integral_title.setObjectName("d_Integral_title")
self.verticalLayout_11 = QtWidgets.QVBoxLayout(self.d_Integral_title)
self.verticalLayout_11.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_11.setSpacing(0)
self.verticalLayout_11.setObjectName("verticalLayout_11")
self.d_integral_title_label = QtWidgets.QLabel(self.d_Integral_title)
self.d_integral_title_label.setFont(font)
self.d_integral_title_label.setStyleSheet("color: rgb(255, 255, 255);\n"
"font-size: 26px;")
self.d_integral_title_label.setAlignment(QtCore.Qt.AlignCenter)
self.d_integral_title_label.setObjectName("d_integral_title_label")
self.verticalLayout_11.addWidget(self.d_integral_title_label)
self.stackedWidget_2.addWidget(self.d_Integral_title)
self.c_Integral_title = QtWidgets.QWidget()
self.c_Integral_title.setObjectName("c_Integral_title")
self.verticalLayout_12 = QtWidgets.QVBoxLayout(self.c_Integral_title)
self.verticalLayout_12.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_12.setSpacing(0)
self.verticalLayout_12.setObjectName("verticalLayout_12")
self.c_integral_title_label = QtWidgets.QLabel(self.c_Integral_title)
self.c_integral_title_label.setFont(font)
self.c_integral_title_label.setStyleSheet("color: rgb(255, 255, 255);\n"
"font-size: 26px;")
self.c_integral_title_label.setAlignment(QtCore.Qt.AlignCenter)
self.c_integral_title_label.setObjectName("c_integral_title_label")
self.verticalLayout_12.addWidget(self.c_integral_title_label)
self.stackedWidget_2.addWidget(self.c_Integral_title)
self.Plot_title = QtWidgets.QWidget()
self.Plot_title.setObjectName("Plot_title")
self.verticalLayout_9 = QtWidgets.QVBoxLayout(self.Plot_title)
self.verticalLayout_9.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_9.setSpacing(0)
self.verticalLayout_9.setObjectName("verticalLayout_9")
self.Plot_title_label = QtWidgets.QLabel(self.Plot_title)
self.Plot_title_label.setFont(font)
self.Plot_title_label.setStyleSheet("color: rgb(255, 255, 255);\n"
"font-size: 26px;")
self.Plot_title_label.setAlignment(QtCore.Qt.AlignCenter)
self.Plot_title_label.setObjectName("Plot_title_label")
self.verticalLayout_9.addWidget(self.Plot_title_label)
self.stackedWidget_2.addWidget(self.Plot_title)
self.delta_title = QtWidgets.QWidget()
self.delta_title.setObjectName("delta_title")
self.verticalLayout_13 = QtWidgets.QVBoxLayout(self.delta_title)
self.verticalLayout_13.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_13.setSpacing(0)
self.verticalLayout_13.setObjectName("verticalLayout_13")
self.delta_title_label = QtWidgets.QLabel(self.delta_title)
self.delta_title_label.setFont(font)
self.delta_title_label.setStyleSheet("color: rgb(255, 255, 255);\n"
"font-size: 26px;")
self.delta_title_label.setAlignment(QtCore.Qt.AlignCenter)
self.delta_title_label.setObjectName("delta_title_label")
self.verticalLayout_13.addWidget(self.delta_title_label)
self.stackedWidget_2.addWidget(self.delta_title)
self.horizontalLayout_3.addWidget(self.stackedWidget_2)
self.verticalLayout_2.addWidget(self.Top_right_title)
self.horizontalLayout.addWidget(self.Top_Right_menu)
self.verticalLayout.addWidget(self.Top_bar)
self.Bottom_bar = QtWidgets.QFrame(self.Main)
self.Bottom_bar.setStyleSheet("border:0px solid;")
self.Bottom_bar.setFrameShape(QtWidgets.QFrame.NoFrame)
self.Bottom_bar.setFrameShadow(QtWidgets.QFrame.Raised)
self.Bottom_bar.setObjectName("Bottom_bar")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.Bottom_bar)
self.horizontalLayout_4.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_4.setSpacing(0)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.Bottom_left_icons_out = QtWidgets.QFrame(self.Bottom_bar)
self.Bottom_left_icons_out.setMinimumSize(QtCore.QSize(128, 0))
self.Bottom_left_icons_out.setMaximumSize(QtCore.QSize(128, 16777215))
self.Bottom_left_icons_out.setStyleSheet("background-color: rgb(60,60,60);")
self.Bottom_left_icons_out.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.Bottom_left_icons_out.setFrameShadow(QtWidgets.QFrame.Raised)
self.Bottom_left_icons_out.setObjectName("Bottom_left_icons_out")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.Bottom_left_icons_out)
self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_3.setSpacing(0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.Bottom_left_icons_in = QtWidgets.QFrame(self.Bottom_left_icons_out)
self.Bottom_left_icons_in.setMinimumSize(QtCore.QSize(72, 0))
self.Bottom_left_icons_in.setMaximumSize(QtCore.QSize(72, 16777215))
self.Bottom_left_icons_in.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.Bottom_left_icons_in.setFrameShadow(QtWidgets.QFrame.Raised)
self.Bottom_left_icons_in.setObjectName("Bottom_left_icons_in")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.Bottom_left_icons_in)
self.verticalLayout_4.setContentsMargins(0, 24, 0, 24)
self.verticalLayout_4.setSpacing(24)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.Home_icon = QtWidgets.QFrame(self.Bottom_left_icons_in)
self.Home_icon.setMinimumSize(QtCore.QSize(72, 72))
self.Home_icon.setStyleSheet("QPushButton {\n"
" border-radius: 32px;\n"
" border-style: outset;\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background: qradialgradient(\n"
" cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #bbb\n"
" );\n"
" }\n"
"\n"
"QPushButton:pressed {\n"
" border-style: inset;\n"
" background: qradialgradient(\n"
" cx: 0.4, cy: -0.1, fx: 0.4, fy: -0.1,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #ddd);\n"
" }\n"
"")
self.Home_icon.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.Home_icon.setFrameShadow(QtWidgets.QFrame.Raised)
self.Home_icon.setObjectName("Home_icon")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.Home_icon)
self.verticalLayout_5.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_5.setSpacing(0)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.Home_btn = QtWidgets.QPushButton(self.Home_icon)
self.Home_btn.setMinimumSize(QtCore.QSize(72, 72))
self.Home_btn.setStyleSheet("QPushButton {\n"
" border-radius: 32px;\n"
" border-style: outset;\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background: qradialgradient(\n"
" cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #bbb\n"
" );\n"
" }\n"
"\n"
"QPushButton:pressed {\n"
" border-style: inset;\n"
" background: qradialgradient(\n"
" cx: 0.4, cy: -0.1, fx: 0.4, fy: -0.1,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #ddd);\n"
" }\n"
"")
self.Home_btn.setText("")
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap("icons/Home-white-g.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.Home_btn.setIcon(icon1)
self.Home_btn.setIconSize(QtCore.QSize(64, 64))
self.Home_btn.setObjectName("Home_btn")
self.verticalLayout_5.addWidget(self.Home_btn)
self.verticalLayout_4.addWidget(self.Home_icon)
self.Plot_icon = QtWidgets.QFrame(self.Bottom_left_icons_in)
self.Plot_icon.setMinimumSize(QtCore.QSize(72, 72))
self.Plot_icon.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.Plot_icon.setFrameShadow(QtWidgets.QFrame.Raised)
self.Plot_icon.setObjectName("Plot_icon")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.Plot_icon)
self.horizontalLayout_5.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_5.setSpacing(0)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.Plot_btn = QtWidgets.QPushButton(self.Plot_icon)
self.Plot_btn.setMinimumSize(QtCore.QSize(72, 72))
self.Plot_btn.setStyleSheet("QPushButton {\n"
" border-radius: 32px;\n"
" border-style: outset;\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background: qradialgradient(\n"
" cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #bbb\n"
" );\n"
" }\n"
"\n"
"QPushButton:pressed {\n"
" border-style: inset;\n"
" background: qradialgradient(\n"
" cx: 0.4, cy: -0.1, fx: 0.4, fy: -0.1,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #ddd);\n"
" }\n"
"")
self.Plot_btn.setText("")
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap("icons/plot-white-g.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.Plot_btn.setIcon(icon2)
self.Plot_btn.setIconSize(QtCore.QSize(64, 64))
self.Plot_btn.setObjectName("Plot_btn")
self.horizontalLayout_5.addWidget(self.Plot_btn)
self.verticalLayout_4.addWidget(self.Plot_icon)
self.Derviate_icon = QtWidgets.QFrame(self.Bottom_left_icons_in)
self.Derviate_icon.setMinimumSize(QtCore.QSize(72, 72))
self.Derviate_icon.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.Derviate_icon.setFrameShadow(QtWidgets.QFrame.Raised)
self.Derviate_icon.setObjectName("Derviate_icon")
self.horizontalLayout_6 = QtWidgets.QHBoxLayout(self.Derviate_icon)
self.horizontalLayout_6.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_6.setSpacing(0)
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.Derviate_btn = QtWidgets.QPushButton(self.Derviate_icon)
self.Derviate_btn.setMinimumSize(QtCore.QSize(72, 72))
self.Derviate_btn.setStyleSheet("QPushButton {\n"
" border-radius: 32px;\n"
" border-style: outset;\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background: qradialgradient(\n"
" cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #bbb\n"
" );\n"
" }\n"
"\n"
"QPushButton:pressed {\n"
" border-style: inset;\n"
" background: qradialgradient(\n"
" cx: 0.4, cy: -0.1, fx: 0.4, fy: -0.1,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #ddd);\n"
" }\n"
"")
self.Derviate_btn.setText("")
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap("icons/poch-white-g.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.Derviate_btn.setIcon(icon3)
self.Derviate_btn.setIconSize(QtCore.QSize(64, 64))
self.Derviate_btn.setObjectName("Derviate_btn")
self.horizontalLayout_6.addWidget(self.Derviate_btn)
self.verticalLayout_4.addWidget(self.Derviate_icon)
self.Integral_1st_icon = QtWidgets.QFrame(self.Bottom_left_icons_in)
self.Integral_1st_icon.setMinimumSize(QtCore.QSize(72, 72))
self.Integral_1st_icon.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.Integral_1st_icon.setFrameShadow(QtWidgets.QFrame.Raised)
self.Integral_1st_icon.setObjectName("Integral_1st_icon")
self.horizontalLayout_7 = QtWidgets.QHBoxLayout(self.Integral_1st_icon)
self.horizontalLayout_7.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_7.setSpacing(0)
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.Integral_1st_btn = QtWidgets.QPushButton(self.Integral_1st_icon)
self.Integral_1st_btn.setMinimumSize(QtCore.QSize(72, 72))
self.Integral_1st_btn.setStyleSheet("QPushButton {\n"
" border-radius: 32px;\n"
" border-style: outset;\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background: qradialgradient(\n"
" cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #bbb\n"
" );\n"
" }\n"
"\n"
"QPushButton:pressed {\n"
" border-style: inset;\n"
" background: qradialgradient(\n"
" cx: 0.4, cy: -0.1, fx: 0.4, fy: -0.1,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #ddd);\n"
" }\n"
"")
self.Integral_1st_btn.setText("")
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap("icons/Calka1-white-g.PNG"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.Integral_1st_btn.setIcon(icon4)
self.Integral_1st_btn.setIconSize(QtCore.QSize(64, 64))
self.Integral_1st_btn.setObjectName("Integral_1st_btn")
self.horizontalLayout_7.addWidget(self.Integral_1st_btn)
self.verticalLayout_4.addWidget(self.Integral_1st_icon)
self.Integral_2x_icon = QtWidgets.QFrame(self.Bottom_left_icons_in)
self.Integral_2x_icon.setMinimumSize(QtCore.QSize(70, 70))
self.Integral_2x_icon.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.Integral_2x_icon.setFrameShadow(QtWidgets.QFrame.Raised)
self.Integral_2x_icon.setObjectName("Integral_2x_icon")
self.horizontalLayout_8 = QtWidgets.QHBoxLayout(self.Integral_2x_icon)
self.horizontalLayout_8.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_8.setSpacing(0)
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.Integral_2x_btn = QtWidgets.QPushButton(self.Integral_2x_icon)
self.Integral_2x_btn.setMinimumSize(QtCore.QSize(72, 72))
self.Integral_2x_btn.setStyleSheet("QPushButton {\n"
" border-radius: 32px;\n"
" border-style: outset;\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background: qradialgradient(\n"
" cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #bbb\n"
" );\n"
" }\n"
"\n"
"QPushButton:pressed {\n"
" border-style: inset;\n"
" background: qradialgradient(\n"
" cx: 0.4, cy: -0.1, fx: 0.4, fy: -0.1,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #ddd);\n"
" }\n"
"")
self.Integral_2x_btn.setText("")
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap("icons/Calka2x-white-g.PNG"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.Integral_2x_btn.setIcon(icon5)
self.Integral_2x_btn.setIconSize(QtCore.QSize(64, 64))
self.Integral_2x_btn.setObjectName("Integral_2x_btn")
self.horizontalLayout_8.addWidget(self.Integral_2x_btn)
self.verticalLayout_4.addWidget(self.Integral_2x_icon)
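# Curved (triple) integral tool button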
self.Integral_curved_plot = QtWidgets.QFrame(self.Bottom_left_icons_in)
self.Integral_curved_plot.setMinimumSize(QtCore.QSize(72, 72))
self.Integral_curved_plot.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.Integral_curved_plot.setFrameShadow(QtWidgets.QFrame.Raised)
self.Integral_curved_plot.setObjectName("Integral_curved_plot")
self.horizontalLayout_9 = QtWidgets.QHBoxLayout(self.Integral_curved_plot)
self.horizontalLayout_9.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_9.setSpacing(0)
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
self.Integral_curved_btn = QtWidgets.QPushButton(self.Integral_curved_plot)
self.Integral_curved_btn.setMinimumSize(QtCore.QSize(72, 72))
self.Integral_curved_btn.setStyleSheet("QPushButton {\n"
" border-radius: 32px;\n"
" border-style: outset;\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background: qradialgradient(\n"
" cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #bbb\n"
" );\n"
" }\n"
"\n"
"QPushButton:pressed {\n"
" border-style: inset;\n"
" background: qradialgradient(\n"
" cx: 0.4, cy: -0.1, fx: 0.4, fy: -0.1,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #ddd);\n"
" }\n"
"")
self.Integral_curved_btn.setText("")
icon6 = QtGui.QIcon()
icon6.addPixmap(QtGui.QPixmap("icons/Calka3x-white-g.PNG"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.Integral_curved_btn.setIcon(icon6)
self.Integral_curved_btn.setIconSize(QtCore.QSize(64, 64))
self.Integral_curved_btn.setShortcut("")
self.Integral_curved_btn.setObjectName("Integral_curved_btn")
self.horizontalLayout_9.addWidget(self.Integral_curved_btn)
self.verticalLayout_4.addWidget(self.Integral_curved_plot)
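# Delta (quadratic) tool button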
self.Delta_plot = QtWidgets.QFrame(self.Bottom_left_icons_in)
self.Delta_plot.setMinimumSize(QtCore.QSize(72, 72))
self.Delta_plot.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.Delta_plot.setFrameShadow(QtWidgets.QFrame.Raised)
self.Delta_plot.setObjectName("Delta_plot")
self.horizontalLayout_10 = QtWidgets.QHBoxLayout(self.Delta_plot)
self.horizontalLayout_10.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_10.setSpacing(0)
self.horizontalLayout_10.setObjectName("horizontalLayout_10")
self.Delta_plot_btn = QtWidgets.QPushButton(self.Delta_plot)
self.Delta_plot_btn.setMinimumSize(QtCore.QSize(72, 72))
self.Delta_plot_btn.setStyleSheet("QPushButton {\n"
" border-radius: 32px;\n"
" border-style: outset;\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background: qradialgradient(\n"
" cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #bbb\n"
" );\n"
" }\n"
"\n"
"QPushButton:pressed {\n"
" border-style: inset;\n"
" background: qradialgradient(\n"
" cx: 0.4, cy: -0.1, fx: 0.4, fy: -0.1,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #ddd);\n"
" }\n"
"")
self.Delta_plot_btn.setText("")
icon7 = QtGui.QIcon()
icon7.addPixmap(QtGui.QPixmap("icons/delta-white-g.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.Delta_plot_btn.setIcon(icon7)
self.Delta_plot_btn.setIconSize(QtCore.QSize(64, 64))
self.Delta_plot_btn.setObjectName("Delta_plot_btn")
self.horizontalLayout_10.addWidget(self.Delta_plot_btn)
self.verticalLayout_4.addWidget(self.Delta_plot)
self.verticalLayout_3.addWidget(self.Bottom_left_icons_in, 0, QtCore.Qt.AlignHCenter|QtCore.Qt.AlignTop)
self.horizontalLayout_4.addWidget(self.Bottom_left_icons_out)
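# Right-hand pane: QStackedWidget with one page per tool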
self.Bottom_right_content_out = QtWidgets.QFrame(self.Bottom_bar)
self.Bottom_right_content_out.setLayoutDirection(QtCore.Qt.LeftToRight)
self.Bottom_right_content_out.setStyleSheet("background-color: rgb(60,60,60);\n"
"border-left: 2px solid;\n"
"border-left-color: rgb(60,60,60);")
self.Bottom_right_content_out.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.Bottom_right_content_out.setFrameShadow(QtWidgets.QFrame.Raised)
self.Bottom_right_content_out.setObjectName("Bottom_right_content_out")
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.Bottom_right_content_out)
self.verticalLayout_6.setContentsMargins(30, 30, 30, 5)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.Bottom_right_content_in = QtWidgets.QFrame(self.Bottom_right_content_out)
self.Bottom_right_content_in.setStyleSheet("border:0px solid;")
self.Bottom_right_content_in.setFrameShape(QtWidgets.QFrame.NoFrame)
self.Bottom_right_content_in.setFrameShadow(QtWidgets.QFrame.Raised)
self.Bottom_right_content_in.setObjectName("Bottom_right_content_in")
self.horizontalLayout_11 = QtWidgets.QHBoxLayout(self.Bottom_right_content_in)
self.horizontalLayout_11.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_11.setSpacing(0)
self.horizontalLayout_11.setObjectName("horizontalLayout_11")
self.stackedWidget = QtWidgets.QStackedWidget(self.Bottom_right_content_in)
self.stackedWidget.setEnabled(True)
self.stackedWidget.setMaximumSize(QtCore.QSize(800, 16777215))
self.stackedWidget.setFont(font)
self.stackedWidget.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.stackedWidget.setFrameShadow(QtWidgets.QFrame.Raised)
self.stackedWidget.setObjectName("stackedWidget")
self.Home_content = QtWidgets.QWidget()
self.Home_content.setFont(font)
self.Home_content.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size:22px;")
self.Home_content.setObjectName("Home_content")
self.Home_label_2 = QtWidgets.QLabel(self.Home_content)
self.Home_label_2.setGeometry(QtCore.QRect(0, 40, 800, 121))
self.Home_label_2.setMaximumSize(QtCore.QSize(700, 200))
self.Home_label_2.setFont(font)
self.Home_label_2.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.Home_label_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.Home_label_2.setTextFormat(QtCore.Qt.AutoText)
self.Home_label_2.setScaledContents(False)
self.Home_label_2.setWordWrap(True)
self.Home_label_2.setObjectName("Home_label_2")
self.Home_label_1 = QtWidgets.QLabel(self.Home_content)
self.Home_label_1.setGeometry(QtCore.QRect(0, 0, 321, 33))
self.Home_label_1.setMaximumSize(QtCore.QSize(16777215, 50))
self.Home_label_1.setFont(font)
self.Home_label_1.setStyleSheet("color: rgb(255, 255, 255);\n"
"font-size: 26px;\n"
"")
self.Home_label_1.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.Home_label_1.setObjectName("Home_label_1")
self.Home_label_3 = QtWidgets.QLabel(self.Home_content)
self.Home_label_3.setGeometry(QtCore.QRect(0, 200, 621, 33))
self.Home_label_3.setMaximumSize(QtCore.QSize(16777215, 50))
self.Home_label_3.setFont(font)
self.Home_label_3.setStyleSheet("color: rgb(255, 255, 255);\n"
"font-size: 26px;\n"
"")
self.Home_label_3.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.Home_label_3.setObjectName("Home_label_3")
self.Home_label_4 = QtWidgets.QLabel(self.Home_content)
self.Home_label_4.setGeometry(QtCore.QRect(0, 240, 700, 30))
self.Home_label_4.setMaximumSize(QtCore.QSize(700, 100))
self.Home_label_4.setFont(font)
self.Home_label_4.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.Home_label_4.setFrameShadow(QtWidgets.QFrame.Raised)
self.Home_label_4.setTextFormat(QtCore.Qt.AutoText)
self.Home_label_4.setScaledContents(False)
self.Home_label_4.setWordWrap(True)
self.Home_label_4.setObjectName("Home_label_4")
self.Home_label_5 = QtWidgets.QLabel(self.Home_content)
self.Home_label_5.setGeometry(QtCore.QRect(0, 270, 700, 30))
self.Home_label_5.setMaximumSize(QtCore.QSize(700, 100))
self.Home_label_5.setFont(font)
self.Home_label_5.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.Home_label_5.setFrameShadow(QtWidgets.QFrame.Raised)
self.Home_label_5.setTextFormat(QtCore.Qt.AutoText)
self.Home_label_5.setScaledContents(False)
self.Home_label_5.setWordWrap(True)
self.Home_label_5.setObjectName("Home_label_5")
self.Home_label_6 = QtWidgets.QLabel(self.Home_content)
self.Home_label_6.setGeometry(QtCore.QRect(0, 300, 700, 30))
self.Home_label_6.setMaximumSize(QtCore.QSize(700, 100))
self.Home_label_6.setFont(font)
self.Home_label_6.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.Home_label_6.setFrameShadow(QtWidgets.QFrame.Raised)
self.Home_label_6.setTextFormat(QtCore.Qt.AutoText)
self.Home_label_6.setScaledContents(False)
self.Home_label_6.setWordWrap(True)
self.Home_label_6.setObjectName("Home_label_6")
self.Home_label_7 = QtWidgets.QLabel(self.Home_content)
self.Home_label_7.setGeometry(QtCore.QRect(0, 330, 700, 30))
self.Home_label_7.setMaximumSize(QtCore.QSize(700, 100))
self.Home_label_7.setFont(font)
self.Home_label_7.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.Home_label_7.setFrameShadow(QtWidgets.QFrame.Raised)
self.Home_label_7.setTextFormat(QtCore.Qt.AutoText)
self.Home_label_7.setScaledContents(False)
self.Home_label_7.setWordWrap(True)
self.Home_label_7.setObjectName("Home_label_7")
self.Home_label_8 = QtWidgets.QLabel(self.Home_content)
self.Home_label_8.setGeometry(QtCore.QRect(0, 360, 700, 30))
self.Home_label_8.setMaximumSize(QtCore.QSize(700, 100))
self.Home_label_8.setFont(font)
self.Home_label_8.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.Home_label_8.setFrameShadow(QtWidgets.QFrame.Raised)
self.Home_label_8.setTextFormat(QtCore.Qt.AutoText)
self.Home_label_8.setScaledContents(False)
self.Home_label_8.setWordWrap(True)
self.Home_label_8.setObjectName("Home_label_8")
self.Home_label_9 = QtWidgets.QLabel(self.Home_content)
self.Home_label_9.setGeometry(QtCore.QRect(0, 390, 700, 30))
self.Home_label_9.setMaximumSize(QtCore.QSize(700, 100))
self.Home_label_9.setFont(font)
self.Home_label_9.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.Home_label_9.setFrameShadow(QtWidgets.QFrame.Raised)
self.Home_label_9.setTextFormat(QtCore.Qt.AutoText)
self.Home_label_9.setScaledContents(False)
self.Home_label_9.setWordWrap(True)
self.Home_label_9.setObjectName("Home_label_9")
self.Home_label_10 = QtWidgets.QLabel(self.Home_content)
self.Home_label_10.setGeometry(QtCore.QRect(0, 450, 321, 33))
self.Home_label_10.setMaximumSize(QtCore.QSize(16777215, 50))
self.Home_label_10.setFont(font)
self.Home_label_10.setStyleSheet("color: rgb(255, 255, 255);\n"
"font-size: 26px;\n"
"")
self.Home_label_10.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.Home_label_10.setObjectName("Home_label_10")
self.Home_label_11 = QtWidgets.QLabel(self.Home_content)
self.Home_label_11.setGeometry(QtCore.QRect(0, 490, 700, 51))
self.Home_label_11.setMaximumSize(QtCore.QSize(700, 100))
self.Home_label_11.setFont(font)
self.Home_label_11.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.Home_label_11.setFrameShadow(QtWidgets.QFrame.Raised)
self.Home_label_11.setTextFormat(QtCore.Qt.AutoText)
self.Home_label_11.setScaledContents(False)
self.Home_label_11.setWordWrap(True)
self.Home_label_11.setObjectName("Home_label_11")
self.stackedWidget.addWidget(self.Home_content)
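# Page: single integral - function input, integration range, area result, plot preview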
self.Integral_content = QtWidgets.QWidget()
self.Integral_content.setObjectName("Integral_content")
self.Integral_content.setStyleSheet('font-size:18px')
self.Integral_main_label = QtWidgets.QLabel(self.Integral_content)
self.Integral_main_label.setGeometry(QtCore.QRect(0, 0, 701, 191))
self.Integral_main_label.setFont(font)
self.Integral_main_label.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.Integral_main_label.setWordWrap(True)
self.Integral_main_label.setObjectName("Integral_main_label")
self.Integral_sign = QtWidgets.QPushButton(self.Integral_content)
self.Integral_sign.setGeometry(QtCore.QRect(6, 315, 31, 71))
self.Integral_sign.setText("")
self.Integral_sign.setIcon(icon4)
self.Integral_sign.setIconSize(QtCore.QSize(58, 58))
self.Integral_sign.setObjectName("Integral_sign")
self.Integral_label_fx = QtWidgets.QLabel(self.Integral_content)
self.Integral_label_fx.setGeometry(QtCore.QRect(50, 200, 71, 31))
self.Integral_label_fx.setFont(font)
self.Integral_label_fx.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.Integral_label_fx.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.Integral_label_fx.setObjectName("Integral_label_fx")
self.Integral_input_value = QtWidgets.QLineEdit(self.Integral_content)
self.Integral_input_value.setGeometry(QtCore.QRect(130, 200, 181, 31))
self.Integral_input_value.setFont(font)
self.Integral_input_value.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"outline: none;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 18px;\n"
"padding-left:10px;\n"
"")
self.Integral_input_value.setObjectName("Integral_input_value")
self.Integral_label_fx_2 = QtWidgets.QLabel(self.Integral_content)
self.Integral_label_fx_2.setGeometry(QtCore.QRect(48, 330, 81, 31))
self.Integral_label_fx_2.setFont(font)
self.Integral_label_fx_2.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.Integral_label_fx_2.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.Integral_label_fx_2.setObjectName("Integral_label_fx_2")
self.Integral_label_fxpr_res = QtWidgets.QLabel(self.Integral_content)
self.Integral_label_fxpr_res.setGeometry(QtCore.QRect(130, 330, 181, 31))
self.Integral_label_fxpr_res.setFont(font)
self.Integral_label_fxpr_res.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 18px;\n"
"padding-left:10px;\n"
"")
self.Integral_label_fxpr_res.setText("")
self.Integral_label_fxpr_res.setObjectName("Integral_label_fxpr_res")
self.Integral_image_label_preview = QtWidgets.QLabel(self.Integral_content)
self.Integral_image_label_preview.setGeometry(QtCore.QRect(410, 500, 271, 31))
self.Integral_image_label_preview.setText('Preview of the calculated figure')
self.Integral_image_label_preview.setFont(font)
self.Integral_image_label_preview.setStyleSheet("font-size: 18px")
self.Integral_image_label_preview.setObjectName('Integral_image_label_preview')
self.Integral_image_frame_preview = QtWidgets.QFrame(self.Integral_content)
self.Integral_image_frame_preview.setGeometry(QtCore.QRect(330, 160, 340, 340))
self.Integral_image_frame_preview.setStyleSheet("border: 1px solid;\n"
"border-color: rgb(90, 90, 90);")
self.Integral_image_frame_preview.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.Integral_image_frame_preview.setFrameShadow(QtWidgets.QFrame.Raised)
self.Integral_image_frame_preview.setObjectName("Integral_image_frame_preview")
self.Integral_image_label_preview_fig = QtWidgets.QLabel(self.Integral_image_frame_preview)
self.Integral_image_label_preview_fig.setGeometry(QtCore.QRect(0, 0, 340, 340))
self.Integral_image_label_preview_fig.setText("")
self.Integral_image_label_preview_fig.setScaledContents(True)
self.Integral_image_label_preview_fig.setObjectName("Integral_image_label_preview_fig")
self.Integral_BTN_compute = QtWidgets.QPushButton(self.Integral_content)
self.Integral_BTN_compute.setGeometry(QtCore.QRect(100, 460, 131, 41))
self.Integral_BTN_compute.setFont(font)
self.Integral_BTN_compute.setStyleSheet("QPushButton {\n"
" border-radius: 16px;\n"
" border-style: outset;\n"
" color: white;\n"
" font-size: 22px;\n"
" border: 1px solid;\n"
" border-color: rgb(232, 232, 232);\n"
"\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background: qradialgradient(\n"
" cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #bbb\n"
" );\n"
" color: black;\n"
" }\n"
"\n"
"QPushButton:pressed {\n"
" border-style: inset;\n"
" background: qradialgradient(\n"
" cx: 0.4, cy: -0.1, fx: 0.4, fy: -0.1,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #ddd);\n"
" }")
self.Integral_BTN_compute.setObjectName("Integral_BTN_compute")
# Integration range inputs and computed-area result for the single-integral page
self.Integral_plot_range = QtWidgets.QLabel(self.Integral_content)
self.Integral_plot_range.setGeometry(QtCore.QRect(0, 245, 121, 61))
self.Integral_plot_range.setFont(font)
self.Integral_plot_range.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.Integral_plot_range.setFrameShadow(QtWidgets.QFrame.Raised)
self.Integral_plot_range.setTextFormat(QtCore.Qt.AutoText)
self.Integral_plot_range.setScaledContents(False)
self.Integral_plot_range.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.Integral_plot_range.setWordWrap(True)
self.Integral_plot_range.setObjectName("Integral_plot_range")
self.Integral_plot_range.setText('Integration area:')
self.Integral_range_x1 = QtWidgets.QLineEdit(self.Integral_content)
self.Integral_range_x1.setGeometry(QtCore.QRect(130, 260, 86, 36))
self.Integral_range_x1.setFont(font)
self.Integral_range_x1.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 18px;\n"
"padding-left:10px;\n"
"")
self.Integral_range_x1.setObjectName("Integral_range_x1")
self.Integral_range_x2 = QtWidgets.QLineEdit(self.Integral_content)
self.Integral_range_x2.setGeometry(QtCore.QRect(220, 260, 86, 36))
self.Integral_range_x2.setFont(font)
self.Integral_range_x2.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 18px;\n"
"padding-left:10px;\n"
"")
self.Integral_range_x2.setObjectName("Integral_range_x2")
self.Integral_label_P = QtWidgets.QLabel(self.Integral_content)
self.Integral_label_P.setGeometry(QtCore.QRect(50, 390, 71, 31))
self.Integral_label_P.setFont(font)
self.Integral_label_P.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.Integral_label_P.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.Integral_label_P.setObjectName("Integral_label_P")
self.Integral_label_P_res = QtWidgets.QLabel(self.Integral_content)
self.Integral_label_P_res.setGeometry(QtCore.QRect(130, 390, 181, 31))
self.Integral_label_P_res.setFont(font)
self.Integral_label_P_res.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 18px;\n"
"padding-left:10px;\n"
"")
self.Integral_label_P_res.setText("")
self.Integral_label_P_res.setObjectName("Integral_label_P_res")
# Status message stack for the single-integral page (error / success labels)
self.stackedWidget_4 = QtWidgets.QStackedWidget(self.Integral_content)
self.stackedWidget_4.setGeometry(QtCore.QRect(0, 510, 321, 61))
self.stackedWidget_4.setFont(font)
self.stackedWidget_4.setStyleSheet("color: rgb(253, 41, 41);\n"
"font-size: 16px;")
self.stackedWidget_4.setObjectName("stackedWidget_4")
self.error_widget_6 = QtWidgets.QWidget()
self.error_widget_6.setFont(font)
self.error_widget_6.setObjectName("error_widget_6")
self.horizontalLayout_18 = QtWidgets.QHBoxLayout(self.error_widget_6)
self.horizontalLayout_18.setObjectName("horizontalLayout_18")
self.error_label_6 = QtWidgets.QLabel(self.error_widget_6)
self.error_label_6.setFont(font)
self.error_label_6.setWordWrap(True)
self.error_label_6.setObjectName("error_label_6")
self.horizontalLayout_18.addWidget(self.error_label_6)
self.stackedWidget_4.addWidget(self.error_widget_6)
self.error_widget_7 = QtWidgets.QWidget()
self.error_widget_7.setFont(font)
self.error_widget_7.setObjectName("error_widget_7")
self.horizontalLayout_19 = QtWidgets.QHBoxLayout(self.error_widget_7)
self.horizontalLayout_19.setObjectName("horizontalLayout_19")
self.error_label_7 = QtWidgets.QLabel(self.error_widget_7)
self.error_label_7.setFont(font)
self.error_label_7.setWordWrap(True)
self.error_label_7.setObjectName("error_label_7")
self.horizontalLayout_19.addWidget(self.error_label_7)
self.stackedWidget_4.addWidget(self.error_widget_7)
self.correct_widget_7 = QtWidgets.QWidget()
self.correct_widget_7.setFont(font)
self.correct_widget_7.setObjectName("correct_widget_7")
self.horizontalLayout_correct_7 = QtWidgets.QHBoxLayout(self.correct_widget_7)
self.horizontalLayout_correct_7.setObjectName("horizontalLayout_correct_7")
self.correct_label_7 = QtWidgets.QLabel(self.correct_widget_7)
self.correct_label_7.setFont(font)
self.correct_label_7.setWordWrap(True)
self.correct_label_7.setStyleSheet('color:blue;')
self.correct_label_7.setObjectName("correct_label_7")
self.horizontalLayout_correct_7.addWidget(self.correct_label_7)
self.stackedWidget_4.addWidget(self.correct_widget_7)
self.stackedWidget.addWidget(self.Integral_content)
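# Page: plot generator - function, x-range, line colour, preview frame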
self.Plot_content = QtWidgets.QWidget()
self.Plot_content.setEnabled(True)
self.Plot_content.setFont(font)
self.Plot_content.setObjectName("Plot_content")
self.Plot_label_1 = QtWidgets.QLabel(self.Plot_content)
self.Plot_label_1.setGeometry(QtCore.QRect(0, 20, 341, 91))
self.Plot_label_1.setMaximumSize(QtCore.QSize(700, 200))
self.Plot_label_1.setFont(font)
self.Plot_label_1.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.Plot_label_1.setFrameShadow(QtWidgets.QFrame.Raised)
self.Plot_label_1.setTextFormat(QtCore.Qt.AutoText)
self.Plot_label_1.setScaledContents(False)
self.Plot_label_1.setWordWrap(True)
self.Plot_label_1.setObjectName("Plot_label_1")
self.Plot_frame = QtWidgets.QFrame(self.Plot_content)
self.Plot_frame.setGeometry(QtCore.QRect(350, 0, 350, 350))
self.Plot_frame.setStyleSheet("border: 1px solid;\n"
"border-color: rgb(90, 90, 90);")
self.Plot_frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.Plot_frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.Plot_frame.setObjectName("Plot_frame")
self.Plot_fn_edit = QtWidgets.QLineEdit(self.Plot_content)
self.Plot_fn_edit.setGeometry(QtCore.QRect(130, 140, 141, 31))
self.Plot_fn_edit.setFont(font)
self.Plot_fn_edit.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 18px;\n"
"padding-left:10px;\n"
"")
self.Plot_fn_edit.setObjectName("Plot_fn_edit")
self.Plot_fn_sign_label = QtWidgets.QLabel(self.Plot_content)
self.Plot_fn_sign_label.setGeometry(QtCore.QRect(50, 135, 71, 41))
self.Plot_fn_sign_label.setMaximumSize(QtCore.QSize(700, 200))
self.Plot_fn_sign_label.setFont(font)
self.Plot_fn_sign_label.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.Plot_fn_sign_label.setFrameShadow(QtWidgets.QFrame.Raised)
self.Plot_fn_sign_label.setTextFormat(QtCore.Qt.AutoText)
self.Plot_fn_sign_label.setScaledContents(False)
self.Plot_fn_sign_label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.Plot_fn_sign_label.setWordWrap(True)
self.Plot_fn_sign_label.setObjectName("Plot_fn_sign_label")
self.Plot_range_sign = QtWidgets.QLabel(self.Plot_content)
self.Plot_range_sign.setGeometry(QtCore.QRect(35, 185, 81, 41))
self.Plot_range_sign.setMaximumSize(QtCore.QSize(700, 200))
self.Plot_range_sign.setFont(font)
self.Plot_range_sign.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.Plot_range_sign.setFrameShadow(QtWidgets.QFrame.Raised)
self.Plot_range_sign.setTextFormat(QtCore.Qt.AutoText)
self.Plot_range_sign.setScaledContents(False)
self.Plot_range_sign.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.Plot_range_sign.setWordWrap(True)
self.Plot_range_sign.setObjectName("Plot_range_sign")
self.Plot_range_x1 = QtWidgets.QLineEdit(self.Plot_content)
self.Plot_range_x1.setGeometry(QtCore.QRect(130, 190, 61, 31))
self.Plot_range_x1.setFont(font)
self.Plot_range_x1.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 18px;\n"
"padding-left:10px;\n"
"")
self.Plot_range_x1.setObjectName("Plot_range_x1")
self.Plot_color_sign = QtWidgets.QLabel(self.Plot_content)
self.Plot_color_sign.setGeometry(QtCore.QRect(25, 235, 91, 41))
self.Plot_color_sign.setMaximumSize(QtCore.QSize(700, 200))
self.Plot_color_sign.setFont(font)
self.Plot_color_sign.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.Plot_color_sign.setFrameShadow(QtWidgets.QFrame.Raised)
self.Plot_color_sign.setTextFormat(QtCore.Qt.AutoText)
self.Plot_color_sign.setScaledContents(False)
self.Plot_color_sign.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.Plot_color_sign.setWordWrap(True)
self.Plot_color_sign.setObjectName("Plot_color_sign")
self.Plot_range_x2 = QtWidgets.QLineEdit(self.Plot_content)
self.Plot_range_x2.setGeometry(QtCore.QRect(210, 190, 61, 31))
self.Plot_range_x2.setFont(font)
self.Plot_range_x2.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 18px;\n"
"padding-left:10px;\n"
"")
self.Plot_range_x2.setObjectName("Plot_range_x2")
self.Plot_combo_color = QtWidgets.QComboBox(self.Plot_content)
self.Plot_combo_color.setGeometry(QtCore.QRect(130, 240, 141, 31))
self.Plot_combo_color.setFont(font)
self.Plot_combo_color.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;\n"
"border: 1px solid;\n"
"border-color: rgb(200, 200, 200);\n"
"border-radius: 15px;")
self.Plot_combo_color.setObjectName("Plot_combo_color")
self.Plot_combo_color.addItem("")
self.Plot_combo_color.addItem("")
self.Plot_combo_color.addItem("")
self.Plot_combo_color.addItem("")
self.Plot_combo_color.addItem("")
self.Plot_combo_color.addItem("")
self.Plot_BTN_generate = QtWidgets.QPushButton(self.Plot_content)
self.Plot_BTN_generate.setGeometry(QtCore.QRect(110, 300, 131, 41))
self.Plot_BTN_generate.setFont(font)
self.Plot_BTN_generate.setStyleSheet("QPushButton {\n"
" border-radius: 16px;\n"
" border-style: outset;\n"
" color: white;\n"
" font-size: 22px;\n"
" \n"
" border: 1px solid;\n"
" border-color: rgb(232, 232, 232);\n"
"\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background: qradialgradient(\n"
" cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #bbb\n"
" );\n"
" color: black;\n"
" }\n"
"\n"
"QPushButton:pressed {\n"
" border-style: inset;\n"
" background: qradialgradient(\n"
" cx: 0.4, cy: -0.1, fx: 0.4, fy: -0.1,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #ddd);\n"
" }")
self.Plot_BTN_generate.setObjectName("Plot_BTN_generate")
self.Plot_label_2 = QtWidgets.QLabel(self.Plot_content)
self.Plot_label_2.setGeometry(QtCore.QRect(350, 350, 351, 21))
self.Plot_label_2.setMaximumSize(QtCore.QSize(700, 200))
self.Plot_label_2.setFont(font)
self.Plot_label_2.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.Plot_label_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.Plot_label_2.setTextFormat(QtCore.Qt.AutoText)
self.Plot_label_2.setScaledContents(False)
self.Plot_label_2.setAlignment(QtCore.Qt.AlignCenter)
self.Plot_label_2.setWordWrap(True)
self.Plot_label_2.setObjectName("Plot_label_2")
self.Plot_error_info = QtWidgets.QStackedWidget(self.Plot_content)
self.Plot_error_info.setGeometry(QtCore.QRect(20, 370, 311, 51))
self.Plot_error_info.setObjectName("Plot_error_info")
self.error_widget_1 = QtWidgets.QWidget()
self.error_widget_1.setObjectName("error_widget_1")
self.horizontalLayout_13 = QtWidgets.QHBoxLayout(self.error_widget_1)
self.horizontalLayout_13.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_13.setSpacing(0)
self.horizontalLayout_13.setObjectName("horizontalLayout_13")
self.error_label_1 = QtWidgets.QLabel(self.error_widget_1)
self.error_label_1.setFont(font)
self.error_label_1.setStyleSheet("color: rgb(253, 41, 41);\n"
"font-size: 16px;")
self.error_label_1.setWordWrap(True)
self.error_label_1.setObjectName("error_label_1")
self.horizontalLayout_13.addWidget(self.error_label_1)
self.Plot_error_info.addWidget(self.error_widget_1)
self.error_widget_2 = QtWidgets.QWidget()
self.error_widget_2.setObjectName("error_widget_2")
self.horizontalLayout_14 = QtWidgets.QHBoxLayout(self.error_widget_2)
self.horizontalLayout_14.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_14.setSpacing(0)
self.horizontalLayout_14.setObjectName("horizontalLayout_14")
self.error_label_2 = QtWidgets.QLabel(self.error_widget_2)
self.error_label_2.setFont(font)
self.error_label_2.setStyleSheet("color: rgb(253, 41, 41);\n"
"font-size: 16px;")
self.error_label_2.setWordWrap(True)
self.error_label_2.setObjectName("error_label_2")
self.horizontalLayout_14.addWidget(self.error_label_2)
self.Plot_error_info.addWidget(self.error_widget_2)
self.Plot_figure_saved_widget = QtWidgets.QWidget()
self.Plot_figure_saved_widget.setObjectName("Plot_figure_saved_widget")
self.horizontalLayout_15 = QtWidgets.QHBoxLayout(self.Plot_figure_saved_widget)
self.horizontalLayout_15.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_15.setSpacing(0)
self.horizontalLayout_15.setObjectName("horizontalLayout_15")
self.Plot_figure_saved_label = QtWidgets.QLabel(self.Plot_figure_saved_widget)
self.Plot_figure_saved_label.setFont(font)
self.Plot_figure_saved_label.setStyleSheet("color: rgb(12, 158, 255);\n"
"font-size: 16px;")
self.Plot_figure_saved_label.setObjectName("Plot_figure_saved_label")
self.horizontalLayout_15.addWidget(self.Plot_figure_saved_label)
self.Plot_error_info.addWidget(self.Plot_figure_saved_widget)
self.stackedWidget.addWidget(self.Plot_content)
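# Page: derivative - function input, plot range, symbolic result, plot preview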
self.Derivative_centent = QtWidgets.QWidget()
self.Derivative_centent.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.Derivative_centent.setObjectName("Derivative_centent")
self.Derivative_main_label = QtWidgets.QLabel(self.Derivative_centent)
self.Derivative_main_label.setGeometry(QtCore.QRect(0, 0, 701, 141))
self.Derivative_main_label.setFont(font)
self.Derivative_main_label.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.Derivative_main_label.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.Derivative_main_label.setWordWrap(True)
self.Derivative_main_label.setObjectName("Derivative_main_label")
self.Derivative_label_fx = QtWidgets.QLabel(self.Derivative_centent)
self.Derivative_label_fx.setGeometry(QtCore.QRect(60, 160, 71, 31))
self.Derivative_label_fx.setFont(font)
self.Derivative_label_fx.setStyleSheet('font-size:18px;')
self.Derivative_label_fx.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.Derivative_label_fx.setObjectName("Derivative_label_fx")
self.Derivative_input_value = QtWidgets.QLineEdit(self.Derivative_centent)
self.Derivative_input_value.setGeometry(QtCore.QRect(140, 160, 111, 31))
self.Derivative_input_value.setFont(font)
self.Derivative_input_value.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 18px;\n"
"padding-left:10px;\n"
"")
self.Derivative_input_value.setObjectName("Derivative_input_value")
self.Derivative_label_fxpr = QtWidgets.QLabel(self.Derivative_centent)
self.Derivative_label_fxpr.setGeometry(QtCore.QRect(60, 220, 71, 31))
self.Derivative_label_fxpr.setStyleSheet('font-size:18px;')
self.Derivative_label_fxpr.setFont(font)
self.Derivative_label_fxpr.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.Derivative_label_fxpr.setObjectName("Derivative_label_fxpr")
self.Derivative_label_fxpr_res = QtWidgets.QLabel(self.Derivative_centent)
self.Derivative_label_fxpr_res.setGeometry(QtCore.QRect(140, 220, 111, 31))
self.Derivative_label_fxpr_res.setFont(font)
self.Derivative_label_fxpr_res.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 18px;\n"
"padding-left:10px;\n"
"")
self.Derivative_label_fxpr_res.setText("")
self.Derivative_label_fxpr_res.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.Derivative_label_fxpr_res.setObjectName("Derivative_label_fxpr_res")
self.Derivative_sign = QtWidgets.QPushButton(self.Derivative_centent)
self.Derivative_sign.setGeometry(QtCore.QRect(65, 205, 50, 58))
self.Derivative_sign.setText('')
self.Derivative_sign.setIcon(icon3)
self.Derivative_sign.setIconSize(QtCore.QSize(48, 48))
self.Derivative_sign.setObjectName('Derivative_dxdy_operator')
self.Derivative_BTN_compute = QtWidgets.QPushButton(self.Derivative_centent)
self.Derivative_BTN_compute.setGeometry(QtCore.QRect(100, 350, 141, 41))
self.Derivative_BTN_compute.setFont(font)
self.Derivative_BTN_compute.setStyleSheet("QPushButton {\n"
" border-radius: 16px;\n"
" border-style: outset;\n"
" color: white;\n"
" font-size: 22px;\n"
" border: 1px solid;\n"
" border-color: rgb(232, 232, 232);\n"
"\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background: qradialgradient(\n"
" cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #bbb\n"
" );\n"
" color: black;\n"
" }\n"
"\n"
"QPushButton:pressed {\n"
" border-style: inset;\n"
" background: qradialgradient(\n"
" cx: 0.4, cy: -0.1, fx: 0.4, fy: -0.1,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #ddd);\n"
" }")
self.Derivative_BTN_compute.setObjectName("Derivative_BTN_compute")
self.Derivative_image_preview_dxdy = QtWidgets.QLabel(self.Derivative_centent)
self.Derivative_image_preview_dxdy.setGeometry(QtCore.QRect(410, 460, 271, 31))
self.Derivative_image_preview_dxdy.setText('Preview of the calculated figure')
self.Derivative_image_preview_dxdy.setFont(font)
self.Derivative_image_preview_dxdy.setStyleSheet("font-size: 18px")
self.Derivative_image_preview_dxdy.setObjectName('Derivative_image_preview_dxdy')
self.Derivative_frame_dxdy = QtWidgets.QFrame(self.Derivative_centent)
self.Derivative_frame_dxdy.setGeometry(QtCore.QRect(330, 120, 340, 340))
self.Derivative_frame_dxdy.setStyleSheet("border: 1px solid;\n"
"border-color: rgb(90, 90, 90);")
self.Derivative_frame_dxdy.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.Derivative_frame_dxdy.setFrameShadow(QtWidgets.QFrame.Raised)
self.Derivative_frame_dxdy.setObjectName("Derivative_frame_dxdy")
self.Derivative_plot_range = QtWidgets.QLabel(self.Derivative_centent)
self.Derivative_plot_range.setGeometry(QtCore.QRect(50, 275, 81, 41))
self.Derivative_plot_range.setFont(font)
self.Derivative_plot_range.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.Derivative_plot_range.setFrameShadow(QtWidgets.QFrame.Raised)
self.Derivative_plot_range.setTextFormat(QtCore.Qt.AutoText)
self.Derivative_plot_range.setScaledContents(False)
self.Derivative_plot_range.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.Derivative_plot_range.setWordWrap(True)
self.Derivative_plot_range.setObjectName("Derivative_plot_range")
self.Derivative_plot_range.setText('Range:')
self.Derivative_range_x1 = QtWidgets.QLineEdit(self.Derivative_centent)
self.Derivative_range_x1.setGeometry(QtCore.QRect(140, 282, 61, 31))
self.Derivative_range_x1.setFont(font)
self.Derivative_range_x1.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 18px;\n"
"padding-left:10px;\n"
"")
self.Derivative_range_x1.setObjectName("Derivative_range_x1")
self.Derivative_range_x2 = QtWidgets.QLineEdit(self.Derivative_centent)
self.Derivative_range_x2.setGeometry(QtCore.QRect(210, 282, 61, 31))
self.Derivative_range_x2.setFont(font)
self.Derivative_range_x2.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 18px;\n"
"padding-left:10px;\n"
"")
self.Derivative_range_x2.setObjectName("Derivative_range_x2")
self.stackedWidget_3 = QtWidgets.QStackedWidget(self.Derivative_centent)
self.stackedWidget_3.setGeometry(QtCore.QRect(0, 400, 321, 81))
self.stackedWidget_3.setStyleSheet("color: rgb(253, 41, 41);\n"
"font-size: 16px;")
self.stackedWidget_3.setObjectName("stackedWidget_3")
self.error_widget_4 = QtWidgets.QWidget()
self.error_widget_4.setObjectName("error_widget_4")
self.horizontalLayout_16 = QtWidgets.QHBoxLayout(self.error_widget_4)
self.horizontalLayout_16.setObjectName("horizontalLayout_16")
self.error_label_4 = QtWidgets.QLabel(self.error_widget_4)
self.error_label_4.setMaximumSize(QtCore.QSize(500, 16777215))
self.error_label_4.setFont(font)
self.error_label_4.setWordWrap(True)
self.error_label_4.setObjectName("error_label_4")
self.horizontalLayout_16.addWidget(self.error_label_4)
self.stackedWidget_3.addWidget(self.error_widget_4)
self.correct_widget_4 = QtWidgets.QWidget()
self.correct_widget_4.setObjectName("correct_widget_4")
self.horizontalLayout_correct_4 = QtWidgets.QHBoxLayout(self.correct_widget_4)
self.horizontalLayout_correct_4.setObjectName("horizontalLayout_correct_4")
self.correct_label_4 = QtWidgets.QLabel(self.correct_widget_4)
self.correct_label_4.setMaximumSize(QtCore.QSize(500, 16777215))
self.correct_label_4.setStyleSheet('color: blue;')
self.correct_label_4.setFont(font)
self.correct_label_4.setWordWrap(True)
self.correct_label_4.setObjectName("correct_label_4")
self.horizontalLayout_correct_4.addWidget(self.correct_label_4)
self.stackedWidget_3.addWidget(self.correct_widget_4)
self.error_widget_5 = QtWidgets.QWidget()
self.error_widget_5.setObjectName("error_widget_5")
self.horizontalLayout_17 = QtWidgets.QHBoxLayout(self.error_widget_5)
self.horizontalLayout_17.setObjectName("horizontalLayout_17")
self.error_label_5 = QtWidgets.QLabel(self.error_widget_5)
self.error_label_5.setFont(font)
self.error_label_5.setWordWrap(True)
self.error_label_5.setObjectName("error_label_5")
self.horizontalLayout_17.addWidget(self.error_label_5)
self.stackedWidget_3.addWidget(self.error_widget_5)
self.stackedWidget.addWidget(self.Derivative_centent)
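# Page: double integral - function input, x/y integration bounds, area result, preview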
self.d_Integral_content = QtWidgets.QWidget()
self.d_Integral_content.setObjectName("d_Integral_content")
self.d_Integral_main_label = QtWidgets.QLabel(self.d_Integral_content)
self.d_Integral_main_label.setGeometry(QtCore.QRect(0, 0, 701, 91))
self.d_Integral_main_label.setFont(font)
self.d_Integral_main_label.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.d_Integral_main_label.setWordWrap(True)
self.d_Integral_main_label.setObjectName("d_Integral_main_label")
self.d_Integral_label_fx = QtWidgets.QLabel(self.d_Integral_content)
self.d_Integral_label_fx.setGeometry(QtCore.QRect(50, 280, 141, 31))
self.d_Integral_label_fx.setFont(font)
self.d_Integral_label_fx.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.d_Integral_label_fx.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.d_Integral_label_fx.setObjectName("d_Integral_label_fx")
self.d_Integral_label_fxpr_res = QtWidgets.QLabel(self.d_Integral_content)
self.d_Integral_label_fxpr_res.setGeometry(QtCore.QRect(160, 280, 151, 31))
self.d_Integral_label_fxpr_res.setFont(font)
self.d_Integral_label_fxpr_res.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 18px;\n"
"padding-left:10px;\n"
"")
self.d_Integral_label_fxpr_res.setText("")
self.d_Integral_label_fxpr_res.setObjectName("d_Integral_label_fxpr_res")
self.d_Integral_sign = QtWidgets.QPushButton(self.d_Integral_content)
self.d_Integral_sign.setGeometry(QtCore.QRect(0, 260, 41, 71))
self.d_Integral_sign.setText("")
self.d_Integral_sign.setIcon(icon5)
self.d_Integral_sign.setIconSize(QtCore.QSize(64, 64))
self.d_Integral_sign.setObjectName("d_Integral_sign")
self.d_Integral_label_fx_2 = QtWidgets.QLabel(self.d_Integral_content)
self.d_Integral_label_fx_2.setGeometry(QtCore.QRect(30, 130, 91, 31))
self.d_Integral_label_fx_2.setFont(font)
self.d_Integral_label_fx_2.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.d_Integral_label_fx_2.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.d_Integral_label_fx_2.setObjectName("d_Integral_label_fx_2")
self.d_Integral_input_value = QtWidgets.QLineEdit(self.d_Integral_content)
self.d_Integral_input_value.setGeometry(QtCore.QRect(130, 130, 181, 31))
self.d_Integral_input_value.setFont(font)
self.d_Integral_input_value.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 18px;\n"
"padding-left:10px;\n"
"")
self.d_Integral_input_value.setObjectName("d_Integral_input_value")
self.d_Integral_BTN_compute = QtWidgets.QPushButton(self.d_Integral_content)
self.d_Integral_BTN_compute.setGeometry(QtCore.QRect(100, 410, 131, 41))
self.d_Integral_BTN_compute.setFont(font)
self.d_Integral_BTN_compute.setStyleSheet("QPushButton {\n"
" border-radius: 16px;\n"
" border-style: outset;\n"
" color: white;\n"
" font-size: 22px;\n"
" border: 1px solid;\n"
" border-color: rgb(232, 232, 232);\n"
"\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background: qradialgradient(\n"
" cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #bbb\n"
" );\n"
" color: black;\n"
" }\n"
"\n"
"QPushButton:pressed {\n"
" border-style: inset;\n"
" background: qradialgradient(\n"
" cx: 0.4, cy: -0.1, fx: 0.4, fy: -0.1,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #ddd);\n"
" }")
self.d_Integral_BTN_compute.setObjectName("d_Integral_BTN_compute")
self.d_Integral_plot_range = QtWidgets.QLabel(self.d_Integral_content)
self.d_Integral_plot_range.setGeometry(QtCore.QRect(0, 185, 121, 61))
self.d_Integral_plot_range.setFont(font)
self.d_Integral_plot_range.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.d_Integral_plot_range.setFrameShadow(QtWidgets.QFrame.Raised)
self.d_Integral_plot_range.setTextFormat(QtCore.Qt.AutoText)
self.d_Integral_plot_range.setScaledContents(False)
self.d_Integral_plot_range.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.d_Integral_plot_range.setWordWrap(True)
self.d_Integral_plot_range.setObjectName("d_Integral_plot_range")
self.d_Integral_plot_range.setText('Integration area:')
self.d_Integral_range_x1 = QtWidgets.QLineEdit(self.d_Integral_content)
self.d_Integral_range_x1.setGeometry(QtCore.QRect(130, 180, 91, 31))
self.d_Integral_range_x1.setFont(font)
self.d_Integral_range_x1.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 18px;\n"
"padding-left:10px;\n"
"")
self.d_Integral_range_x1.setObjectName("d_Integral_range_x1")
self.d_Integral_range_x2 = QtWidgets.QLineEdit(self.d_Integral_content)
self.d_Integral_range_x2.setGeometry(QtCore.QRect(230, 180, 91, 31))
self.d_Integral_range_x2.setFont(font)
self.d_Integral_range_x2.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 18px;\n"
"padding-left:10px;\n"
"")
self.d_Integral_range_x2.setObjectName("d_Integral_range_x2")
self.d_Integral_range_y1 = QtWidgets.QLineEdit(self.d_Integral_content)
self.d_Integral_range_y1.setGeometry(QtCore.QRect(130, 220, 91, 31))
self.d_Integral_range_y1.setFont(font)
self.d_Integral_range_y1.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 18px;\n"
"padding-left:10px;\n"
"")
self.d_Integral_range_y1.setObjectName("d_Integral_range_y1")
self.d_Integral_range_y2 = QtWidgets.QLineEdit(self.d_Integral_content)
self.d_Integral_range_y2.setGeometry(QtCore.QRect(230, 220, 91, 31))
self.d_Integral_range_y2.setFont(font)
self.d_Integral_range_y2.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 18px;\n"
"padding-left:10px;\n"
"")
self.d_Integral_range_y2.setObjectName("d_Integral_range_y2")
self.d_Integral_label_P = QtWidgets.QLabel(self.d_Integral_content)
self.d_Integral_label_P.setGeometry(QtCore.QRect(40, 340, 81, 31))
self.d_Integral_label_P.setFont(font)
self.d_Integral_label_P.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.d_Integral_label_P.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.d_Integral_label_P.setObjectName("d_Integral_label_P")
self.d_Integral_label_P_res = QtWidgets.QLabel(self.d_Integral_content)
self.d_Integral_label_P_res.setGeometry(QtCore.QRect(130, 340, 181, 31))
self.d_Integral_label_P_res.setFont(font)
self.d_Integral_label_P_res.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 18px;\n"
"padding-left:10px;\n"
"")
self.d_Integral_label_P_res.setText("")
self.d_Integral_label_P_res.setObjectName("d_Integral_label_P_res")
self.d_Integral_image_frame_preview = QtWidgets.QFrame(self.d_Integral_content)
self.d_Integral_image_frame_preview.setGeometry(QtCore.QRect(330, 110, 340, 340))
self.d_Integral_image_frame_preview.setStyleSheet("border: 1px solid;\n"
"border-color: rgb(90, 90, 90);")
self.d_Integral_image_frame_preview.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.d_Integral_image_frame_preview.setFrameShadow(QtWidgets.QFrame.Raised)
self.d_Integral_image_frame_preview.setObjectName("d_Integral_image_frame_preview")
self.d_Integral_image_label_preview_fig = QtWidgets.QLabel(self.d_Integral_image_frame_preview)
self.d_Integral_image_label_preview_fig.setGeometry(QtCore.QRect(0, 0, 340, 340))
self.d_Integral_image_label_preview_fig.setText("")
self.d_Integral_image_label_preview_fig.setScaledContents(True)
self.d_Integral_image_label_preview_fig.setObjectName("d_Integral_image_label_preview_fig")
self.d_Integral_image_label_preview = QtWidgets.QLabel(self.d_Integral_content)
self.d_Integral_image_label_preview.setGeometry(QtCore.QRect(410, 450, 271, 31))
self.d_Integral_image_label_preview.setText('Preview of the calculated figure')
self.d_Integral_image_label_preview.setFont(font)
self.d_Integral_image_label_preview.setStyleSheet("font-size: 18px")
self.d_Integral_image_label_preview.setObjectName('d_Integral_image_label_preview')
self.stackedWidget_5 = QtWidgets.QStackedWidget(self.d_Integral_content)
self.stackedWidget_5.setGeometry(QtCore.QRect(20, 470, 341, 61))
self.stackedWidget_5.setStyleSheet("color: rgb(253, 41, 41);\n"
"font-size: 16px;")
self.stackedWidget_5.setObjectName("stackedWidget_5")
self.error_widget_8 = QtWidgets.QWidget()
self.error_widget_8.setObjectName("error_widget_8")
self.horizontalLayout_20 = QtWidgets.QHBoxLayout(self.error_widget_8)
self.horizontalLayout_20.setObjectName("horizontalLayout_20")
self.error_label_8 = QtWidgets.QLabel(self.error_widget_8)
self.error_label_8.setMaximumSize(QtCore.QSize(500, 16777215))
self.error_label_8.setFont(font)
self.error_label_8.setWordWrap(True)
self.error_label_8.setObjectName("error_label_8")
self.horizontalLayout_20.addWidget(self.error_label_8)
self.stackedWidget_5.addWidget(self.error_widget_8)
self.error_widget_9 = QtWidgets.QWidget()
self.error_widget_9.setObjectName("error_widget_9")
self.horizontalLayout_21 = QtWidgets.QHBoxLayout(self.error_widget_9)
self.horizontalLayout_21.setObjectName("horizontalLayout_21")
self.error_label_9 = QtWidgets.QLabel(self.error_widget_9)
self.error_label_9.setFont(font)
self.error_label_9.setWordWrap(True)
self.error_label_9.setObjectName("error_label_9")
self.horizontalLayout_21.addWidget(self.error_label_9)
self.stackedWidget_5.addWidget(self.error_widget_9)
self.correct_widget_9 = QtWidgets.QWidget()
self.correct_widget_9.setObjectName("correct_widget_9")
self.horizontalLayout_correct_9 = QtWidgets.QHBoxLayout(self.correct_widget_9)
self.horizontalLayout_correct_9.setObjectName("horizontalLayout_correct_9")
self.correct_label_9 = QtWidgets.QLabel(self.correct_widget_9)
self.correct_label_9.setStyleSheet('color:blue;')
self.correct_label_9.setFont(font)
self.correct_label_9.setWordWrap(True)
self.correct_label_9.setObjectName("correct_label_9")
self.horizontalLayout_correct_9.addWidget(self.correct_label_9)
self.stackedWidget_5.addWidget(self.correct_widget_9)
self.stackedWidget.addWidget(self.d_Integral_content)
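# Page: curved (triple) integral - function input, x/y/z bounds, symbolic and volume results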
self.c_Integral_content = QtWidgets.QWidget()
self.c_Integral_content.setObjectName("c_Integral_content")
self.c_Integral_input_value_fx = QtWidgets.QLineEdit(self.c_Integral_content)
self.c_Integral_input_value_fx.setGeometry(QtCore.QRect(100, 111, 221, 31))
self.c_Integral_input_value_fx.setFont(font)
self.c_Integral_input_value_fx.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 16px;\n"
"padding-left:10px;\n"
"")
self.c_Integral_input_value_fx.setObjectName("c_Integral_input_value_fx")
self.c_Integral_BTN_compute = QtWidgets.QPushButton(self.c_Integral_content)
self.c_Integral_BTN_compute.setGeometry(QtCore.QRect(80, 410, 141, 41))
self.c_Integral_BTN_compute.setFont(font)
self.c_Integral_BTN_compute.setStyleSheet("QPushButton {\n"
" border-radius: 16px;\n"
" border-style: outset;\n"
" color: white;\n"
" font-size: 22px;\n"
" border: 1px solid;\n"
" border-color: rgb(232, 232, 232);\n"
"\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background: qradialgradient(\n"
" cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #bbb\n"
" );\n"
" color: black;\n"
" }\n"
"\n"
"QPushButton:pressed {\n"
" border-style: inset;\n"
" background: qradialgradient(\n"
" cx: 0.4, cy: -0.1, fx: 0.4, fy: -0.1,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #ddd);\n"
" }")
self.c_Integral_BTN_compute.setObjectName("c_Integral_BTN_compute")
self.c_Integral_main_label = QtWidgets.QLabel(self.c_Integral_content)
self.c_Integral_main_label.setGeometry(QtCore.QRect(0, 0, 701, 91))
self.c_Integral_main_label.setFont(font)
self.c_Integral_main_label.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.c_Integral_main_label.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.c_Integral_main_label.setWordWrap(True)
self.c_Integral_main_label.setObjectName("c_Integral_main_label")
self.c_Integral_label_fx = QtWidgets.QLabel(self.c_Integral_content)
self.c_Integral_label_fx.setGeometry(QtCore.QRect(0, 110, 91, 31))
self.c_Integral_label_fx.setFont(font)
self.c_Integral_label_fx.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.c_Integral_label_fx.setObjectName("c_Integral_label_fx")
self.c_Integral_label_EP = QtWidgets.QLabel(self.c_Integral_content)
self.c_Integral_label_EP.setGeometry(QtCore.QRect(0, 150, 101, 81))
self.c_Integral_label_EP.setFont(font)
self.c_Integral_label_EP.setWordWrap(True)
self.c_Integral_label_EP.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.c_Integral_label_EP.setObjectName("c_Integral_label_EP")
self.c_Integral_input_value_x1 = QtWidgets.QLineEdit(self.c_Integral_content)
self.c_Integral_input_value_x1.setGeometry(QtCore.QRect(110, 160, 101, 31))
self.c_Integral_input_value_x1.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 16px;\n"
"padding-left:10px;\n"
"")
self.c_Integral_input_value_x1.setObjectName("c_Integral_input_value_x1")
self.c_Integral_input_value_x2 = QtWidgets.QLineEdit(self.c_Integral_content)
self.c_Integral_input_value_x2.setGeometry(QtCore.QRect(220, 160, 101, 31))
self.c_Integral_input_value_x2.setFont(font)
self.c_Integral_input_value_x2.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 16px;\n"
"padding-left:10px;\n"
"")
self.c_Integral_input_value_x2.setObjectName("c_Integral_input_value_x2")
self.c_Integral_input_value_y1 = QtWidgets.QLineEdit(self.c_Integral_content)
self.c_Integral_input_value_y1.setGeometry(QtCore.QRect(110, 200, 101, 31))
self.c_Integral_input_value_y1.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 16px;\n"
"padding-left:10px;\n"
"")
self.c_Integral_input_value_y1.setObjectName("c_Integral_input_value_y1")
self.c_Integral_input_value_y2 = QtWidgets.QLineEdit(self.c_Integral_content)
self.c_Integral_input_value_y2.setGeometry(QtCore.QRect(220, 200, 101, 31))
self.c_Integral_input_value_y2.setFont(font)
self.c_Integral_input_value_y2.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 16px;\n"
"padding-left:10px;\n"
"")
self.c_Integral_input_value_y2.setObjectName("c_Integral_input_value_y2")
self.c_Integral_input_value_z1 = QtWidgets.QLineEdit(self.c_Integral_content)
self.c_Integral_input_value_z1.setGeometry(QtCore.QRect(110, 240, 101, 31))
self.c_Integral_input_value_z1.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 16px;\n"
"padding-left:10px;\n"
"")
self.c_Integral_input_value_z1.setObjectName("c_Integral_input_value_z1")
self.c_Integral_input_value_z2 = QtWidgets.QLineEdit(self.c_Integral_content)
self.c_Integral_input_value_z2.setGeometry(QtCore.QRect(220, 240, 101, 31))
self.c_Integral_input_value_z2.setFont(font)
self.c_Integral_input_value_z2.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 16px;\n"
"padding-left:10px;\n"
"")
self.c_Integral_input_value_z2.setObjectName("c_Integral_input_value_z2")
self.c_integral_sign = QtWidgets.QPushButton(self.c_Integral_content)
self.c_integral_sign.setGeometry(QtCore.QRect(0, 280, 41, 71))
self.c_integral_sign.setText("")
self.c_integral_sign.setIcon(icon6)
self.c_integral_sign.setIconSize(QtCore.QSize(56, 56))
self.c_integral_sign.setObjectName("c_integral_sign")
self.c_Integral_label_func = QtWidgets.QLabel(self.c_Integral_content)
self.c_Integral_label_func.setGeometry(QtCore.QRect(40, 295, 131, 31))
self.c_Integral_label_func.setFont(font)
self.c_Integral_label_func.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.c_Integral_label_func.setObjectName("c_Integral_label_func")
self.c_Integral_label_volume = QtWidgets.QLabel(self.c_Integral_content)
self.c_Integral_label_volume.setGeometry(QtCore.QRect(70, 350, 101, 31))
self.c_Integral_label_volume.setFont(font)
self.c_Integral_label_volume.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.c_Integral_label_volume.setObjectName("c_Integral_label_volume")
self.c_Integral_label_symbolic_res = QtWidgets.QLabel(self.c_Integral_content)
self.c_Integral_label_symbolic_res.setGeometry(QtCore.QRect(180, 296, 141, 31))
self.c_Integral_label_symbolic_res.setFont(font)
self.c_Integral_label_symbolic_res.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 16px;\n"
"padding-left:10px;\n"
"")
self.c_Integral_label_symbolic_res.setText("")
self.c_Integral_label_symbolic_res.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.c_Integral_label_symbolic_res.setObjectName("c_Integral_label_symbolic_res")
self.c_Integral_label_volume_res = QtWidgets.QLabel(self.c_Integral_content)
self.c_Integral_label_volume_res.setGeometry(QtCore.QRect(180, 351, 141, 31))
self.c_Integral_label_volume_res.setFont(font)
self.c_Integral_label_volume_res.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 16px;\n"
"padding-left:10px;\n"
"")
self.c_Integral_label_volume_res.setText("")
self.c_Integral_label_volume_res.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.c_Integral_label_volume_res.setObjectName("c_Integral_label_volume_res")
# Plot preview frame and caption for the curved-integral page
self.c_Integral_image_frame_preview = QtWidgets.QFrame(self.c_Integral_content)
self.c_Integral_image_frame_preview.setGeometry(QtCore.QRect(330, 110, 340, 340))
self.c_Integral_image_frame_preview.setStyleSheet("border: 1px solid;\n"
"border-color: rgb(90, 90, 90);")
self.c_Integral_image_frame_preview.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.c_Integral_image_frame_preview.setFrameShadow(QtWidgets.QFrame.Raised)
self.c_Integral_image_frame_preview.setObjectName("c_Integral_image_frame_preview")
self.c_Integral_image_label_preview_fig = QtWidgets.QLabel(self.c_Integral_image_frame_preview)
self.c_Integral_image_label_preview_fig.setGeometry(QtCore.QRect(0,0,340,340))
self.c_Integral_image_label_preview_fig.setText("")
self.c_Integral_image_label_preview_fig.setScaledContents(True)
self.c_Integral_image_label_preview_fig.setObjectName("c_Integral_image_label_preview_fig")
self.c_Integral_image_label_preview = QtWidgets.QLabel(self.c_Integral_content)
self.c_Integral_image_label_preview.setGeometry(QtCore.QRect(410, 450, 271, 31))
self.c_Integral_image_label_preview.setText('Preview calculated figure')
self.c_Integral_image_label_preview.setFont(font)
self.c_Integral_image_label_preview.setStyleSheet("font-size: 18px")
self.c_Integral_image_label_preview.setObjectName('c_Integral_image_label_preview')
#
self.stackedWidget_6 = QtWidgets.QStackedWidget(self.c_Integral_content)
self.stackedWidget_6.setGeometry(QtCore.QRect(20, 470, 341, 61))
self.stackedWidget_6.setStyleSheet("color: rgb(253, 41, 41);\n"
"font-size: 16px;")
self.stackedWidget_6.setObjectName("stackedWidget_6")
self.error_widget_10 = QtWidgets.QWidget()
self.error_widget_10.setObjectName("error_widget_10")
self.horizontalLayout_22 = QtWidgets.QHBoxLayout(self.error_widget_10)
self.horizontalLayout_22.setObjectName("horizontalLayout_22")
self.error_label_10 = QtWidgets.QLabel(self.error_widget_10)
self.error_label_10.setMaximumSize(QtCore.QSize(500, 16777215))
self.error_label_10.setFont(font)
self.error_label_10.setWordWrap(True)
self.error_label_10.setObjectName("error_label_10")
self.horizontalLayout_22.addWidget(self.error_label_10)
self.stackedWidget_6.addWidget(self.error_widget_10)
self.error_widget_11 = QtWidgets.QWidget()
self.error_widget_11.setObjectName("error_widget_11")
self.horizontalLayout_23 = QtWidgets.QHBoxLayout(self.error_widget_11)
self.horizontalLayout_23.setObjectName("horizontalLayout_23")
self.error_label_11 = QtWidgets.QLabel(self.error_widget_11)
self.error_label_11.setFont(font)
self.error_label_11.setWordWrap(True)
self.error_label_11.setObjectName("error_label_11")
self.horizontalLayout_23.addWidget(self.error_label_11)
self.stackedWidget_6.addWidget(self.error_widget_11)
self.stackedWidget.addWidget(self.c_Integral_content)
self.delta_content = QtWidgets.QWidget()
self.delta_content.setObjectName("delta_content")
self.Delta_input_value_A = QtWidgets.QLineEdit(self.delta_content)
self.Delta_input_value_A.setGeometry(QtCore.QRect(90, 260, 51, 31))
self.Delta_input_value_A.setFont(font)
self.Delta_input_value_A.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 18px;\n"
"padding-left:10px;\n"
"")
self.Delta_input_value_A.setObjectName("Delta_input_value_A")
self.Delta_input_value_B = QtWidgets.QLineEdit(self.delta_content)
self.Delta_input_value_B.setGeometry(QtCore.QRect(150, 260, 51, 31))
self.Delta_input_value_B.setFont(font)
self.Delta_input_value_B.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 18px;\n"
"padding-left:10px;\n"
"")
self.Delta_input_value_B.setObjectName("Delta_input_value_B")
self.Delta_input_value_C = QtWidgets.QLineEdit(self.delta_content)
self.Delta_input_value_C.setGeometry(QtCore.QRect(210, 260, 51, 31))
self.Delta_input_value_C.setFont(font)
self.Delta_input_value_C.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 18px;\n"
"padding-left:10px;\n"
"")
self.Delta_input_value_C.setObjectName("Delta_input_value_C")
self.Delta_BTN_compute_2 = QtWidgets.QPushButton(self.delta_content)
self.Delta_BTN_compute_2.setGeometry(QtCore.QRect(80, 360, 141, 41))
self.Delta_BTN_compute_2.setFont(font)
self.Delta_BTN_compute_2.setStyleSheet("QPushButton {\n"
" border-radius: 16px;\n"
" border-style: outset;\n"
" color: white;\n"
" font-size: 22px;\n"
" border: 1px solid;\n"
" border-color: rgb(232, 232, 232);\n"
"\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background: qradialgradient(\n"
" cx: 0.3, cy: -0.4, fx: 0.3, fy: -0.4,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #bbb\n"
" );\n"
" color: black;\n"
" }\n"
"\n"
"QPushButton:pressed {\n"
" border-style: inset;\n"
" background: qradialgradient(\n"
" cx: 0.4, cy: -0.1, fx: 0.4, fy: -0.1,\n"
" radius: 1.35, stop: 0 #fff, stop: 1 #ddd);\n"
" }")
self.Delta_BTN_compute_2.setObjectName("Delta_BTN_compute_2")
self.Delta_main_label_2 = QtWidgets.QLabel(self.delta_content)
self.Delta_main_label_2.setGeometry(QtCore.QRect(0, 0, 701, 71))
self.Delta_main_label_2.setFont(font)
self.Delta_main_label_2.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.Delta_main_label_2.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.Delta_main_label_2.setWordWrap(True)
self.Delta_main_label_2.setObjectName("Delta_main_label_2")
self.Delta_label_fx_2 = QtWidgets.QLabel(self.delta_content)
self.Delta_label_fx_2.setGeometry(QtCore.QRect(70, 215, 141, 31))
self.Delta_label_fx_2.setFont(font)
self.Delta_label_fx_2.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.Delta_label_fx_2.setObjectName("Delta_label_fx_2")
self.Delta_label_range = QtWidgets.QLabel(self.delta_content)
self.Delta_label_range.setGeometry(QtCore.QRect(0, 260, 81, 31))
self.Delta_label_range.setFont(font)
self.Delta_label_range.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.Delta_label_range.setObjectName("Delta_label_range")
self.Delta_label_result_x2 = QtWidgets.QLabel(self.delta_content)
self.Delta_label_result_x2.setGeometry(QtCore.QRect(40, 310, 81, 31))
self.Delta_label_result_x2.setFont(font)
self.Delta_label_result_x2.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 18px;\n"
"padding-left:10px;\n"
"")
self.Delta_label_result_x2.setText("")
self.Delta_label_result_x2.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.Delta_label_result_x2.setObjectName("Delta_label_result_x2")
self.Delta_result_x1 = QtWidgets.QLabel(self.delta_content)
self.Delta_result_x1.setGeometry(QtCore.QRect(0, 310, 31, 31))
self.Delta_result_x1.setFont(font)
self.Delta_result_x1.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.Delta_result_x1.setObjectName("Delta_result_x1")
self.Delta_main_label_3 = QtWidgets.QLabel(self.delta_content)
self.Delta_main_label_3.setGeometry(QtCore.QRect(0, 80, 701, 91))
self.Delta_main_label_3.setFont(font)
self.Delta_main_label_3.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.Delta_main_label_3.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.Delta_main_label_3.setWordWrap(True)
self.Delta_main_label_3.setObjectName("Delta_main_label_3")
self.Delta_label_result_x1 = QtWidgets.QLabel(self.delta_content)
self.Delta_label_result_x1.setGeometry(QtCore.QRect(170, 310, 81, 31))
self.Delta_label_result_x1.setFont(font)
self.Delta_label_result_x1.setStyleSheet("border: 1px solid;\n"
"border-color: white;\n"
"border-radius: 15px;\n"
"\n"
"color: rgb(235, 235, 235);\n"
"font-size: 18px;\n"
"padding-left:10px;\n"
"")
self.Delta_label_result_x1.setText("")
self.Delta_label_result_x1.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.Delta_label_result_x1.setObjectName("Delta_label_result_x1")
self.Delta_result_x1_2 = QtWidgets.QLabel(self.delta_content)
self.Delta_result_x1_2.setGeometry(QtCore.QRect(130, 310, 31, 31))
self.Delta_result_x1_2.setFont(font)
self.Delta_result_x1_2.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.Delta_result_x1_2.setObjectName("Delta_result_x1_2")
self.Delta_image_frame_preview = QtWidgets.QFrame(self.delta_content)
self.Delta_image_frame_preview.setGeometry(QtCore.QRect(330, 170, 340, 340))
self.Delta_image_frame_preview.setStyleSheet("border: 1px solid;\n"
"border-color: rgb(90, 90, 90);")
self.Delta_image_frame_preview.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.Delta_image_frame_preview.setFrameShadow(QtWidgets.QFrame.Raised)
self.Delta_image_frame_preview.setObjectName("Delta_image_frame_preview")
self.Delta_image_label_preview_fig = QtWidgets.QLabel(self.Delta_image_frame_preview)
self.Delta_image_label_preview_fig.setGeometry(QtCore.QRect(0,0,340,340))
self.Delta_image_label_preview_fig.setText("")
self.Delta_image_label_preview_fig.setScaledContents(True)
self.Delta_image_label_preview_fig.setObjectName("Delta_image_label_preview_fig")
self.Delta_image_label_preview = QtWidgets.QLabel(self.delta_content)
self.Delta_image_label_preview.setGeometry(QtCore.QRect(410, 510, 271, 31))
self.Delta_image_label_preview.setText('Preview calculated figure')
self.Delta_image_label_preview.setFont(font)
self.Delta_image_label_preview.setStyleSheet("font-size: 18px")
self.Delta_image_label_preview.setObjectName('Delta_image_label_preview')
self.stackedWidget_7 = QtWidgets.QStackedWidget(self.delta_content)
self.stackedWidget_7.setGeometry(QtCore.QRect(0, 410, 291, 81))
self.stackedWidget_7.setStyleSheet("color: rgb(253, 41, 41);\n"
"font-size: 16px;")
self.stackedWidget_7.setObjectName("stackedWidget_7")
self.error_widget_12 = QtWidgets.QWidget()
self.error_widget_12.setObjectName("error_widget_12")
self.horizontalLayout_24 = QtWidgets.QHBoxLayout(self.error_widget_12)
self.horizontalLayout_24.setObjectName("horizontalLayout_24")
self.error_label_12 = QtWidgets.QLabel(self.error_widget_12)
self.error_label_12.setMaximumSize(QtCore.QSize(500, 16777215))
self.error_label_12.setFont(font)
self.error_label_12.setWordWrap(True)
self.error_label_12.setObjectName("error_label_12")
self.horizontalLayout_24.addWidget(self.error_label_12)
self.stackedWidget_7.addWidget(self.error_widget_12)
self.error_widget_13 = QtWidgets.QWidget()
self.error_widget_13.setObjectName("error_widget_13")
self.horizontalLayout_25 = QtWidgets.QHBoxLayout(self.error_widget_13)
self.horizontalLayout_25.setObjectName("horizontalLayout_25")
self.error_label_13 = QtWidgets.QLabel(self.error_widget_13)
self.error_label_13.setFont(font)
self.error_label_13.setWordWrap(True)
self.error_label_13.setObjectName("error_label_13")
self.horizontalLayout_25.addWidget(self.error_label_13)
self.stackedWidget_7.addWidget(self.error_widget_13)
self.correct_widget_14 = QtWidgets.QWidget()
self.correct_widget_14.setObjectName("correct_widget_14")
self.horizontalLayout_26 = QtWidgets.QHBoxLayout(self.correct_widget_14)
self.horizontalLayout_26.setObjectName("horizontalLayout_26")
self.correct_label_14 = QtWidgets.QLabel(self.correct_widget_14)
self.correct_label_14.setFont(font)
self.correct_label_14.setWordWrap(True)
self.correct_label_14.setObjectName("correct_label_14")
self.correct_label_14.setStyleSheet('color:blue;')
self.horizontalLayout_26.addWidget(self.correct_label_14)
self.stackedWidget_7.addWidget(self.correct_widget_14)
self.stackedWidget.addWidget(self.delta_content)
self.horizontalLayout_11.addWidget(self.stackedWidget)
self.verticalLayout_6.addWidget(self.Bottom_right_content_in)
self.Bottom_right_copyright = QtWidgets.QFrame(self.Bottom_right_content_out)
self.Bottom_right_copyright.setMaximumSize(QtCore.QSize(16777215, 30))
self.Bottom_right_copyright.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.Bottom_right_copyright.setFrameShadow(QtWidgets.QFrame.Raised)
self.Bottom_right_copyright.setObjectName("Bottom_right_copyright")
self.horizontalLayout_12 = QtWidgets.QHBoxLayout(self.Bottom_right_copyright)
self.horizontalLayout_12.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_12.setSpacing(0)
self.horizontalLayout_12.setObjectName("horizontalLayout_12")
self.Copyright_label = QtWidgets.QLabel(self.Bottom_right_copyright)
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.Copyright_label.setFont(font)
self.Copyright_label.setStyleSheet("color: rgb(235, 235, 235)")
self.Copyright_label.setObjectName("Copyright_label")
self.horizontalLayout_12.addWidget(self.Copyright_label)
self.verticalLayout_6.addWidget(self.Bottom_right_copyright, 0, QtCore.Qt.AlignRight)
self.horizontalLayout_4.addWidget(self.Bottom_right_content_out)
self.verticalLayout.addWidget(self.Bottom_bar)
self.Plot_preview_label = QtWidgets.QLabel(self.Plot_frame)
self.Plot_preview_label.setGeometry(QtCore.QRect(0,0,350,350))
self.Plot_preview_label.setText("")
self.Plot_preview_label.setScaledContents(True)
self.Plot_preview_label.setObjectName("Plot_preview_label")
self.Derivative_preview_label_dxdy = QtWidgets.QLabel(self.Derivative_frame_dxdy)
self.Derivative_preview_label_dxdy.setGeometry(QtCore.QRect(0,0,340,340))
self.Derivative_preview_label_dxdy.setText("")
self.Derivative_preview_label_dxdy.setScaledContents(True)
self.Derivative_preview_label_dxdy.setObjectName("Derivative_preview_label_dxdy")
self.Plot_warrning_note = QtWidgets.QLabel(self.Plot_content)
self.Plot_warrning_note.setGeometry(QtCore.QRect(0, 570, 701, 61))
self.Plot_warrning_note.setText('Note: an error may occur if you enter a function that cannot be evaluated, e.g. x/0 or log(0).')
self.Plot_warrning_note.setFont(font)
self.Plot_warrning_note.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.Plot_warrning_note.setObjectName('Plot_warrning_note')
self.Plot_warrning_note.setFrameShadow(QtWidgets.QFrame.Raised)
self.Plot_warrning_note.setTextFormat(QtCore.Qt.AutoText)
self.Plot_warrning_note.setScaledContents(False)
self.Plot_warrning_note.setWordWrap(True)
self.Derivative_warrning_note = QtWidgets.QLabel(self.Derivative_centent)
self.Derivative_warrning_note.setGeometry(QtCore.QRect(0, 570, 701, 61))
self.Derivative_warrning_note.setText('Note: an error may occur if you enter a function that cannot be evaluated, e.g. x/0 or log(0).')
self.Derivative_warrning_note.setFont(font)
self.Derivative_warrning_note.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.Derivative_warrning_note.setObjectName('Derivative_warrning_note')
self.Derivative_warrning_note.setFrameShadow(QtWidgets.QFrame.Raised)
self.Derivative_warrning_note.setTextFormat(QtCore.Qt.AutoText)
self.Derivative_warrning_note.setScaledContents(False)
self.Derivative_warrning_note.setWordWrap(True)
self.Integral_warrning_note = QtWidgets.QLabel(self.Integral_content)
self.Integral_warrning_note.setGeometry(QtCore.QRect(0, 570, 701, 61))
self.Integral_warrning_note.setText('Note: an error may occur if you enter a function that cannot be evaluated, e.g. x/0 or log(0).')
self.Integral_warrning_note.setFont(font)
self.Integral_warrning_note.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.Integral_warrning_note.setObjectName('Integral_warrning_note')
self.Integral_warrning_note.setFrameShadow(QtWidgets.QFrame.Raised)
self.Integral_warrning_note.setTextFormat(QtCore.Qt.AutoText)
self.Integral_warrning_note.setScaledContents(False)
self.Integral_warrning_note.setWordWrap(True)
self.d_Integral_warrning_note = QtWidgets.QLabel(self.d_Integral_content)
self.d_Integral_warrning_note.setGeometry(QtCore.QRect(0, 570, 701, 61))
self.d_Integral_warrning_note.setText('Note: an error may occur if you enter a function that cannot be evaluated, e.g. x/0 or log(0).')
self.d_Integral_warrning_note.setFont(font)
self.d_Integral_warrning_note.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.d_Integral_warrning_note.setObjectName('d_Integral_warrning_note')
self.d_Integral_warrning_note.setFrameShadow(QtWidgets.QFrame.Raised)
self.d_Integral_warrning_note.setTextFormat(QtCore.Qt.AutoText)
self.d_Integral_warrning_note.setScaledContents(False)
self.d_Integral_warrning_note.setWordWrap(True)
self.c_Integral_warrning_note = QtWidgets.QLabel(self.c_Integral_content)
self.c_Integral_warrning_note.setGeometry(QtCore.QRect(0, 570, 701, 61))
self.c_Integral_warrning_note.setText('Note: an error may occur if you enter a function that cannot be evaluated, e.g. x/0 or log(0).')
self.c_Integral_warrning_note.setFont(font)
self.c_Integral_warrning_note.setStyleSheet("color: rgb(235, 235, 235);\n"
"font-size: 18px;")
self.c_Integral_warrning_note.setObjectName('c_Integral_warrning_note')
self.c_Integral_warrning_note.setFrameShadow(QtWidgets.QFrame.Raised)
self.c_Integral_warrning_note.setTextFormat(QtCore.Qt.AutoText)
self.c_Integral_warrning_note.setScaledContents(False)
self.c_Integral_warrning_note.setWordWrap(True)
MainWindow.setCentralWidget(self.Main)
self.retranslateUi(MainWindow)
self.stackedWidget_2.setCurrentIndex(0)
self.stackedWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
self.set_page(MainWindow)
self.set_toggle_flag()
self.set_figure_flags()
self.plot_expressions()
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.Home_title_label.setText(_translate("MainWindow", "Home"))
self.Derivative_label.setText(_translate("MainWindow", "Derivative"))
self.Integral_label.setText(_translate("MainWindow", "Integral"))
self.d_integral_title_label.setText(_translate("MainWindow", "Double Integral"))
self.c_integral_title_label.setText(_translate("MainWindow", "Triple Integral"))
self.Plot_title_label.setText(_translate("MainWindow", "Plots"))
self.delta_title_label.setText(_translate("MainWindow", "Quadratic Equation"))
self.Home_label_2.setText(_translate("MainWindow", "This is a demonstration version of the program. The software is aimed at people studying mathematics in high school and at people doing scientific work. Its core function is computing advanced mathematical operations such as integrals and derivatives; it also provides basic features such as plotting user-defined functions."))
self.Home_label_1.setText(_translate("MainWindow", "About Scientific Calculator"))
self.Home_label_3.setText(_translate("MainWindow", "Scientific Calculator\'s features:"))
self.Home_label_4.setText(_translate("MainWindow", "- Creating plots"))
self.Home_label_5.setText(_translate("MainWindow", "- Calculating derivatives"))
self.Home_label_6.setText(_translate("MainWindow", "- Calculating integrals"))
self.Home_label_7.setText(_translate("MainWindow", "- Calculating double integrals"))
self.Home_label_8.setText(_translate("MainWindow", "- Calculating triple integrals"))
self.Home_label_9.setText(_translate("MainWindow", "- Solving quadratic equations"))
self.Home_label_10.setText(_translate("MainWindow", "About Author"))
self.Home_label_11.setText(_translate("MainWindow", "The author of this software is <NAME>. The program was created solely for portfolio purposes."))
self.Integral_main_label.setText(_translate("MainWindow", "In mathematics, an integral assigns numbers to functions in a way that can describe displacement, area, volume, and other concepts that arise by combining infinitesimal data. Integration is one of the two main operations of calculus; its inverse operation, differentiation, is the other. Given a function f of a real variable x and an interval [a, b] of the real line, the definite integral of f from a to b can be interpreted informally as the signed area of the region in the xy-plane that is bounded by the graph of f, the x-axis and the vertical lines x = a and x = b. Source: Wikipedia"))
self.Integral_label_fx.setText(_translate("MainWindow", "f(x) ="))
self.Integral_input_value.setPlaceholderText(_translate("MainWindow", "x**2"))
self.Integral_label_fx_2.setText(_translate("MainWindow", "f(x) dx ="))
self.Integral_label_P.setText(_translate("MainWindow", "P(x) ="))
self.d_Integral_label_P.setText(_translate("MainWindow", "P(x,y) ="))
self.Integral_BTN_compute.setText(_translate("MainWindow", "Compute"))
self.error_label_6.setText(_translate("MainWindow", "Error: an input field was left blank; make sure you enter correct values"))
self.error_label_7.setText(_translate("MainWindow", "Error: wrong data type in an input field; make sure you enter correct values"))
self.correct_label_7.setText(_translate("MainWindow", "The function has been calculated"))
self.Plot_label_1.setText(_translate("MainWindow", "Enter your values in the input fields and click Compute to generate a plot from your data. Only integer or float values are accepted."))
self.Plot_fn_edit.setPlaceholderText(_translate("MainWindow", "ax+b"))
self.Plot_fn_sign_label.setText(_translate("MainWindow", "f(x) = "))
self.Plot_range_sign.setText(_translate("MainWindow", "Range:"))
self.Plot_range_x1.setPlaceholderText(_translate("MainWindow", "x1"))
self.Plot_color_sign.setText(_translate("MainWindow", "Color:"))
self.Plot_range_x2.setPlaceholderText(_translate("MainWindow", "x2"))
self.Plot_combo_color.setItemText(0, _translate("MainWindow", "Red"))
self.Plot_combo_color.setItemText(1, _translate("MainWindow", "Blue"))
self.Plot_combo_color.setItemText(2, _translate("MainWindow", "Purple"))
self.Plot_combo_color.setItemText(3, _translate("MainWindow", "Yellow"))
self.Plot_combo_color.setItemText(4, _translate("MainWindow", "Teal"))
self.Plot_combo_color.setItemText(5, _translate("MainWindow", "Green"))
self.Plot_BTN_generate.setText(_translate("MainWindow", "Compute"))
self.Plot_label_2.setText(_translate("MainWindow", "Preview calculated figure"))
self.error_label_1.setText(_translate("MainWindow", "Error: an input field was left blank; make sure you enter correct values"))
self.error_label_2.setText(_translate("MainWindow", "Error: wrong data type in an input field; make sure you enter correct values"))
self.Plot_figure_saved_label.setText(_translate("MainWindow", "The figure has been saved"))
self.Derivative_main_label.setText(_translate("MainWindow", "The derivative of a function of a real variable measures the sensitivity to change of the function value (output value) with respect to a change in its argument (input value). Derivatives are a fundamental tool of calculus. For example, the derivative of the position of a moving object with respect to time is the object\'s velocity: this measures how quickly the position of the object changes when time advances. Source: Wikipedia"))
self.Derivative_label_fx.setText(_translate("MainWindow", "y ="))
self.Derivative_input_value.setPlaceholderText(_translate("MainWindow", "x**2-3*x"))
self.Derivative_label_fxpr.setText(_translate("MainWindow", "="))
self.Derivative_BTN_compute.setText(_translate("MainWindow", "Compute"))
self.Derivative_range_x1.setPlaceholderText(_translate("MainWindow", "x1"))
self.Derivative_range_x2.setPlaceholderText(_translate("MainWindow", "x2"))
self.Integral_range_x1.setPlaceholderText(_translate("MainWindow", "x1"))
self.Integral_range_x2.setPlaceholderText(_translate("MainWindow", "x2"))
self.d_Integral_range_x1.setPlaceholderText(_translate("MainWindow", "x1"))
self.d_Integral_range_x2.setPlaceholderText(_translate("MainWindow", "x2"))
self.d_Integral_range_y1.setPlaceholderText(_translate("MainWindow", "y1"))
self.d_Integral_range_y2.setPlaceholderText(_translate("MainWindow", "y2"))
self.correct_label_4.setText(_translate("MainWindow", "The function has been calculated"))
self.error_label_4.setText(_translate("MainWindow", "Error: an input field was left blank; make sure you enter correct values"))
self.error_label_5.setText(_translate("MainWindow", "Error: wrong data type in an input field; make sure you enter correct values"))
self.d_Integral_main_label.setText(_translate("MainWindow", "The multiple integral is a definite integral of a function of more than one real variable, for instance, f(x, y) or f(x, y, z). Integrals of a function of two variables over a region in R² are called double integrals, and integrals of a function of three variables over a region of R³ are called triple integrals. Source: Wikipedia"))
self.d_Integral_label_fx.setText(_translate("MainWindow", "f(x,y)dxdy ="))
self.d_Integral_label_fx_2.setText(_translate("MainWindow", "f(x,y) ="))
self.d_Integral_input_value.setPlaceholderText(_translate("MainWindow", "x*y"))
self.d_Integral_BTN_compute.setText(_translate("MainWindow", "Compute"))
self.error_label_8.setText(_translate("MainWindow", "Error: an input field was left blank; make sure you enter correct values"))
self.error_label_9.setText(_translate("MainWindow", "Error: wrong data type in an input field; make sure you enter correct values"))
self.correct_label_9.setText(_translate("MainWindow", "The function has been calculated"))
self.c_Integral_input_value_fx.setPlaceholderText(_translate("MainWindow", "x**2*y*z"))
self.c_Integral_BTN_compute.setText(_translate("MainWindow", "Compute"))
self.c_Integral_main_label.setText(_translate("MainWindow", "In mathematics, a multiple integral is a definite integral of a function of several real variables, for instance, f(x, y) or f(x, y, z). Integrals of a function of two variables over a region in R² are called double integrals, and integrals of a function of three variables over a region in R³ are called triple integrals. Source: Wikipedia"))
self.c_Integral_label_fx.setText(_translate("MainWindow", "f(x,y,z) ="))
self.c_Integral_label_EP.setText(_translate("MainWindow", "Integration area:"))
self.c_Integral_input_value_x1.setPlaceholderText(_translate("MainWindow", "x1"))
self.c_Integral_input_value_x2.setPlaceholderText(_translate("MainWindow", "x2"))
self.c_Integral_input_value_y1.setPlaceholderText(_translate("MainWindow", "y1"))
self.c_Integral_input_value_y2.setPlaceholderText(_translate("MainWindow", "y2"))
self.c_Integral_input_value_z1.setPlaceholderText(_translate("MainWindow", "z1"))
self.c_Integral_input_value_z2.setPlaceholderText(_translate("MainWindow", "z2"))
self.c_Integral_label_func.setText(_translate("MainWindow", "f(x,y,z)dxdydz ="))
self.c_Integral_label_volume.setText(_translate("MainWindow", "V(x,y,z) ="))
self.error_label_10.setText(_translate("MainWindow", "Error: an input field was left blank; make sure you enter correct values"))
self.error_label_11.setText(_translate("MainWindow", "Error: wrong data type in an input field; make sure you enter correct values"))
self.Delta_input_value_A.setPlaceholderText(_translate("MainWindow", "A"))
self.Delta_input_value_B.setPlaceholderText(_translate("MainWindow", "B"))
self.Delta_input_value_C.setPlaceholderText(_translate("MainWindow", "C"))
self.Delta_BTN_compute_2.setText(_translate("MainWindow", "Compute"))
self.Delta_main_label_2.setText(_translate("MainWindow", "A quadratic equation contains only powers of x that are non-negative integers, and therefore it is a polynomial equation. In particular, it is a second-degree polynomial equation, since the greatest power is two."))
self.Delta_label_fx_2.setText(_translate("MainWindow", "f(x) = Ax²+Bx+C"))
self.Delta_label_range.setText(_translate("MainWindow", "Variables:"))
self.Delta_result_x1.setText(_translate("MainWindow", "x1"))
self.Delta_main_label_3.setText(_translate("MainWindow", "In algebra, a quadratic equation is any equation that can be rearranged in standard form as ax² + bx + c = 0, where x represents an unknown, and a, b, and c represent known numbers with a ≠ 0. If a = 0, then the equation is linear, not quadratic, as there is no ax² term. Source: Wikipedia"))
self.Delta_result_x1_2.setText(_translate("MainWindow", "x2"))
self.error_label_12.setText(_translate("MainWindow", "Error: an input field was left blank; make sure you enter correct values"))
self.error_label_13.setText(_translate("MainWindow", "Error: wrong data type in an input field; make sure you enter correct values"))
self.correct_label_14.setText(_translate("MainWindow", "The function has been calculated"))
self.Copyright_label.setText(_translate("MainWindow", "© 2020 <NAME>. All Rights Reserved. version 1.0"))
def set_page(self, MainWindow):
#Pages
self.Home_btn.clicked.connect(lambda: self.stackedWidget.setCurrentWidget(self.Home_content))
self.Home_btn.clicked.connect(lambda: self.stackedWidget_2.setCurrentWidget(self.Home_title))
self.Plot_btn.clicked.connect(lambda: self.stackedWidget.setCurrentWidget(self.Plot_content))
self.Plot_btn.clicked.connect(lambda: self.stackedWidget_2.setCurrentWidget(self.Plot_title))
self.Derviate_btn.clicked.connect(lambda: self.stackedWidget.setCurrentWidget(self.Derivative_centent))
self.Derviate_btn.clicked.connect(lambda: self.stackedWidget_2.setCurrentWidget(self.Derivative_title))
self.Integral_1st_btn.clicked.connect(lambda: self.stackedWidget.setCurrentWidget(self.Integral_content))
self.Integral_1st_btn.clicked.connect(lambda: self.stackedWidget_2.setCurrentWidget(self.Integral))
self.Integral_2x_btn.clicked.connect(lambda: self.stackedWidget.setCurrentWidget(self.d_Integral_content))
self.Integral_2x_btn.clicked.connect(lambda: self.stackedWidget_2.setCurrentWidget(self.d_Integral_title))
self.Integral_curved_btn.clicked.connect(lambda: self.stackedWidget.setCurrentWidget(self.c_Integral_content))
self.Integral_curved_btn.clicked.connect(lambda: self.stackedWidget_2.setCurrentWidget(self.c_Integral_title))
self.Delta_plot_btn.clicked.connect(lambda: self.stackedWidget.setCurrentWidget(self.delta_content))
self.Delta_plot_btn.clicked.connect(lambda: self.stackedWidget_2.setCurrentWidget(self.delta_title))
#Toggle Menu
self.Menu_button.clicked.connect(lambda: self.toggle_menu(0))
#Errors display
self.Plot_error_info.setVisible(False)
self.stackedWidget_4.setVisible(False)
self.stackedWidget_3.setVisible(False)
self.stackedWidget_6.setVisible(False)
self.stackedWidget_7.setVisible(False)
self.stackedWidget_5.setVisible(False)
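# The error/status stacks start hidden; the compute handlers below show them.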
def set_figure_flags(self):
global delta_close_figure_flag
delta_close_figure_flag = True
def set_toggle_flag(self):
global flagIt
flagIt = True
def toggle_menu(self, Value):
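# Collapses or expands the left icon bar and repositions every page's
# widgets to match; the module-level flagIt tracks the current state.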
global flagIt
if flagIt:
#HIDDEN
self.stackedWidget.setMinimumSize(QtCore.QSize(800, 0))
self.stackedWidget.setMaximumSize(QtCore.QSize(800, 16777215))
self.Bottom_left_icons_out.setMinimumSize(QtCore.QSize(Value, 0))
self.Bottom_left_icons_out.setMaximumSize(QtCore.QSize(Value, 16777215))
#Home section
self.Home_label_2.setMaximumWidth(1200)
self.Home_label_2.setGeometry(QtCore.QRect(0,40,800,121))
self.Home_label_11.setMaximumWidth(1200)
self.Home_label_11.setGeometry(QtCore.QRect(0, 490, 800, 51))
#Plot Section
self.Plot_frame.setGeometry(QtCore.QRect(450, 0, 350, 350))
self.Plot_label_2.setGeometry(QtCore.QRect(450, 350, 351, 21))
self.Plot_label_1.setGeometry(QtCore.QRect(0, 20, 441, 91))
#Derivative Section
self.Derivative_main_label.setGeometry(QtCore.QRect(0, 0, 801, 141))
self.Derivative_frame_dxdy.setGeometry(QtCore.QRect(430, 120, 340, 340))
self.Derivative_image_preview_dxdy.setGeometry(QtCore.QRect(510, 460, 271, 31))
#Integral Section
self.Integral_main_label.setGeometry(QtCore.QRect(0, 0, 801, 191))
self.Integral_image_label_preview.setGeometry(QtCore.QRect(510, 500, 271, 31))
self.Integral_image_frame_preview.setGeometry(QtCore.QRect(430, 160, 340, 340))
self.Integral_input_value.setGeometry(QtCore.QRect(130, 200, 281, 31))
self.Integral_label_fxpr_res.setGeometry(QtCore.QRect(130, 330, 281, 31))
self.Integral_label_P_res.setGeometry(QtCore.QRect(130, 390, 281, 31))
self.Integral_BTN_compute.setGeometry(QtCore.QRect(150, 460, 131, 41))
self.stackedWidget_4.setGeometry(QtCore.QRect(50, 510, 321, 61))
#Double Integral Section
self.d_Integral_main_label.setGeometry(QtCore.QRect(0, 0, 801, 91))
self.d_Integral_image_frame_preview.setGeometry(QtCore.QRect(430, 110, 340, 340))
self.d_Integral_image_label_preview.setGeometry(QtCore.QRect(510, 450, 271, 31))
self.d_Integral_label_fxpr_res.setGeometry(QtCore.QRect(160, 280, 251, 31))
self.d_Integral_input_value.setGeometry(QtCore.QRect(130, 130, 281, 31))
self.d_Integral_label_P_res.setGeometry(QtCore.QRect(130, 340, 281, 31))
self.d_Integral_BTN_compute.setGeometry(QtCore.QRect(150, 410, 131, 41))
self.stackedWidget_5.setGeometry(QtCore.QRect(70, 470, 341, 61))
#Triple Integral Section
self.c_Integral_main_label.setGeometry(QtCore.QRect(0, 0, 801, 91))
self.c_Integral_input_value_fx.setGeometry(QtCore.QRect(100, 111, 321, 31))
self.c_Integral_image_frame_preview.setGeometry(QtCore.QRect(430, 110, 340, 340))
self.c_Integral_image_label_preview.setGeometry(QtCore.QRect(510, 450, 271, 31))
self.c_Integral_label_symbolic_res.setGeometry(QtCore.QRect(180, 296, 241, 31))
self.c_Integral_label_volume_res.setGeometry(QtCore.QRect(180, 351, 241, 31))
self.c_Integral_BTN_compute.setGeometry(QtCore.QRect(130, 410, 141, 41))
self.stackedWidget_6.setGeometry(QtCore.QRect(70, 470, 341, 61))
#Delta Section
self.Delta_main_label_2.setGeometry(QtCore.QRect(0, 0, 801, 71))
self.Delta_main_label_3.setGeometry(QtCore.QRect(0, 80, 801, 91))
self.Delta_image_frame_preview.setGeometry(QtCore.QRect(430, 170, 340, 340))
self.Delta_image_label_preview.setGeometry(QtCore.QRect(510, 510, 271, 31))
flagIt = False
return 0
else:
#NOT HIDDEN
self.stackedWidget.setMinimumSize(QtCore.QSize(800+128, 0))
self.stackedWidget.setMaximumSize(QtCore.QSize(800+128, 16777215))
self.Bottom_left_icons_out.setMinimumSize(QtCore.QSize(128, 0))
self.Bottom_left_icons_out.setMaximumSize(QtCore.QSize(128, 16777215))
#Home section
self.Home_label_2.setGeometry(QtCore.QRect(0,40,700,121))
self.Home_label_11.setGeometry(QtCore.QRect(0, 490, 700, 51))
#Plot Section
self.Plot_frame.setGeometry(QtCore.QRect(350, 0, 350, 350))
self.Plot_label_2.setGeometry(QtCore.QRect(350, 350, 351, 21))
self.Plot_label_1.setGeometry(QtCore.QRect(0, 20, 341, 91))
#Derivative Section
self.Derivative_main_label.setGeometry(QtCore.QRect(0, 0, 701, 141))
self.Derivative_frame_dxdy.setGeometry(QtCore.QRect(330, 120, 340, 340))
self.Derivative_image_preview_dxdy.setGeometry(QtCore.QRect(410, 460, 271, 31))
#Integral Section
self.Integral_main_label.setGeometry(QtCore.QRect(0, 0, 801, 191))
self.Integral_image_label_preview.setGeometry(QtCore.QRect(410, 500, 271, 31))
self.Integral_image_frame_preview.setGeometry(QtCore.QRect(330, 160, 340, 340))
self.Integral_input_value.setGeometry(QtCore.QRect(130, 200, 181, 31))
self.Integral_label_fxpr_res.setGeometry(QtCore.QRect(130, 330, 181, 31))
self.Integral_label_P_res.setGeometry(QtCore.QRect(130, 390, 181, 31))
self.Integral_BTN_compute.setGeometry(QtCore.QRect(100, 460, 131, 41))
self.stackedWidget_4.setGeometry(QtCore.QRect(0, 510, 321, 61))
#Double Integral Section
self.d_Integral_main_label.setGeometry(QtCore.QRect(0, 0, 701, 91))
self.d_Integral_image_frame_preview.setGeometry(QtCore.QRect(330, 110, 340, 340))
self.d_Integral_image_label_preview.setGeometry(QtCore.QRect(410, 450, 271, 31))
self.d_Integral_label_fxpr_res.setGeometry(QtCore.QRect(160, 280, 151, 31))
self.d_Integral_input_value.setGeometry(QtCore.QRect(130, 130, 181, 31))
self.d_Integral_label_P_res.setGeometry(QtCore.QRect(130, 340, 181, 31))
self.d_Integral_BTN_compute.setGeometry(QtCore.QRect(100, 410, 131, 41))
self.stackedWidget_5.setGeometry(QtCore.QRect(20, 470, 341, 61))
#Triple Integral Section
self.c_Integral_main_label.setGeometry(QtCore.QRect(0, 0, 701, 91))
self.c_Integral_input_value_fx.setGeometry(QtCore.QRect(100, 111, 221, 31))
self.c_Integral_image_frame_preview.setGeometry(QtCore.QRect(330, 110, 340, 340))
self.c_Integral_image_label_preview.setGeometry(QtCore.QRect(410, 450, 271, 31))
self.c_Integral_label_symbolic_res.setGeometry(QtCore.QRect(180, 296, 141, 31))
self.c_Integral_label_volume_res.setGeometry(QtCore.QRect(180, 351, 141, 31))
self.c_Integral_BTN_compute.setGeometry(QtCore.QRect(80, 410, 141, 41))
self.stackedWidget_6.setGeometry(QtCore.QRect(20, 470, 341, 61))
#Delta Section
self.Delta_main_label_2.setGeometry(QtCore.QRect(0, 0, 701, 71))
self.Delta_main_label_3.setGeometry(QtCore.QRect(0, 80, 701, 91))
self.Delta_image_frame_preview.setGeometry(QtCore.QRect(330, 170, 340, 340))
self.Delta_image_label_preview.setGeometry(QtCore.QRect(410, 510, 271, 31))
flagIt = True
return 0
def plot_expressions(self):
self.Plot_BTN_generate.clicked.connect(lambda: self.plot_generate_btn_function())
self.Derivative_BTN_compute.clicked.connect(lambda: self.derivative_compute_btn_function())
self.Integral_BTN_compute.clicked.connect(lambda: self.integral_compute_btn_function())
self.d_Integral_BTN_compute.clicked.connect(lambda: self.d_integral_compute_btn_function())
self.c_Integral_BTN_compute.clicked.connect(lambda: self.c_integral_compute_btn_function())
self.Delta_BTN_compute_2.clicked.connect(lambda: self.delta_compute_btn_function())
def plot_generate_btn_function(self):
try:
def Plot_checking_blank_brackets(x, a, b):
if x == '' or a == '' or b == '':
self.Plot_error_info.setVisible(True)
self.Plot_error_info.setCurrentWidget(self.error_widget_1)
return False
else:
return True
def Plot_checking_correct_datatypes(Enable):
if Enable:
if 'x' in self.Plot_fn_edit.text():
self.x1 = float(self.Plot_range_x1.text())
self.x2 = float(self.Plot_range_x2.text())
if self.x2 > self.x1:
self.Plot_range_values = [self.x1, self.x2]
return True
self.Plot_error_info.setCurrentWidget(self.error_widget_2)
return False
else:
self.Plot_error_info.setCurrentWidget(self.error_widget_2)
return False
else:
return False
def Plot_counting_erase_data(RG, Enable):
if Enable:
self.Data_x_axis = []
self.Data_y_axis = []
self.x1 = RG[0]
self.x2 = RG[1]
self.Dens = 200
self.Step = (self.x2-self.x1)/self.Dens
for i in range(1, self.Dens+2):
self.Data_x_axis.append(float("{:.2f}".format(self.x1+(i-1)*self.Step)))
for x in self.Data_x_axis:
x = float(x)
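# NOTE: eval() executes the raw input-box text as Python with the loop's x
# in scope; handy for "x**2"-style expressions, but unsafe for untrusted input.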
self.y_res = eval(self.Plot_fn_edit.text())
self.Data_y_axis.append(self.y_res)
self.Plot_error_info.setVisible(False)
return (self.Data_x_axis, self.Data_y_axis)
def Plot_range_plot_value_function(Data):
self.x1 = np.max(Data[0])
self.x2 = np.max(Data[1])
if self.x1 >= self.x2:
return self.x1
else:
return self.x2
def Plot_figure_positioning(Data):
self.x1 = np.min(Data[0])
self.x2 = np.max(Data[0])
self.y1 = np.min(Data[1])
self.y2 = np.max(Data[1])
return (self.x1-1, self.x2+1), (self.y1-1, self.y2+1)
def Plot_figure_lim(Data):
plt.xlim(Data[0])
plt.ylim(Data[1])
return True
def Plot_figure_draw(Enable, Data, Range):
if Enable:
plt.close('all')
plt.grid(True, color='black', linewidth=0.5)
plt.axhline(color = 'k')
plt.axvline(color = 'k')
plt.plot(Data[0], Data[1], color=self.Plot_combo_color.currentText(),label='Figure',linewidth=2)
plt.savefig(path+'/figure-preview-img.png')
self.Plot_preview_label.setPixmap(QtGui.QPixmap(path+"/figure-preview-img.png"))
plt.show()
return True
self.Plot_range_values = []
self.Flag_Plot_checking_blank_brackets = Plot_checking_blank_brackets(self.Plot_fn_edit.text(), self.Plot_range_x1.text(), self.Plot_range_x2.text())
self.Flag_Plot_checking_correct_datatypes = Plot_checking_correct_datatypes(self.Flag_Plot_checking_blank_brackets)
self.Plot_figure_data = Plot_counting_erase_data(self.Plot_range_values, self.Flag_Plot_checking_correct_datatypes)
self.Plot_range_plot_value = Plot_range_plot_value_function(self.Plot_figure_data)
self.Plot_figure_positioning_value = Plot_figure_positioning(self.Plot_figure_data)
self.Plot_figure_lim_flag = Plot_figure_lim(self.Plot_figure_positioning_value)
self.Plot_figure_draw_flag = Plot_figure_draw(self.Plot_figure_lim_flag, self.Plot_figure_data ,self.Plot_figure_positioning_value)
except Exception:
self.Plot_error_info.setVisible(True)
self.Plot_error_info.setCurrentWidget(self.error_widget_2)
def plot_save_btn_function(self):
self.Plot_error_info.setVisible(True)
self.Plot_error_info.setCurrentWidget(self.Plot_figure_saved_widget)
def derivative_compute_btn_function(self):
try:
def Derivative_checking_blank_brackets(x, R1, R2):
if x == '' or R1 == '' or R2 == '':
self.stackedWidget_3.setVisible(True)
self.stackedWidget_3.setCurrentWidget(self.error_widget_4)
return False
else:
return True
def Derivative_checking_correct_datatypes(Enable, Data):
if Enable:
return True
else:
return False
def Derivative_compute(Enable, Data):
global Derivative_final_result
if Enable:
self.x = sp.Symbol('x')
Derivative_final_result = sp.diff(Data, self.x)
return True
else:
return False
def Derivative_show_result(Enable):
if Enable:
self.stackedWidget_3.setVisible(False)
self.Derivative_label_fxpr_res.setText(str(Derivative_final_result))
return True
else:
return False
def Derivative_draw_figures(Enable, Data_Input, Data_Output, R1, R2):
if Enable:
self.Data_x_axis = []
self.Data_y_axis = []
self.Data_dydx_axis = []
self.Dens = 20
self.x1 = float(R1)
self.x2 = float(R2)
self.Step = (self.x2-self.x1)/self.Dens
for i in range(1, self.Dens+2):
self.Data_x_axis.append(float("{:.2f}".format(self.x1+(i-1)*self.Step)))
for x in self.Data_x_axis:
x = float(x)
self.res_y = eval(Data_Input)
self.res_dydx = eval(str(Data_Output))
self.Data_y_axis.append(self.res_y)
self.Data_dydx_axis.append(self.res_dydx)
plt.grid(True, color='black', linewidth=0.5)
plt.plot(self.Data_x_axis,self.Data_y_axis, color='Blue',label=Data_Input,linewidth=2)
plt.plot(self.Data_x_axis,self.Data_dydx_axis, color='Red',label=Data_Output,linewidth=2)
plt.axhline(color = 'k')
plt.axvline(color = 'k')
plt.legend()
plt.savefig(path+'/figure-dydx-img.png')
self.Derivative_preview_label_dxdy.setPixmap(QtGui.QPixmap(path+'/figure-dydx-img.png'))
self.stackedWidget_3.setVisible(True)
self.stackedWidget_3.setCurrentWidget(self.correct_widget_4)
plt.show()
return True
else:
return False
self.Derivative_checking_blank_brackets_Flag = Derivative_checking_blank_brackets(self.Derivative_input_value.text(),self.Derivative_range_x1.text(),self.Derivative_range_x2.text())
self.Derivative_checking_correct_datatypes_Flag = Derivative_checking_correct_datatypes(self.Derivative_checking_blank_brackets_Flag, self.Derivative_input_value.text())
self.Derivative_compute_flag = Derivative_compute(self.Derivative_checking_correct_datatypes_Flag, self.Derivative_input_value.text())
self.Derivative_show_result_flag = Derivative_show_result(self.Derivative_compute_flag)
self.Derivative_draw_figures_flag = Derivative_draw_figures(
self.Derivative_show_result_flag,
self.Derivative_input_value.text(),
Derivative_final_result,
self.Derivative_range_x1.text(),
self.Derivative_range_x2.text())
except Exception:
self.stackedWidget_3.setVisible(True)
self.stackedWidget_3.setCurrentWidget(self.error_widget_5)
def integral_compute_btn_function(self):
try:
def Integral_checking_blank_brackets(x, R1, R2):
if x == '' or R1 == '' or R2 == '':
self.stackedWidget_4.setVisible(True)
self.stackedWidget_4.setCurrentWidget(self.error_widget_6)
return False
else:
return True
def Integral_checking_correct_datatypes(Enable, Data, R1, R2):
if Enable:
if float(R2) > float(R1):
return True
else:
return False
else:
return False
def Integral_compute(Enable, Data):
global Integral_final_result
if Enable:
self.x = sp.Symbol('x')
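# sympy.integrate returns the antiderivative without the integration
# constant, which is why '+C' is appended when the result is displayed.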
Integral_final_result = sp.integrate(Data, self.x)
return True
else:
return False
def Integral_show_result(Enable):
if Enable:
self.Integral_label_fxpr_res.setText(str(Integral_final_result)+'+C')
return True
else:
return False
def Intgeral_draw_figures(Enable, Data_Input, Data_Output, R1, R2):
if Enable:
plt.close('all')
self.Data_x_axis = []
self.Data_y_axis = []
self.Data_inte_axis = []
self.Dens = 500
self.x1 = float(R1)
self.x2 = float(R2)
self.R = [self.x1, self.x2]
self.dx_plt = self.x2 - self.x1
self.dx_plt = self.dx_plt * 0.25
self.dx1_plt = self.x1 - self.dx_plt
self.dx2_plt = self.x2 + self.dx_plt
self.Step = (self.dx2_plt-self.dx1_plt)/self.Dens
for i in range(1, self.Dens+2):
self.Data_x_axis.append(float("{:.2f}".format(self.dx1_plt+(i-1)*self.Step)))
for x in self.Data_x_axis:
x = float(x)
self.res_y = eval(Data_Input)
self.res_inte = eval(str(Data_Output))
self.Data_y_axis.append(self.res_y)
self.Data_inte_axis.append(self.res_inte)
self.Data_x_axis = np.array(self.Data_x_axis)
self.Data_y_axis = np.array(self.Data_y_axis)
self.Data_inte_axis = np.array(self.Data_inte_axis)
self.P_arr = []
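# Evaluate the antiderivative at both bounds (R reversed, so P_arr[0] = F(x2))
# and apply the fundamental theorem of calculus: P = F(x2) - F(x1).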
for x in self.R[::-1]:
self.Pd = eval(str(Integral_final_result))
self.P_arr.append(self.Pd)
self.P = self.P_arr[0] - self.P_arr[1]
self.P = "{:.3f}".format(self.P)
self.Integral_label_P_res.setText(str(self.P))
plt.grid(True, color='black', linewidth=0.5)
plt.plot(self.Data_x_axis,self.Data_y_axis, color='Red',label=Data_Input,linewidth=1)
plt.plot(self.Data_x_axis,self.Data_inte_axis, color='Blue',label=Data_Output,linewidth=1)
plt.fill_between(self.Data_x_axis,self.Data_y_axis, 0, where=(self.Data_x_axis >= self.x1) & (self.Data_x_axis <= self.x2), color='Red', alpha=0.5)
plt.axhline(color = 'k')
plt.axvline(color = 'k')
plt.legend()
plt.savefig(path+'/figure-inte-img.png')
self.Integral_image_label_preview_fig.setPixmap(QtGui.QPixmap(path+"/figure-inte-img.png"))
self.stackedWidget_4.setVisible(True)
self.stackedWidget_4.setCurrentWidget(self.correct_widget_7)
plt.show()
else:
self.stackedWidget_4.setVisible(True)
self.stackedWidget_4.setCurrentWidget(self.error_widget_7)
self.Integral_checking_blank_brackets_flag = Integral_checking_blank_brackets(self.Integral_input_value.text(), self.Integral_range_x1.text(), self.Integral_range_x2.text())
self.Integral_checking_correct_datatypes_flag = Integral_checking_correct_datatypes(self.Integral_checking_blank_brackets_flag, self.Integral_input_value.text(), self.Integral_range_x1.text(), self.Integral_range_x2.text())
self.Integral_compute_flag = Integral_compute(self.Integral_checking_correct_datatypes_flag, self.Integral_input_value.text())
self.Integral_show_result_flag = Integral_show_result(self.Integral_compute_flag)
Intgeral_draw_figures(
self.Integral_show_result_flag,
self.Integral_input_value.text(),
Integral_final_result,
self.Integral_range_x1.text(),
self.Integral_range_x2.text()
)
except Exception:
self.stackedWidget_4.setVisible(True)
self.stackedWidget_4.setCurrentWidget(self.error_widget_7)
def d_integral_compute_btn_function(self):
try:
def d_Integral_checking_blank_brackets(x, RX1, RX2, RY1, RY2):
if x == '' or RX1 == '' or RX2 == '' or RY1 == '' or RY2 == '':
self.stackedWidget_5.setVisible(True)
self.stackedWidget_5.setCurrentWidget(self.error_widget_8)
return False
else:
return True
def d_Integral_checking_correct_datatypes(Enable, Data, RX1, RX2, RY1, RY2):
if Enable:
if float(RX2) > float(RX1) and float(RY2) > float(RY1):
return True
else:
return False
else:
return False
def d_Integral_compute(Enable, Data, RX1, RX2, RY1, RY2):
global d_Integral_final_result_symbolic, d_Integral_final_result_area
if Enable:
self.x = sp.Symbol('x')
self.y = sp.Symbol('y')
self.d_Integral_final_result_x = sp.integrate(Data, self.x)
self.d_Integral_final_result_y = sp.integrate(self.d_Integral_final_result_x, self.y)
d_Integral_final_result_symbolic = self.d_Integral_final_result_y
self.f = lambda y, x: eval(Data)
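# scipy's dblquad expects func(y, x), the x-limits as numbers, and the
# y-limits as callables of x; constant lambdas give a rectangular region.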
d_Integral_final_result_area = integrate.dblquad(self.f, float(RX1), float(RX2), lambda x: float(RY1), lambda x: float(RY2))
return True
else:
return False
def d_Integral_show_result(Enable):
if Enable:
self.stackedWidget_5.setVisible(False)
self.d_Integral_label_fxpr_res.setText(str(d_Integral_final_result_symbolic)+'+C')
self.d_Integral_label_P_res.setText(str("{:.3f}".format(d_Integral_final_result_area[0])))
return True
else:
return False
def d_Integral_draw_figures(Enable, Data_Input, Data_Output, RX1, RX2, RY1, RY2):
if Enable:
plt.close('all')
self.Data_a = np.array([float(RX1), float(RX2)])
self.Data_b1 = np.array([float(RY1), float(RY1)])
self.Data_b2 = np.array([float(RY2), float(RY2)])
plt.fill_between(self.Data_a, self.Data_b1, self.Data_b2, color='red', alpha=0.75)
plt.grid(True, color='black', linewidth=0.5)
self.Data_fn = np.array([float(RX1), float(RX2), float(RY1), float(RY2)])
for i in range(len(self.Data_fn)):
if 0 > self.Data_fn[i]:
self.Data_fn[i] = self.Data_fn[i]*(-1)
self.range = max(self.Data_fn)
plt.axhline(color = 'k')
plt.axvline(color = 'k')
plt.xlim(self.range*(-1)*1.2,self.range*1.2)
plt.ylim(self.range*(-1)*1.2,self.range*1.2)
plt.savefig(path+'/figure-dinte-img.png')
self.d_Integral_image_label_preview_fig.setPixmap(QtGui.QPixmap(path+"/figure-dinte-img.png"))
self.stackedWidget_5.setVisible(True)
self.stackedWidget_5.setCurrentWidget(self.correct_widget_9)
plt.show()
self.d_Integral_checking_blank_brackets_flag = d_Integral_checking_blank_brackets(
self.d_Integral_input_value.text(),
self.d_Integral_range_x1.text(),
self.d_Integral_range_x2.text(),
self.d_Integral_range_y1.text(),
self.d_Integral_range_y2.text()
)
self.d_Integral_checking_correct_datatypes_flag = d_Integral_checking_correct_datatypes(
self.d_Integral_checking_blank_brackets_flag,
self.d_Integral_input_value.text(),
self.d_Integral_range_x1.text(),
self.d_Integral_range_x2.text(),
self.d_Integral_range_y1.text(),
self.d_Integral_range_y2.text()
)
self.d_Integral_compute_flag = d_Integral_compute(
self.d_Integral_checking_correct_datatypes_flag,
self.d_Integral_input_value.text(),
self.d_Integral_range_x1.text(),
self.d_Integral_range_x2.text(),
self.d_Integral_range_y1.text(),
self.d_Integral_range_y2.text()
)
self.d_Integral_show_result_flag = d_Integral_show_result(self.d_Integral_compute_flag)
d_Integral_draw_figures(
self.d_Integral_show_result_flag,
self.d_Integral_input_value.text(),
d_Integral_final_result_symbolic,
self.d_Integral_range_x1.text(),
self.d_Integral_range_x2.text(),
self.d_Integral_range_y1.text(),
self.d_Integral_range_y2.text()
)
except Exception:
self.stackedWidget_5.setVisible(True)
self.stackedWidget_5.setCurrentWidget(self.error_widget_9)
def c_integral_compute_btn_function(self):
try:
def c_Integral_checking_blank_brackets(x, RX1, RX2, RY1, RY2, RZ1, RZ2):
if x == '' or RX1 == '' or RX2 == '' or RY1 == '' or RY2 == '' or RZ1 == '' or RZ2 == '':
self.stackedWidget_6.setVisible(True)
self.stackedWidget_6.setCurrentWidget(self.error_widget_10)
return False
else:
return True
def c_Integral_checking_correct_datatypes(Enable, Data, RX1, RX2, RY1, RY2, RZ1, RZ2):
if Enable:
if float(RX2) > float(RX1) and float(RY2) > float(RY1) and float(RZ2) > float(RZ1):
return True
else:
return False
else:
return False
def c_Integral_compute(Enable, Data, RX1, RX2, RY1, RY2, RZ1, RZ2):
global c_Integral_final_result_symbolic, c_Integral_final_result_volume
if Enable:
self.x = sp.Symbol('x')
self.y = sp.Symbol('y')
self.z = sp.Symbol('z')
self.c_Integral_symbolic_result_x = sp.integrate(Data, self.x)
self.c_Integral_symbolic_result_y = sp.integrate(self.c_Integral_symbolic_result_x, self.y)
self.c_Integral_symbolic_result_z = sp.integrate(self.c_Integral_symbolic_result_y, self.z)
c_Integral_final_result_symbolic = self.c_Integral_symbolic_result_z
self.f = lambda z, y, x: eval(Data)
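# tplquad expects func(z, y, x), x-limits as numbers, y-limits as callables
# of x, and z-limits as callables of (x, y); constants give a box region.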
c_Integral_final_result_volume = integrate.tplquad(self.f, float(RX1), float(RX2),
lambda x: float(RY1), lambda x: float(RY2),
lambda x, y: float(RZ1), lambda x, y: float(RZ2)
)
return True
else:
return False
def c_Integral_show_result(Enable):
if Enable:
self.stackedWidget_6.setVisible(False)
self.c_Integral_label_symbolic_res.setText(str(c_Integral_final_result_symbolic)+'+C')
self.c_Integral_label_volume_res.setText(str("{:.3f}".format(c_Integral_final_result_volume[0])))
return True
else:
return False
def c_Integral_draw_figures(Enable, Data_Input, Data_Output, RX1, RX2, RY1, RY2, RZ1, RZ2):
if Enable:
rx1, rx2, ry1, ry2, rz1, rz2 = float(RX1), float(RX2), float(RY1), float(RY2), float(RZ1), float(RZ2)
nx = (rx2 + rx1)/2
ny = (ry2 + ry1)/2
nz = (rz2 + rz1)/2
dx = rx2 - rx1
dy = ry2 - ry1
dz = rz2 - rz1
def Xaxisrange(x1, x2, dx, nx):
if x1 <= 0 and x2 >= 0:
Tx = 1.2*dx
elif x1 > 0:
Tx = 1.5*nx
elif x2 < 0:
Tx = -1.5*nx
return Tx
def Yaxisrange(y1, y2, dy, ny):
if y1 <= 0 and y2 >= 0:
Ty = 1.2*dy
elif y1 > 0:
Ty = 1.5*ny
elif y2 < 0:
Ty = -1.5*ny
return Ty
def Zaxisrange(z1, z2, dz, nz):
if z1 <= 0 and z2 >= 0:
Tz = 1.2*dz
elif z1 > 0:
Tz = 1.5*nz
elif z2 < 0:
Tz = -1.5*nz
return Tz
plt.close('all')
Range_X = Xaxisrange(rx1, rx2, dx, nx)
Range_Y = Yaxisrange(ry1, ry2, dy, ny)
Range_Z = Zaxisrange(rz1, rz2, dz, nz)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim(-Range_X,Range_X)
ax.set_ylim(-Range_Y,Range_Y)
ax.set_zlim(-Range_Z,Range_Z)
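# The 5x4 arrays below trace the faces of the integration box so that
# plot_surface renders [x1,x2] x [y1,y2] x [z1,z2] as a red cuboid.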
self.x1 = np.array([[nx,rx1,rx1,nx],[nx,rx1,rx1,nx],[nx,rx2,rx2,nx],[nx,rx2,rx2,nx],[nx,rx1,rx1,nx]])
self.y1 = np.array([[ny,ry1,ry1,ny],[ny,ry2,ry2,ny],[ny,ry2,ry2,ny],[ny,ry1,ry1,ny],[ny,ry1,ry1,ny]])
self.z1 = np.array([[rz1,rz1,rz2,rz2],[rz1,rz1,rz2,rz2],[rz1,rz1,rz2,rz2],[rz1,rz1,rz2,rz2],[rz1,rz1,rz2,rz2]])
self.XaxisDrawX = [-Range_X, Range_X]
self.YaxisDrawX = self.ZaxisDrawX = [0, 0]
self.YaxisDrawY = [-Range_Y, Range_Y]
self.XaxisDrawY = self.ZaxisDrawY = [0, 0]
self.ZaxisDrawZ = [-Range_Z, Range_Z]
self.YaxisDrawZ = self.XaxisDrawZ = [0, 0]
ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
ax.set_zlabel('Z axis')
ax.plot_surface(self.x1, self.y1, self.z1, color='r')
ax.plot(self.XaxisDrawX, self.YaxisDrawX, self.ZaxisDrawX, color='black', linewidth=1)
ax.plot(self.XaxisDrawY, self.YaxisDrawY, self.ZaxisDrawY, color='black', linewidth=1)
ax.plot(self.XaxisDrawZ, self.YaxisDrawZ, self.ZaxisDrawZ, color='black', linewidth=1)
plt.savefig(path+'/figure-cinte-img.png')
self.c_Integral_image_label_preview_fig.setPixmap(QtGui.QPixmap(path+"/figure-cinte-img.png"))
# No success widget exists on the triple-integral page's stack, so just
# hide the error messages once the figure has been drawn.
self.stackedWidget_6.setVisible(False)
plt.show()
self.c_Integral_checking_blank_brackets_flag = c_Integral_checking_blank_brackets(
self.c_Integral_input_value_fx.text(),
self.c_Integral_input_value_x1.text(),
self.c_Integral_input_value_x2.text(),
self.c_Integral_input_value_y1.text(),
self.c_Integral_input_value_y2.text(),
self.c_Integral_input_value_z1.text(),
self.c_Integral_input_value_z2.text()
)
self.c_Integral_checking_correct_datatypes_flag = c_Integral_checking_correct_datatypes(
self.c_Integral_checking_blank_brackets_flag,
self.c_Integral_input_value_fx.text(),
self.c_Integral_input_value_x1.text(),
self.c_Integral_input_value_x2.text(),
self.c_Integral_input_value_y1.text(),
self.c_Integral_input_value_y2.text(),
self.c_Integral_input_value_z1.text(),
self.c_Integral_input_value_z2.text()
)
self.c_Integral_compute_flag = c_Integral_compute(
self.c_Integral_checking_correct_datatypes_flag,
self.c_Integral_input_value_fx.text(),
self.c_Integral_input_value_x1.text(),
self.c_Integral_input_value_x2.text(),
self.c_Integral_input_value_y1.text(),
self.c_Integral_input_value_y2.text(),
self.c_Integral_input_value_z1.text(),
self.c_Integral_input_value_z2.text()
)
self.c_Integral_show_result_flag = c_Integral_show_result(self.c_Integral_compute_flag)
c_Integral_draw_figures(
self.c_Integral_show_result_flag,
self.c_Integral_input_value_fx.text(),
c_Integral_final_result_symbolic,
self.c_Integral_input_value_x1.text(),
self.c_Integral_input_value_x2.text(),
self.c_Integral_input_value_y1.text(),
self.c_Integral_input_value_y2.text(),
self.c_Integral_input_value_z1.text(),
self.c_Integral_input_value_z2.text()
)
except Exception:
self.stackedWidget_6.setVisible(True)
self.stackedWidget_6.setCurrentWidget(self.error_widget_11)
def delta_compute_btn_function(self):
try:
def Delta_checking_blank_brackets(A):
if A == '':
self.stackedWidget_7.setVisible(True)
self.stackedWidget_7.setCurrentWidget(self.error_widget_12)
return False
else:
return True
def Delta_checking_correct_datatypes(Enable, A, B, C):
global A_value, B_value, C_value
if Enable:
if float(A) == 0:
return False
else:
A_value = float(A)
if B == '':
B_value = 0
else:
B_value = float(B)
if C == '':
C_value = 0
else:
C_value = float(C)
return True
else:
return False
def Delta_computing_values(Enable, A, B, C):
global Delta_final_results
if Enable:
delta = B**2-4*A*C
if delta > 0:
x1 = float("{:.2f}".format(((-B-math.sqrt(delta)))/(2*A)))
x2 = float("{:.2f}".format(((-B+math.sqrt(delta)))/(2*A)))
Delta_final_results = [x1, x2]
elif delta == 0:
x0 = float("{:.2f}".format(-B/(2*A)))
Delta_final_results = [x0]
else:
Delta_final_results = []
return True
else:
return False
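# Worked example: A=1, B=-3, C=2 gives delta = 1, so x1 = 1.0 and x2 = 2.0.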
def Delta_draw_plot(Enable, DATA, A, B, C):
if Enable:
def Delta_checking_soultion_ammount(data, A, B, C):
if len(data) == 2:
x1 = data[0]
x2 = data[1]
dx = x2 - x1
x_data = np.linspace(x1-dx, x2+dx, 100)
#!/usr/bin/env python3
from __future__ import print_function
from __future__ import division
import rospy
import rosbag
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Quaternion
from sensor_msgs.msg import Imu
from tf.transformations import euler_from_quaternion, quaternion_from_euler
from mav_msgs.msg import Actuators
from waypoint_generation_library import WaypointGen
# TODO: make this critically damped by tuning the natural frequency
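# (Critical damping corresponds to zeta = 1; the attitude gains below use
# zeta = 2, i.e. an overdamped response.)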
class PDControl(object):
""" Takes IMU and position data and publishes actuator commands based off a Proportional Derivative law"""
def __init__(self):
self.dlqrPublisher = rospy.Publisher("/uams/command/motor_speed", Actuators, queue_size=1)
# self.dlqrPublisher = rospy.Publisher("/neo11/command/motor_speed", Actuators, queue_size=1)
self.receivedImuQuat = Quaternion()
self.thrustConstant = 1.269e-05
self.momentConstant = 0.016754
self.g = 9.8 # [m/s^2]
self.m = 4.88 # [kg]
self.Ixx = 6.08870e-02 # [kg*m^2]
self.Iyy = 6.87913e-02 # [kg*m^2]
self.Izz = 1.48916e-01 # [kg*m^2]
gamma = self.thrustConstant / self.momentConstant
self.L = 0.2895 # [m]
# damping ratio (overdamped)
zeta = 2
zetaYaw = 1
# natural frequency
self.PI = 3.14159
wnAng = 13 # [rad/s]
wnAngYaw = 200
# attitude control gains calculation based on 2nd order system assumption
# proportional gain
# self.kpAngle = np.array(([self.Ixx * pow(wnAng, 2), # roll
# self.Iyy * pow(wnAng, 2), # pitch
# self.Izz * pow(wnAngYaw, 2)])) # yaw
# self.kpAngle = np.array([11.2, 11.2, 5713.2])
# self.kdAngle = np.array([ 1.12, 1.12, 16.56])
# self.kpAngle = np.array([11.2, 11.2, 5000])
# self.kdAngle = np.array([1.12, 1.12, 16.56])
        self.kpAngle = np.array([20, 20, 5000])
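        # A sketch of the matching derivative gains (an assumption based on the
        # standard 2nd-order PD tuning kd = 2*zeta*wn*I per axis, not from the
        # original node):
        # self.kdAngle = np.array([2 * zeta * wnAng * self.Ixx,
        #                          2 * zeta * wnAng * self.Iyy,
        #                          2 * zetaYaw * wnAngYaw * self.Izz])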
# =============================================================================
# This module defines some functions in order to create a fuzzy control system.
# =============================================================================
# =============================================================================
# All these functions are defined in a way that suites the efficient inference
# mechanism; this includes the following assumptions:
# (1) Additive model for superpositioning of rules
# (2) All fuzzy sets are the same
# (3) Correlation product
# =============================================================================
import numpy as np
import skfuzzy as fuzz
import matplotlib.pyplot as plt
def gaussMF(domain, number, sigma=1):
"""
    gaussMF creates a number of Gaussian membership functions equally spaced in the
domain. Sigma is assumed to be 1 in order to create a nice set of MF (for
5 curves in a range from zero to ten), but it can be passed as an extra
argument. The centers are automatically detected in order to be equally
spaced. Domain is a np.array of shape (n,).
Number is an integer (should be an odd number).
MFs are returned as a 2D array: each row contains a MF.
"""
# Create a list with center values of the MFs (except for the first and
# last one, which values are obvious)
step = (domain[-1] - domain[0]) / (number - 1)
centers = []
for i in np.arange(number):
centers.append(domain[0] + i * step)
# MFs creation
mbFunc = np.empty((number, len(domain))) # each row is a MF
for MF in range(number):
mbFunc[MF] = np.exp(-((domain - centers[MF]) / sigma)**2)
return mbFunc
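def _demo_gaussMF():
    """Illustrative sketch (an addition, not part of the original module):
    build five Gaussian MFs on a 0-10 domain and plot them, matching the
    defaults suggested in the gaussMF docstring."""
    domain = np.arange(0, 10.05, 0.05)
    mbFunc = gaussMF(domain, 5)
    for mf in mbFunc:
        plt.plot(domain, mf)
    plt.show()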
def triMF(domain, number):
"""
    triMF creates a number of triangular membership functions equally spaced
    in the domain. The centers are automatically detected in order to be equally
spaced. Domain is a np.array of shape (n,).
Number is an integer (should be an odd number).
MFs are returned as a 2D array: each row contains a MF.
"""
# Create a list with center values of the MFs (except for the first and
# last one, which values are obvious)
step = (domain[-1] - domain[0]) / (number - 1)
centers = []
for i in np.arange(number):
centers.append(domain[0] + i * step)
# MFs creation
mbFunc = np.empty((number, len(domain))) # each row is a MF
for MF in range(number):
if MF == 0:
mbFunc[MF] = fuzz.trimf(domain, [centers[MF], centers[MF],
centers[MF + 1]])
elif MF >= number - 1:
mbFunc[MF] = fuzz.trimf(domain, [centers[MF - 1], centers[MF],
centers[MF]])
else:
mbFunc[MF] = fuzz.trimf(domain, [centers[MF - 1], centers[MF],
centers[MF + 1]])
return mbFunc
def trapMF(domain, number):
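    """
    trapMF creates a number of trapezoidal membership functions equally spaced
    in the domain, following the same conventions as gaussMF and triMF.
    """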
step = (domain[-1] - domain[0]) / (number - 1)
centers = []
    for i in np.arange(number):
        centers.append(domain[0] + i * step)
# -*- coding: utf-8 -*-
"""
FEM routines
------------
Functions to compute kinematics variables for the Finite
Element Analysis.
The elements included are:
1. 4 node bilinear quadrilateral.
2. 6 node quadratic triangle.
3. 3 node linear triangle.
The notation used is similar to the one used by Bathe [1]_.
References
----------
.. [1] Bathe, Klaus-Jürgen. Finite element procedures. Prentice Hall,
Pearson Education, 2006.
"""
from __future__ import absolute_import, division, print_function
import solidspy.gaussutil as gau
import numpy as np
def eletype(iet):
"""Assigns number to degrees of freedom
According to iet assigns number of degrees of freedom, number of
nodes and minimum required number of integration points.
Parameters
----------
iet : int
Type of element. These are:
1. 4 node bilinear quadrilateral.
2. 6 node quadratic triangle.
3. 3 node linear triangle.
5. 2 node spring.
6. 2 node truss element.
7. 2 node beam (3 DOF per node).
Returns
-------
ndof : int
Number of degrees of freedom for the selected element.
nnodes : int
Number of nodes for the selected element.
ngpts : int
Number of Gauss points for the selected element.
"""
if iet == 1:
ndof = 8
nnodes = 4
ngpts = 4
if iet == 2:
ndof = 12
nnodes = 6
ngpts = 7
if iet == 3:
ndof = 6
nnodes = 3
ngpts = 3
if iet == 5:
ndof = 4
nnodes = 2
ngpts = 3
if iet == 6:
ndof = 4
nnodes = 2
ngpts = 3
if iet == 7:
ndof = 6
nnodes = 2
ngpts = 3
return ndof, nnodes, ngpts
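# Quick sanity check (added comment): eletype(1) returns (8, 4, 4) -- a
# bilinear quad has 8 degrees of freedom, 4 nodes and uses 4 Gauss points.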
#%% Shape functions and derivatives
def sha4(x, y):
"""Shape functions for a 4-noded quad element
Parameters
----------
x : float
x coordinate for a point within the element.
y : float
y coordinate for a point within the element.
Returns
-------
N : Numpy array
Array of interpolation functions.
Examples
--------
We can check evaluating at two different points, namely (0, 0) and
(1, 1). Thus
>>> N = sha4(0, 0)
>>> N_ex = np.array([
... [1/4, 0, 1/4, 0, 1/4, 0, 1/4, 0],
... [0, 1/4, 0, 1/4, 0, 1/4, 0, 1/4]])
>>> np.allclose(N, N_ex)
True
and
>>> N = sha4(1, 1)
>>> N_ex = np.array([
... [0, 0, 0, 0, 1, 0, 0, 0],
... [0, 0, 0, 0, 0, 1, 0, 0]])
>>> np.allclose(N, N_ex)
True
"""
N = np.zeros((2, 8))
H = 0.25*np.array(
[(1 - x)*(1 - y),
(1 + x)*(1 - y),
(1 + x)*(1 + y),
(1 - x)*(1 + y)])
N[0, ::2] = H
N[1, 1::2] = H
return N
def sha6(x, y):
"""Shape functions for a 6-noded triangular element
Parameters
----------
x : float
x coordinate for a point within the element.
y : float
y coordinate for a point within the element.
Returns
-------
N : Numpy array
Array of interpolation functions.
Examples
--------
We can check evaluating at two different points, namely (0, 0) and
(0.5, 0.5). Thus
>>> N = sha6(0, 0)
>>> N_ex = np.array([
... [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
... [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
>>> np.allclose(N, N_ex)
True
and
>>> N = sha6(1/2, 1/2)
>>> N_ex = np.array([
... [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
... [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]])
>>> np.allclose(N, N_ex)
True
"""
N = np.zeros((2, 12))
H = np.array(
[(1 - x - y) - 2*x*(1 - x - y) - 2*y*(1 - x - y),
x - 2*x*(1 - x - y) - 2*x*y,
y - 2*x*y - 2*y*(1-x-y),
4*x*(1 - x - y),
4*x*y,
4*y*(1 - x - y)])
N[0, ::2] = H
N[1, 1::2] = H
return N
def sha3(x, y):
"""Shape functions for a 3-noded triangular element
Parameters
----------
x : float
x coordinate for a point within the element.
y : float
y coordinate for a point within the element.
Returns
-------
N : Numpy array
Array of interpolation functions.
Examples
--------
We can check evaluating at two different points, namely (0, 0) and
(0, 0.5). Thus
>>> N = sha3(0, 0)
>>> N_ex = np.array([
... [1, 0, 0, 0, 0, 0],
... [0, 1, 0, 0, 0, 0]])
>>> np.allclose(N, N_ex)
True
and
>>> N = sha3(1/2, 1/2)
>>> N_ex = np.array([
... [0, 0, 1/2, 0, 1/2, 0],
... [0, 0, 0, 1/2, 0, 1/2]])
>>> np.allclose(N, N_ex)
True
"""
N = np.zeros((2, 6))
H = np.array([
(1 - x - y),
x,
y])
N[0, ::2] = H
N[1, 1::2] = H
return N
def stdm4NQ(r, s, coord):
"""Strain-displacement interpolator B for a 4-noded quad element
Parameters
----------
r : float
r component in the natural space.
s : float
s component in the natural space.
coord : ndarray
Coordinates of the nodes of the element (4, 2).
Returns
-------
    det : float
Determinant evaluated at `(r, s)`.
B : ndarray
Strain-displacement interpolator evaluated at `(r, s)`.
"""
nn = 4
B = np.zeros((3, 2*nn))
dhdx = 0.25*np.array([
[s - 1, -s + 1, s + 1, -s - 1],
[r - 1, -r - 1, r + 1, -r + 1]])
det, jaco_inv = jacoper(dhdx, coord)
dhdx = np.dot(jaco_inv, dhdx)
B[0, ::2] = dhdx[0, :]
B[1, 1::2] = dhdx[1, :]
B[2, ::2] = dhdx[1, :]
B[2, 1::2] = dhdx[0, :]
return det, B
def stdm6NT(r, s, coord):
"""Strain-displacement interpolator B for a 6-noded triang element
Parameters
----------
r : float
r component in the natural space.
s : float
s component in the natural space.
coord : ndarray
Coordinates of the nodes of the element (6, 2).
Returns
-------
    det : float
Determinant evaluated at `(r, s)`.
B : ndarray
Strain-displacement interpolator evaluated at `(r, s)`.
"""
nn = 6
B = np.zeros((3, 2*nn))
dhdx = np.array([
[4*r + 4*s - 3, 4*r - 1, 0, -8*r - 4*s + 4, 4*s, -4*s],
[4*r + 4*s - 3, 0, 4*s - 1, -4*r, 4*r, -4*r - 8*s + 4]])
det, jaco_inv = jacoper(dhdx, coord)
dhdx = np.dot(jaco_inv, dhdx)
B[0, ::2] = dhdx[0, :]
B[1, 1::2] = dhdx[1, :]
B[2, ::2] = dhdx[1, :]
B[2, 1::2] = dhdx[0, :]
return det, B
def stdm3NT(r, s, coord):
"""Strain-displacement interpolator B for a 3-noded triang element
Parameters
----------
r : float
r component in the natural space.
s : float
s component in the natural space.
coord : ndarray
Coordinates of the nodes of the element (3, 2).
Returns
-------
det : float
Determinant evaluated at `(r, s)`.
B : ndarray
Strain-displacement interpolator evaluated at `(r, s)`.
"""
nn = 3
B = np.zeros((3, 2*nn))
dhdx = np.array([
[-1, 1, 0],
[-1, 0, 1]])
det, jaco_inv = jacoper(dhdx, coord)
dhdx = np.dot(jaco_inv, dhdx)
B[0, ::2] = dhdx[0, :]
B[1, 1::2] = dhdx[1, :]
B[2, ::2] = dhdx[1, :]
B[2, 1::2] = dhdx[0, :]
return det, B
def jacoper(dhdx, coord):
"""
Compute the Jacobian of the transformation evaluated at
the Gauss point
Parameters
----------
dhdx : ndarray
Derivatives of the interpolation function with respect to the
natural coordinates.
coord : ndarray
Coordinates of the nodes of the element (nn, 2).
Returns
-------
    det : float
        Determinant of the Jacobian evaluated at `(r, s)`.
    jaco_inv : ndarray (2, 2)
        Inverse of the Jacobian of the transformation evaluated at `(r, s)`.
"""
jaco = dhdx.dot(coord)
det = np.linalg.det(jaco)
if np.isclose(np.abs(det), 0.0):
msg = "Jacobian close to zero. Check the shape of your elements!"
raise ValueError(msg)
jaco_inv = np.linalg.inv(jaco)
if det < 0.0:
msg = "Jacobian is negative. Check your elements orientation!"
raise ValueError(msg)
return det, jaco_inv
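def _check_stdm4NQ():
    """Illustrative check (an addition, not part of the original module):
    for a square of side 2 aligned with the natural axes, the Jacobian
    determinant at the element center is 1."""
    coord = np.array([[-1.0, -1.0], [1.0, -1.0], [1.0, 1.0], [-1.0, 1.0]])
    det, B = stdm4NQ(0.0, 0.0, coord)
    assert np.isclose(det, 1.0)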
#%% Material routines
def umat(nu, E):
"""2D Elasticity consitutive matrix in plane stress
For plane strain use effective properties.
Parameters
----------
nu : float
Poisson coefficient (-1, 0.5).
E : float
Young modulus (>0).
Returns
-------
C : ndarray
Constitutive tensor in Voigt notation.
Examples
--------
>>> C = umat(1/3, 8/3)
>>> C_ex = np.array([
... [3, 1, 0],
... [1, 3, 0],
... [0, 0, 1]])
>>> np.allclose(C, C_ex)
True
"""
C = np.zeros((3, 3))
enu = E/(1 - nu**2)
mnu = (1 - nu)/2
C[0, 0] = enu
C[0, 1] = nu*enu
C[1, 0] = C[0, 1]
C[1, 1] = enu
C[2, 2] = enu*mnu
return C
#%% Elemental strains
def str_el4(coord, ul):
"""Compute the strains at each element integration point
This one is used for 4-noded quadrilateral elements.
Parameters
----------
coord : ndarray
Coordinates of the nodes of the element (4, 2).
ul : ndarray
Array with displacements for the element.
Returns
-------
epsGT : ndarray
Strain components for the Gauss points.
xl : ndarray
Configuration of the Gauss points after deformation.
"""
epsl = np.zeros([3])
epsG = np.zeros([3, 4])
xl = np.zeros([4, 2])
XW, XP = gau.gpoints2x2()
for i in range(4):
ri = XP[i, 0]
si = XP[i, 1]
ddet, B = stdm4NQ(ri, si, coord)
        epsl = np.dot(B, ul)
"""
Distributed under the terms of the BSD 3-Clause License.
The full license is in the file LICENSE, distributed with this software.
Author: <NAME> <<EMAIL>>
Copyright (C) European X-Ray Free-Electron Laser Facility GmbH.
All rights reserved.
"""
import unittest
from unittest.mock import MagicMock, patch
import numpy as np
from extra_foam.pipeline.processors.image_processor import ImageProcessor
from extra_foam.pipeline.exceptions import ImageProcessingError, ProcessingError
from extra_foam.pipeline.tests import _TestDataMixin
class TestImageProcessorTr(_TestDataMixin, unittest.TestCase):
"""Test pulse-resolved ImageProcessor.
For train-resolved data.
"""
def setUp(self):
self._proc = ImageProcessor()
self._proc._ref_sub.update = MagicMock(side_effect=lambda x: x) # no redis server
self._proc._cal_sub.update = MagicMock(
side_effect=lambda x, y: (False, x, False, y)) # no redis server
self._proc._threshold_mask = (-100, 100)
del self._proc._dark
def testPulseSlice(self):
# The sliced_indices for train-resolved data should always be [0]
data, processed = self.data_with_assembled(1, (2, 2))
self._proc.process(data)
# FIXME
# np.testing.assert_array_equal(data['assembled']['data'], processed.image.images)
self.assertIsInstance(processed.image.images, list)
self.assertListEqual([0], processed.image.sliced_indices)
# set a slicer
self._proc._pulse_slicer = slice(0, 2)
self._proc.process(data)
# FIXME
# np.testing.assert_array_equal(data['assembled']['data'], processed.image.images)
self.assertListEqual([0], processed.image.sliced_indices)
def testDarkRecordingAndSubtraction(self):
# TODO: add tests
pass
def testImageShapeChangeOnTheFly(self):
proc = self._proc
        proc._image_mask = np.ones((2, 2), dtype=bool)
data, _ = self.data_with_assembled(1, (2, 2))
proc.process(data)
# image shape changes
with self.assertRaisesRegex(ImageProcessingError, 'image mask'):
data, _ = self.data_with_assembled(2, (4, 2))
proc.process(data)
# image mask remains the same, one needs to clear it by hand
        np.testing.assert_array_equal(np.ones((2, 2), dtype=bool), proc._image_mask)
proc._image_mask = None
# assign a reference image
proc._reference = np.ones((4, 2), dtype=np.float32)
# image shape changes
with self.assertRaisesRegex(ImageProcessingError, 'reference'):
data, _ = self.data_with_assembled(3, (2, 2))
proc.process(data)
# image mask remains the same, one needs to clear it by hand
np.testing.assert_array_equal(np.ones((4, 2), dtype=np.float32), proc._reference)
proc._reference = None
class TestImageProcessorPr(_TestDataMixin, unittest.TestCase):
"""Test pulse-resolved ImageProcessor.
For pulse-resolved data.
"""
def setUp(self):
self._proc = ImageProcessor()
self._proc._ref_sub.update = MagicMock(side_effect=lambda x: x) # no redis server
self._proc._cal_sub.update = MagicMock(
side_effect=lambda x, y: (False, x, False, y)) # no redis server
del self._proc._dark
self._proc._threshold_mask = (-100, 100)
def testDarkRecordingAndSubtraction(self):
self._proc._recording_dark = True
# -----------------------------
# test without dark subtraction
# -----------------------------
self._proc._dark_as_offset = False
data, processed = self.data_with_assembled(1, (4, 2, 2))
dark_run_gt = data['assembled']['data'].copy()
self._proc.process(data)
np.testing.assert_array_almost_equal(dark_run_gt, self._proc._dark)
np.testing.assert_array_almost_equal(
np.nanmean(dark_run_gt, axis=0), self._proc._dark_mean)
# test moving average is going on
data, processed = self.data_with_assembled(1, (4, 2, 2))
assembled_gt = data['assembled']['data'].copy()
dark_run_gt = (dark_run_gt + assembled_gt) / 2.0
self._proc.process(data)
np.testing.assert_array_almost_equal(dark_run_gt, self._proc._dark)
np.testing.assert_array_almost_equal(
np.nanmean(dark_run_gt, axis=0), self._proc._dark_mean)
# test 'assembled' is not subtracted by dark
np.testing.assert_array_almost_equal(data['assembled']['data'], assembled_gt)
# --------------------------
# test with dark subtraction
# --------------------------
self._proc._dark_as_offset = True
del self._proc._dark
self._proc._dark_mean = None
data, processed = self.data_with_assembled(1, (4, 2, 2))
dark_run_gt = data['assembled']['data'].copy()
assembled_gt = dark_run_gt
self._proc.process(data)
np.testing.assert_array_almost_equal(dark_run_gt, self._proc._dark)
np.testing.assert_array_almost_equal(
np.nanmean(dark_run_gt, axis=0), self._proc._dark_mean)
# test 'assembled' is dark run subtracted
np.testing.assert_array_almost_equal(
data['assembled']['data'], assembled_gt - self._proc._dark)
data, processed = self.data_with_assembled(1, (4, 2, 2))
assembled_gt = data['assembled']['data'].copy()
dark_run_gt = (dark_run_gt + assembled_gt) / 2.0
self._proc.process(data)
        np.testing.assert_array_almost_equal(dark_run_gt, self._proc._dark)
import numpy as np
from numpy.random import default_rng
import random
import copy
import math
class ImageManipulator:
def __init__(self):
self._rng = default_rng(seed=42)
def salt_pepper_noise(self, gray_img, ratio):
noise = self._rng.choice(
[-1, 0, 255], size=gray_img.shape, p=[1 - ratio, ratio / 2, ratio / 2]
)
np.copyto(noise, gray_img, where=noise == -1)
gray_img = noise.astype(np.uint8)
return gray_img
def gaussian_noise(self, gray_img, mean, std):
noise = self._rng.normal(loc=mean, scale=std, size=gray_img.shape)
gray_img = gray_img + noise
gray_img = np.clip(gray_img, 0, 255)
        gray_img = np.rint(gray_img).astype(np.uint8)
        return gray_img
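def _demo_image_manipulator():
    """Illustrative usage sketch (an assumption, not part of the original
    file): apply both noise models to a flat synthetic grayscale image."""
    manip = ImageManipulator()
    img = np.full((64, 64), 128, dtype=np.uint8)
    noisy_sp = manip.salt_pepper_noise(img, ratio=0.05)
    noisy_gauss = manip.gaussian_noise(img, mean=0, std=10)
    return noisy_sp, noisy_gauss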
import os
import random
from imutils import paths
import numpy as np
import pandas as pd
import skimage as sk
import skimage.transform
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from tensorflow.keras.utils import to_categorical
import config
def import_minimias_dataset(data_dir: str, label_encoder) -> (np.ndarray, np.ndarray):
"""
Import the dataset by pre-processing the images and encoding the labels.
:param data_dir: Directory to the mini-MIAS images.
:param label_encoder: The label encoder.
:return: Two NumPy arrays, one for the processed images and one for the encoded labels.
"""
# Initialise variables.
images = list()
labels = list()
# Loop over the image paths and update the data and labels lists with the pre-processed images & labels.
for image_path in list(paths.list_images(data_dir)):
images.append(preprocess_image(image_path))
labels.append(image_path.split(os.path.sep)[-2]) # Extract label from path.
# Convert the data and labels lists to NumPy arrays.
images = np.array(images, dtype="float32") # Convert images to a batch.
labels = np.array(labels)
# Encode labels.
labels = encode_labels(labels, label_encoder)
return images, labels
def import_cbisddsm_training_dataset(label_encoder):
"""
Import the dataset getting the image paths (downloaded on BigTMP) and encoding the labels.
:param label_encoder: The label encoder.
:return: Two arrays, one for the image paths and one for the encoded labels.
"""
df = pd.read_csv("./test_mamo1/data/CBIS-DDSM/training.csv")
list_IDs = df['img_path'].values
labels = encode_labels(df['label'].values, label_encoder)
return list_IDs, labels
def preprocess_image(image_path: str) -> np.ndarray:
"""
Pre-processing steps:
* Load the input image in grayscale mode (1 channel),
* resize it to 224x224 pixels for the VGG19 CNN model,
* transform it to an array format,
* normalise the pixel intensities.
:param image_path: The path to the image to preprocess.
:return: The pre-processed image in NumPy array format.
"""
image = load_img(image_path,
color_mode="grayscale",
target_size=(config.VGG_IMG_SIZE['HEIGHT'], config.VGG_IMG_SIZE["WIDTH"]))
image = img_to_array(image)
image /= 255.0
return image
def encode_labels(labels_list: np.ndarray, label_encoder) -> np.ndarray:
"""
Encode labels using one-hot encoding.
:param label_encoder: The label encoder.
:param labels_list: The list of labels in NumPy array format.
:return: The encoded list of labels in NumPy array format.
"""
labels = label_encoder.fit_transform(labels_list)
if label_encoder.classes_.size == 2:
return labels
else:
return to_categorical(labels)
def dataset_stratified_split(split: float, dataset: np.ndarray, labels: np.ndarray) -> \
(np.ndarray, np.ndarray, np.ndarray, np.ndarray):
"""
Partition the data into training and testing splits. Stratify the split to keep the same class distribution in both
sets and shuffle the order to avoid having imbalanced splits.
:param split: Dataset split (e.g. if 0.2 is passed, then the dataset is split in 80%/20%).
:param dataset: The dataset of pre-processed images.
:param labels: The list of labels.
:return: the training and testing sets split in input (X) and label (Y).
"""
train_X, test_X, train_Y, test_Y = train_test_split(dataset,
labels,
test_size=split,
stratify=labels,
random_state=config.RANDOM_SEED,
shuffle=True)
return train_X, test_X, train_Y, test_Y
def random_rotation(image_array: np.ndarray):
"""
Randomly rotate the image
:param image_array: input image
:return: randomly rotated image
"""
random_degree = random.uniform(-20, 20)
return sk.transform.rotate(image_array, random_degree)
def random_noise(image_array: np.ndarray):
"""
Add random noise to image
:param image_array: input image
:return: image with added random noise
"""
return sk.util.random_noise(image_array)
def horizontal_flip(image_array: np.ndarray):
"""
Flip image
:param image_array: input image
    :return: horizontally flipped image
"""
return image_array[:, ::-1]
def generate_image_transforms(images, labels):
"""
    Oversample data by transforming existing images
:param images: input images
:param labels: input labels
:return: updated list of images and labels with extra transformed images and labels
"""
images_with_transforms = images
labels_with_transforms = labels
available_transforms = {'rotate': random_rotation,
'noise': random_noise,
'horizontal_flip': horizontal_flip}
class_balance = get_class_balances(labels)
max_count = max(class_balance)
to_add = [max_count - i for i in class_balance]
for i in range(len(to_add)):
if int(to_add[i]) == 0:
continue
label = np.zeros(len(to_add))
label[i] = 1
        indices = [j for j, x in enumerate(labels) if np.array_equal(x, label)]
# Activity 2: Evaluating Mean Inertia by Cluster After PCA Transformation
# Continuing from Exercise 7:
from sklearn.decomposition import PCA
model = PCA(n_components=best_n_components) # remember, best_n_components = 6
# fit model and transform scaled_features into best_n_components
df_pca = model.fit_transform(scaled_features)
# fit 100 models for each n_clusters 1-10
from sklearn.cluster import KMeans
import numpy as np
mean_inertia_list_PCA = [] # create a list for the average inertia at each n_clusters
for x in range(1, 11): # loop through n_clusters 1-10
inertia_list = [] # create a list for each individual inertia value at n_cluster
for i in range(100):
model = KMeans(n_clusters=x) # instantiate model
model.fit(df_pca) # fit model
inertia = model.inertia_ # get inertia
inertia_list.append(inertia) # append inertia to inertia_list
# moving to the outside loop
    mean_inertia = np.mean(inertia_list)  # compute the mean inertia over the 100 fits
    mean_inertia_list_PCA.append(mean_inertia)  # append to the list of average inertias
import os
import cv2
import numpy as np
import pandas as pd
np.set_printoptions(threshold=np.inf)
np.set_printoptions(suppress=True)
def case_study(path,r_tuple,g_tuple,b_tuple):
new_path = path.replace('pseudo_images','case_study')
# print(new_path)
if not os.path.exists(new_path):
os.makedirs(new_path)
for name in os.listdir(path):
img = cv2.imread(path+name)
img = cv2.resize(img, (600, 600), cv2.INTER_NEAREST)
r_min = r_tuple[0]
r_max = r_tuple[1]
g_min = g_tuple[0]
g_max = g_tuple[1]
b_min = b_tuple[0]
b_max = b_tuple[1]
res = img.shape[0]
        img_filter = np.ones(shape=(600, 600, 3), dtype=np.uint8)
# -*- coding: utf-8 -*-
# @Time : 2021/3/31 14:51
# @Author : Gang
# @File : myCorrectationAnalysisModule.py
import sys, time
import configparser
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QMainWindow, QApplication, QMessageBox, QVBoxLayout, QFileDialog, QLineEdit
from matplotlib import cm, pyplot as plt
import matplotlib as mpl
from correlationFigure import *
import numpy as np
from gangLogger.myLog import MyLog
from gangUtils.generalUtils import GeneralUtils
from correlationConst import *
from ui_QWCorrelationAnalysisModule import Ui_QWCorrelationAnalysisModule
class QmyCorrelationAnalysisModule(QMainWindow):
logger = MyLog("QmyCorrelationAnalysisModule", BASEDIR)
def __init__(self, parent=None):
super(QmyCorrelationAnalysisModule, self).__init__(parent)
self.ui = Ui_QWCorrelationAnalysisModule()
self.ui.setupUi(self)
self.init_set()
self.init_widget()
def init_set(self):
self.keyPara = {}
self.keyPara["SAVE_DATA_STATUE"] = False # 数据保存标志位,初始化false,另外在点击run之后也应该设置false,绘图完成设置true
def init_widget(self):
self.checkConfig()
self.ui.actRun.setEnabled(False)
self.correlationLayout = QVBoxLayout(self)
self.createFigure()
    # =============== Widget slot functions ===============
@pyqtSlot()
def on_actQuit_triggered(self):
self.close()
def closeEvent(self, event):
"""
        Override the window-close handler; save the panel parameters before closing.
        :param event: none
        :return: none
"""
dlg_title = "Warning"
str_info = "Sure to quit?"
reply = QMessageBox.question(self, dlg_title, str_info,
QMessageBox.Yes | QMessageBox.Cancel,
QMessageBox.Cancel)
if reply == QMessageBox.Yes:
self.saveConfigPara()
time.sleep(0.1)
self.logger.debug("Program exits")
event.accept()
else:
event.ignore()
@pyqtSlot()
def on_actOpenFiles_triggered(self):
"""
        Load a data file. Note: this loads single traces that have already been processed and segmented!
:return:
"""
try:
dlgTitle = "Select a single_trace data file" # 对话框标题
filt = "npz Files(*.npz)" # 文件过滤器
desktopPath = GeneralUtils.getDesktopPath()
loadStatue = False
while not loadStatue:
filePath, _ = QFileDialog.getOpenFileName(self, dlgTitle, desktopPath, filt)
loadStatue = False if filePath == "" else True
if not loadStatue:
result = QMessageBox.warning(self, "Warning", "Please select a file!",
QMessageBox.Ok | QMessageBox.Cancel,
QMessageBox.Ok)
if result == QMessageBox.Cancel:
break
else:
# file load success!!!!
self.keyPara["FILE_PATH"] = filePath
logMsg = f"File loading succeeded:{filePath}"
self.addLogMsgWithBar(logMsg)
self.ui.actRun.setEnabled(True)
except Exception as e:
errMsg = f"DATA FILE LOAD ERROR:{e}"
self.addErrorMsgWithBox(errMsg)
@pyqtSlot()
def on_actRun_triggered(self):
try:
self.ui.actRun.setEnabled(False)
self.keyPara["SAVE_DATA_STATUE"] = False
keyPara = self.get_panel_para()
if keyPara is None:
return
else:
self.keyPara.update(keyPara)
self.logger.debug(f"Parameters are updated before running. Parameter list:{self.keyPara}")
self.draw_fig()
except Exception as e:
errMsg = f"RUN ERROR :{e}"
self.addErrorMsgWithBox(errMsg)
finally:
self.ui.actRun.setEnabled(True)
@pyqtSlot()
def on_actSaveData_triggered(self):
try:
preCheck = self.savePreCheck()
if preCheck:
self.saveFig()
# self.saveData()
# TODO
            # Data saving is on hold for now: the output format and the exact
            # saving procedure are still undecided. Save once that is settled!
except Exception as e:
errMsg = f"DATA SAVE ERROR:{e}"
self.addErrorMsgWithBox(errMsg)
    # =============== Widget slot functions ===============
def checkConfig(self):
"""
        Check the saved config parameters.
:return:
"""
configPath = os.path.join(BASEDIR, "config.ini")
if os.path.exists(configPath):
dlgTitle = "Info"
strInfo = "Config file detected. Load it??"
reply = QMessageBox.question(self, dlgTitle, strInfo,
QMessageBox.Yes | QMessageBox.No,
QMessageBox.Yes)
if reply == QMessageBox.Yes:
self.getLastPara()
def saveFig(self):
saveFolderPath = self.keyPara["SAVE_FOLDER_PATH"]
imgPath = os.path.join(saveFolderPath, "Correlation.png")
if os.path.exists(imgPath):
curTime = time.strftime("%Y-%m-%d_%H_%M", time.localtime())
imgPath = os.path.join(saveFolderPath, f"Correlation{curTime}.png")
self.fig.savefig(imgPath, dpi=300, bbox_inches='tight')
logMsg = f"Images have been saved to {saveFolderPath}"
self.addLogMsgWithBar(logMsg)
def draw_fig(self):
VMAX = self.keyPara["le_Vmax"]
VMIN = self.keyPara["le_Vmin"]
COLORMAP = self.keyPara["cmb_ColorMap"]
BINS = int(self.keyPara["le_Bins"])
COND_HIGH = self.keyPara["le_CondHigh"]
COND_LOW = self.keyPara["le_CondLow"]
DPI = int(self.keyPara["le_Fig_dpi"])
FONTSIZE = 12
filePath = self.keyPara["FILE_PATH"]
        dataset = np.load(filePath)
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg, NavigationToolbar2Tk)
import pandas as pd
from tkinter.ttk import Combobox
from tkinter import StringVar
import numpy as np
from ttkthemes import ThemedStyle
from tkinter import ttk
import tkinter as tk
import matplotlib.pyplot as plt
from matplotlib import style
import matplotlib.animation as animation
from matplotlib.figure import Figure
import matplotlib
from datetime import datetime
matplotlib.use("TkAgg")
from PIL import Image, ImageTk
import os
import pathlib
cwd=pathlib.Path(__file__).parent.absolute()
file_list = os.listdir('C:\\UteriAN\\CSV')
sorted_files = sorted(file_list)
last_file = len(sorted_files)
A = 'C:\\UteriAN\\CSV\\' + str(sorted_files[last_file-1])
print(A)
now=datetime.now()
new_date = now.strftime("%d/%m/%Y , %H:%M:%S")
LARGE_FONT= ("Verdana", 12)
style.use("dark_background")
f = Figure(figsize=(5,5), dpi=100)
a = f.add_subplot(111)
f1 = Figure(figsize=(5,5), dpi=100)
a1 = f1.add_subplot(211)
a2 = f1.add_subplot(212)
f2 = Figure(figsize=(5,5), dpi=80)
a3 = f2.add_subplot(421)
a4 = f2.add_subplot(422)
a5 = f2.add_subplot(423)
a6 = f2.add_subplot(424)
a7 = f2.add_subplot(425)
a8 = f2.add_subplot(426)
a9 = f2.add_subplot(427)
a10 = f2.add_subplot(428)
def getParam(wantedParam):
switcher = {
'numOfChannels': 4, # number of channels (coloumns) to use from the data file (max 8)
'numShow': 6000, # number of samples to show each iteration (6000 => 10 min when sample rate = 0.1)
'epsilon': 0.01, # small number to add when numerator is very small
'firstValues': 10, # choose number of first values to mean
'lastValues': 5, # choose number of last values to mean
'samplingRate': 0.1, # 0.1 => 10 samples each sec
'filename': 'C:\\UteriAN\\CSV\\' + str(sorted_files[last_file-1]) , # path to file + file name of the data
}
return switcher.get(wantedParam)
def animate(i):
file_name = getParam('filename') # path to file + file name
df = pd.read_csv(file_name)
k=getParam('numOfChannels') # choose number of columns (channels)
numShow = getParam('numShow')
samplingRate = getParam('samplingRate')
factor = []
meanList = []
if k == 1:
factor = 1.05
if k != 1:
factor = 3
for j in range (0,len(df)):
if df.iloc[j,0:k].count() == k:
meanList.append(np.mean(df.iloc[j,0:k]))
time1 = np.arange(0,len(meanList)*samplingRate,samplingRate)
startTime = time1
meanListNew = meanList
if len(meanList)>numShow:
startTime = time1[(len(meanList)-numShow):len(meanList)]
meanListNew = meanList[(len(meanList)-numShow):len(meanList)]
a.clear()
a.title.set_text('First Plot')
a.grid(True, linewidth=0.3, color='white', linestyle='-')
a.set_xlabel('Time [sec]')
a.set_ylabel('Potential [mv]')
if len(meanList)!= 0 and file_name == str(cwd) + '\\TestWithMatlab.csv':
a.axis(xmin=0,xmax=600,ymin=-1.05,ymax=1.05) # set xlim and ylim
if len(meanList) != 0 and file_name == str(cwd) + '\\ExampleDataFile.csv':
a.axis(xmin=0,xmax=600,ymin=meanList[0]/1.05,ymax=meanList[0]*factor) # set xlim and ylim
if len(meanList) != 0 and file_name == 'C:\\UteriAN\\CSV\\' + str(sorted_files[last_file-1]):
a.axis(xmin=0,xmax=600,ymin=meanList[0]/1.05,ymax=meanList[0]*factor) # set xlim and ylim
if file_name == 'C:\\UteriAN\\CSV\\' + str(sorted_files[last_file-1]) and len(meanListNew) != 0 and len(meanListNew) < 20:
f.canvas.draw()
print('!')
return a.plot(startTime,meanListNew)
def animate2(i):
file_name = getParam('filename') # path to file + file name
df = pd.read_csv(file_name)
k=getParam('numOfChannels') # choose number of columns (channels)
eps = getParam('epsilon')
numShow = getParam('numShow') # 10 min
samplingRate = getParam('samplingRate')
relative = []
first = []
time2 = []
n = getParam('firstValues') # choose number of first values
m = getParam('lastValues') # choose number of last values
if df.iloc[:,k-1].count() < n:
relative = []
if (df.iloc[:,k-1].count() > n and df.iloc[:,k-1].count() < (n+m)):
first = np.mean(df.iloc[0:n,0:k])
for j in range (n,df.iloc[:,k-1].count()):
if df.iloc[j,0:k].count() == k:
relative.append(np.sum(abs((df.iloc[j,0:k]-first)/first)))
if (abs(relative[len(relative)-1]/(relative[len(relative)-2]+0.00001)) > 10):
relative[len(relative)-1] = np.sum(abs(((df.iloc[j,0:k]-first)/(first + eps))))
if df.iloc[:,k-1].count() > (n+m):
first = np.mean(df.iloc[0:n,0:k])
for i in range (n+m+1,df.iloc[:,k-1].count()+1):
if df.iloc[i-1,0:k].count() == k:
relative.append(np.sum(abs((np.mean(df.iloc[i-m:i-1,0:k])-first)/first)))
if (abs(relative[len(relative)-1]/(relative[len(relative)-2]+0.00001)) > 10):
relative[len(relative)-1] = np.sum(abs((np.mean(df.iloc[i-m:i-1,0:k])-first)/(first + eps)))
time2 = np.arange(samplingRate,(len(relative)+1)*samplingRate,samplingRate)
startTime = []
relativeNew = []
if df.iloc[:,k-1].count()-(n+m) <= numShow:
startTime = time2[0:df.iloc[:,k-1].count()-(n+m)]
relativeNew = relative[0:df.iloc[:,k-1].count()-(n+m)]
elif df.iloc[:,k-1].count()-(n+m) > numShow:
startTime = time2[(df.iloc[:,k-1].count()-(n+m)-numShow):df.iloc[:,k-1].count()-(n+m)]
relativeNew = relative[(df.iloc[:,k-1].count()-(n+m)-numShow):df.iloc[:,k-1].count()-(n+m)]
a1.clear()
a1.set_title('Sum Based Relative',size=10)
a1.xaxis.set_ticklabels([])
a1.grid(True, linewidth=0.3, color='white', linestyle='-')
a1.set_ylabel('Relative Potential [A.U.]')
a1.axis(xmin=0,xmax=600,ymin=-0.5,ymax=k*2.15) # set xlim and ylim
return a1.plot(startTime,relativeNew,'turquoise')
def animate4(i):
file_name = getParam('filename') # path to file + file name
df = pd.read_csv(file_name)
k=getParam('numOfChannels') # choose number of columns (channels)
numShow = getParam('numShow') # 10 min
samplingRate = getParam('samplingRate')
absolute = []
first = []
time2 = []
n = getParam('firstValues') # choose number of first values
m = getParam('lastValues') # choose number of last values
factor = []
if k == 1 or k==7 or k==8:
factor = 3
elif k == 2 or k==3:
factor = 500
elif k == 4 or k==5:
factor = 250
elif k == 6:
factor = 200
if df.iloc[:,k-1].count() < n:
absolute = []
if (df.iloc[:,k-1].count() > n and df.iloc[:,k-1].count() < (n+m)):
first = np.mean(df.iloc[0:n,0:k])
for j in range (n,df.iloc[:,k-1].count()):
if df.iloc[j,0:k].count() == k:
absolute.append(np.sum(abs(df.iloc[j,0:k]-first)))
if df.iloc[:,k-1].count() > (n+m):
        first = np.mean(df.iloc[0:n, 0:k])
"""
utils.py including misc functions, e.g.,
1 the class of matrix decomposition method
2 the class of clusterring method
3 some plot figure operators
4 Image preconditioning method for generating random image
using different distribution,
and decorrelate image color space.
"""
from lucid.optvis.param.resize_bilinear_nd import resize_bilinear_nd
import matplotlib.pyplot as plt
# from skimage import data, color
# from skimage.transform import rescale, resize, downscale_local_mean
# import os
# import imageio
# from operator import itemgetter
from re import findall
# import umap
import numpy as np
import sklearn.decomposition
import sklearn.cluster
# from sklearn.utils import check_array
import tensorflow as tf
from decorator import decorator
import lucid.optvis.objectives as objectives
def _make_arg_str(arg):
arg = str(arg)
too_big = len(arg) > 15 or "\n" in arg
return "..." if too_big else arg
@decorator
def wrap_objective(f, *args, **kwds):
"""Decorator for creating Objective factories.
Changes f from the closure: (args) => () => TF Tensor
    into an Objective factory: (args) => Objective
    while preserving function name, arg info, docs... for interactive python.
"""
objective_func = f(*args, **kwds)
objective_name = f.__name__
args_str = " [" + ", ".join([_make_arg_str(arg) for arg in args]) + "]"
description = objective_name.title() + args_str
return objectives.Objective(objective_func, objective_name, description)
def _dot_attr_actmaps(x, y):
xy_dot = tf.reduce_sum(x * y, -1)
return tf.reduce_mean(xy_dot)
@wrap_objective
def dot_attr_actmaps(layer, attr, batch=None):
"""Loss func to compute the dot of attribution and activation maps"""
if batch is None:
attr = attr[None, None, None]
return lambda T: _dot_attr_actmaps(T(layer), attr)
else:
attr = attr[None, None]
return lambda T: _dot_attr_actmaps(T(layer)[batch], attr)
class MatrixDecomposer(object):
"""For Matrix Decomposition to the innermost dimension of a tensor.
This class wraps sklearn.decomposition classes to help them apply to arbitrary
rank tensors. It saves lots of annoying reshaping.
See the original sklearn.decomposition documentation:
http://scikit-learn.org/stable/modules/classes.html#module-sklearn.decomposition
"""
def __init__(self, n_features=3, reduction_alg=None, **kwargs):
"""Constructor for MatrixDecomposer.
Inputs:
          n_features: Number of dimensions to reduce the innermost dimension to.
reduction_alg: A string or sklearn.decomposition class.
kwargs: Additional kwargs to be passed on to the reducer.
"""
if isinstance(reduction_alg, str):
reduction_alg = sklearn.decomposition.__getattribute__(reduction_alg)
self.n_features = n_features
self._decomposer = reduction_alg(n_features, **kwargs)
@classmethod
def _apply_flat(cls, f, acts):
"""Utility for applying f to inner dimension of acts.
Flattens acts into a 2D tensor, applies f, then unflattens so that all
        dimensions except the innermost are unchanged.
"""
orig_shape = acts.shape
acts_flat = acts.reshape([-1, acts.shape[-1]])
new_flat = f(acts_flat)
if not isinstance(new_flat, np.ndarray):
return new_flat
shape = list(orig_shape[:-1]) + [-1]
return new_flat.reshape(shape)
@classmethod
def prec_apply_sum(cls, f):
"""Utility for applying f to inner dimension of acts.
Flattens acts into a 2D tensor, applies f, then unflattens so that all
        dimensions except the innermost are unchanged.
"""
new_flat = f()
new_flat = np.sum(new_flat)
return new_flat
def get_precision(self):
return MatrixDecomposer.prec_apply_sum(self._decomposer.get_precision)
def get_score(self, AM, W):
W = np.reshape(W, (-1, W.shape[-1]))
prediction = np.dot(W, self._decomposer.components_)
# prediction = self._decomposer.inverse_transform(W)
prediction = np.reshape(prediction, (-1, prediction.shape[-1]))
AM = np.reshape(AM, (-1, AM.shape[-1]))
score = sklearn.metrics.explained_variance_score(AM, prediction)
return score
def fit(self, acts):
return MatrixDecomposer._apply_flat(self._decomposer.fit, acts)
def fit_transform(self, acts):
return MatrixDecomposer._apply_flat(self._decomposer.fit_transform, acts)
def transform(self, acts):
return MatrixDecomposer._apply_flat(self._decomposer.transform, acts)
# def transform(self, X):
# """
# E-step to compute transform X, or factors
# for factor analysis
# """
# orig_shape = X.shape
# X_flat = X.reshape([-1, X.shape[-1]])
# X_flat = check_array(X_flat)
# X_flat = X_flat - self._decomposer.mean_
# I = np.eye(len(self._decomposer.components_))
# temp = self._decomposer.components_ / self._decomposer.noise_variance_
# sigma = np.linalg.inv(I + np.dot(temp, self._decomposer.components_.T))
# X_transformed = np.dot(np.dot(X_flat, temp.T), sigma)
# shape = list(orig_shape[:-1]) + [-1]
# return X_transformed.reshape(shape)
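def _demo_matrix_decomposer():
    """Illustrative usage sketch (an assumption, not part of the original
    module): reduce the channel dimension of a rank-4 activation tensor
    with NMF via the wrapper above."""
    acts = np.random.rand(1, 7, 7, 512).astype(np.float32)
    decomposer = MatrixDecomposer(n_features=3, reduction_alg="NMF")
    factors = decomposer.fit_transform(acts)  # shape (1, 7, 7, 3)
    return factors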
class SklearnCluster(object):
"""Helper for clustering to the innermost dimension of a tensor.
This class wraps sklearn.cluster classes to help them apply to arbitrary
rank tensors. It saves lots of annoying reshaping.
See the original sklearn.decomposition documentation:
https://scikit-learn.org/stable/modules/classes.html#module-sklearn.cluster
"""
def __init__(self, n_clusters=6, reduction_alg="KMeans", **kwargs):
"""Constructor for SklearnCluster.
Inputs:
n_features: Numer of dimensions to reduce inner most dimension to.
reduction_alg: A string or sklearn.decomposition class. Defaults to
"KMeans"
kwargs: Additional kwargs to be passed on to the reducer.
"""
if isinstance(reduction_alg, str):
reduction_alg = sklearn.cluster.__getattribute__(reduction_alg)
self.n_clusters = n_clusters
self._decomposer = reduction_alg(n_clusters, **kwargs)
@classmethod
def _apply_flat(cls, f, acts):
"""Utility for applying f to inner dimension of acts.
Flattens acts into a 2D tensor, applies f, then unflattens so that all
dimesnions except innermost are unchanged.
"""
acts_flat = acts.reshape([-1, acts.shape[-1]])
acts_flat = np.transpose(acts_flat, (1, 0))
labels = f(acts_flat)
return labels
def fit_predict(self, acts):
return SklearnCluster._apply_flat(self._decomposer.fit_predict, acts)
def __dir__(self):
dynamic_attrs = dir(self._decomposer)
return self.__dict__.keys()
def save_imgs(images, save_directory, attr_class, factorization_method
, no_slash_layer_name, imgtype_name='opt'):
for i_optimgs in range(len(images)):
if len(images[i_optimgs]) > 1:
images_temp = images[i_optimgs]
w = int(np.sqrt(images_temp.size / 3))
img = images_temp.reshape(w, w, 3)
factorization_method = findall('[A-Z]', factorization_method)
factorization_method = ''.join(factorization_method)
plt.imsave(save_directory + "/" + attr_class + '_' + factorization_method + '_' +
no_slash_layer_name + '_' + imgtype_name + str(i_optimgs) + ".jpg", img)
def save_imgs_seperate_vis(images, save_directory, attr_class, factorization_method
, no_slash_layer_name, channel_shap_one, vis_channel_index=None):
for i_optimgs in range(len(images)):
if len(images[i_optimgs]) > 1:
images_temp = images[i_optimgs]
w = int(np.sqrt(images_temp.size / 3))
img = images_temp.reshape(w, w, 3)
factorization_method = findall('[A-Z]', factorization_method)
factorization_method = ''.join(factorization_method)
plt.imsave(save_directory + '/' + channel_shap_one[i_optimgs] + attr_class + '_' + factorization_method + '_' +
no_slash_layer_name + str(vis_channel_index[i_optimgs][0]) + '.jpg', img)
def plot(data, save_directory, attr_class, factorization_method,
no_slash_layer_name, imgtype_name, index_saveimg, xi=None, cmap='RdBu_r', cmap2='seismic', alpha=0.8):
plt.ioff()
# plt.ion()
fig = plt.figure(1, figsize=[2.24, 2.24], dpi=100, frameon=False)
axis = plt.Axes(fig, [0., 0., 1., 1.])
axis.set_axis_off()
fig.add_axes(axis)
dx, dy = 0.05, 0.05
xx = np.arange(0.0, data.shape[1]+dx, dx)
yy = np.arange(0.0, data.shape[0]+dy, dy)
xmin, xmax, ymin, ymax = np.amin(xx), np.amax(xx), np.amin(yy), np.amax(yy)
extent = xmin, xmax, ymin, ymax
cmap_xi = plt.get_cmap(cmap2)
cmap_xi.set_bad(alpha=0)
overlay = xi
if len(data.shape) == 3:
data = np.mean(data, 2)
# axis.imshow(data, extent=extent, interpolation='none', cmap=cmap, vmin=-abs_min, vmax=abs_max)
axis.imshow(data, extent=extent, interpolation='none', cmap=cmap)
axis.imshow(overlay, extent=extent, interpolation='none', cmap=cmap_xi, alpha=alpha)
factorization_method = findall('[A-Z]', factorization_method)
factorization_method = ''.join(factorization_method)
plt.savefig(save_directory + '/' + attr_class + '_' + factorization_method + '_' +
no_slash_layer_name + '_' + imgtype_name + str(index_saveimg) + '.jpg') # 'RdBu_r' 'hot'
# plt.show()
# plt.close(1)
def plot_seperate(data, save_directory, attr_class, factorization_method,
no_slash_layer_name, imgtype_name, score_str, index_num, xi=None, cmap='RdBu_r', alpha=0.8):
plt.ioff()
# plt.ion()
fig = plt.figure(1, figsize=[2.24, 2.24], dpi=100, frameon=False)
axis = plt.Axes(fig, [0., 0., 1., 1.])
axis.set_axis_off()
fig.add_axes(axis)
dx, dy = 0.05, 0.05
xx = np.arange(0.0, data.shape[1]+dx, dx)
yy = np.arange(0.0, data.shape[0]+dy, dy)
    xmin, xmax, ymin, ymax = np.amin(xx), np.amax(xx), np.amin(yy), np.amax(yy)
from collections import Counter
import numpy as np
import pandas as pd
from src.data.loaders.ascad import ASCADData
class TraceCategory:
def __init__(self, trace_category, trace_range):
self.t_range = trace_range
self.traces = np.array(trace_category["traces"])
self.labels = np.array(trace_category["labels"])
self.hamming_weights = np.array(trace_category["hamming_weights"])
self.unmasked_hw = np.array(trace_category["hw_unmasked"])
# 3rd state byte after 1st round SBox
self.aes_r1b3 = np.array(trace_category["aes_r1b3"])
self.tk_cache = {}
self.hw_cache = {}
self.ct_cache = {}
# Take the Hamming Weight of the third state byte.
self.hw_target_byte = 2
# Take Hamming Weight of the state after SBox from the first round.
self.hw_target_round = 0
self.hw_target = 0
def filter_by_key(self, key):
"""
Filters traces by a given first key byte.
"""
if key not in self.tk_cache:
            ixs = np.where(np.array(self.labels) == key)
# This file is part of the P3IV Simulator (https://github.com/fzi-forschungszentrum-informatik/P3IV),
# copyright by FZI Forschungszentrum Informatik, licensed under the BSD-3 license (see LICENSE file in main directory)
import unittest
import numpy as np
import matplotlib.pyplot as plt
import logging
from p3iv_utils_probability.distributions import *
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
def plot_gaussians(gs, sigma=2, title=""):
fig, ax = plt.subplots()
fig.suptitle(title)
for g in gs:
plot_gaussian_new(g, sigma=sigma, ax=ax)
plt.show()
def plot_gaussian_new(g, sigma=2, ax=None):
color = np.random.rand(
sigma + 1,
)
x = np.arange(len(g.mean))
ax.plot(x, g.mean, linestyle="--", color=color)
for s in range(1, sigma + 1):
upper_bound = g.upper_bound(s)
lower_bound = g.lower_bound(s)
valid_ = upper_bound > lower_bound
ax.fill_between(x[valid_], lower_bound[valid_], upper_bound[valid_], facecolors=color, alpha=0.3)
class TestBasics(unittest.TestCase):
def test_univariate_float(self):
m = 1.0
v = 5.0
test = UnivariateNormalDistribution(mean=m, covariance=v)
r = test.range(2)
self.assertEqual(r.shape, (2,))
self.assertAlmostEqual(np.sum(r - np.array([-9.0, 11.0])), 0.0)
self.assertAlmostEqual(test.pdf(2)[0], 0.161434225872)
self.assertAlmostEqual(test.cdf(1)[0], 0.5)
def test_truncated_univariate_float(self):
m = 1.0
v = 5.0
test = TruncatedUnivariateNormalDistribution(mean=m, covariance=v, lower_truncation=0, upper_truncation=4)
r = test.range(2)
self.assertAlmostEqual(np.sum(r - np.array([0.0, 4.0])), 0.0)
def test_univariate_array(self):
m = np.array([1])
v = np.array([5])
test = UnivariateNormalDistribution(mean=m, covariance=v)
r = test.range(2)
self.assertEqual(r[1], np.array([11.0]))
self.assertEqual(r[0], np.array([-9.0]))
def test_univariate_seq(self):
test = UnivariateNormalDistributionSequence()
test.resize(100)
test.mean = np.arange(100)
test.covariance = np.linspace(0.1, 10, 100)
t = test[:5]
r = t.range(2)
upper = r[:, 1]
lower = r[:, 0]
self.assertAlmostEqual(np.sum(upper - np.asarray([0.2, 1.4, 2.6, 3.8, 5.0])), 0.0)
self.assertAlmostEqual(np.sum(lower - np.asarray([-0.2, 0.6, 1.4, 2.2, 3.0])), 0.0)
title = "UnivariateNormalDistributionSequence"
# plot_gaussians([test], sigma=3, title=title)
def test_univariate_seq_append(self):
title = "UnivariateNormalDistributionSequence"
test = UnivariateNormalDistributionSequence()
test.resize(50)
test.mean = np.arange(50)
test.covariance = np.linspace(0.1, 10, 50)
test2 = UnivariateNormalDistributionSequence()
test2.resize(50)
test2.mean = np.arange(50, 100)
test2.covariance = np.linspace(0.1, 10, 50)
test.append(test2)
t = test[:5]
r = t.range(2)
upper = r[:, 1]
lower = r[:, 0]
self.assertAlmostEqual(np.sum(t.mean - np.asarray([0, 1, 2, 3, 4])), 0.0)
self.assertAlmostEqual(
np.sum(t.covariance - np.asarray([0.1, 0.30204082, 0.50408163, 0.70612245, 0.90816327])), 0.0
)
self.assertAlmostEqual(np.sum(upper - np.asarray([0.2, 1.60408163, 3.00816327, 4.4122449, 5.81632653])), 0.0)
self.assertAlmostEqual(np.sum(lower - np.asarray([-0.2, 0.39591837, 0.99183673, 1.5877551, 2.18367347])), 0.0)
# plot_gaussians([test], sigma=3, title=title)
def test_bivariate(self):
m = np.array([[1, 2]])
v = np.array([[5, 0], [0, 1]])
test = BivariateNormalDistribution(mean=m, covariance=v)
r = test.range(2)
self.assertAlmostEqual(np.sum(r - np.asarray([1.0, 2.0, 0.0, 4.47213595, 2.0])), 0.0)
x = (1.5, 3, 4)
y = (3, 1, 5)
self.assertAlmostEqual(np.sum(test.pdf(x, y) - np.asarray([0.0421047, 0.02893811, 0.00032147])), 0.0)
self.assertAlmostEqual(test.cdf(1, 2), 0.25)
def test_truncated_bivariate(self):
m = np.array([[1, 2]])
v = np.array([[5, 0], [0, 1]])
tr_up = np.array([[7, 1], [4, 4]])
tr_lw = np.array([[0, 0], [0, 0]])
test = TruncatedBivariateNormalDistribution(
mean=m, covariance=v, upper_truncation=tr_up, lower_truncation=tr_lw
)
def test_bivariate_seq(self):
title = "BivariateNormalDistributionSequence"
m = np.array([[1, 0], [2, 2], [3, 3]])
v = np.array([[[5, 0], [0, 1]], [[3, 0], [0, 3]], [[1, 0], [0, 1]]])
test = BivariateNormalDistributionSequence()
test.resize(3)
test.mean = m
test.covariance = v
r = test.range(2)
self.assertAlmostEqual(np.sum(r[0] - np.asarray([1.0, 0.0, 0.0, 4.47213595, 2.0])), 0.0)
self.assertAlmostEqual(np.sum(r[1] - np.asarray([2.0, 2.0, 0.0, 3.46410162, 3.46410162])), 0.0)
self.assertAlmostEqual(np.sum(r[2] - np.asarray([3.0, 3.0, 0.0, 2.0, 2.0])), 0.0)
def test_bivariate_seq_mean(self):
title = "BivariateNormalDistributionSequence"
m = np.array([[1, 0], [2, 2], [3, 3]])
test = BivariateNormalDistributionSequence()
test.resize(3)
test.mean = m
t = test[1:]
r = t.range(2)
        truth = np.asarray([[2.0, 2.0, 0.0, 0.0, 0.0], [3.0, 3.0, 0.0, 0.0, 0.0]])
import unittest
import numpy as np
import openmdao.api as om
import numpy.testing as npt
import wisdem.commonse.environment as env
from wisdem.commonse import gravity as g
from openmdao.utils.assert_utils import assert_check_partials
npts = 100
myones = np.ones((npts,))
class TestPowerWind(unittest.TestCase):
def setUp(self):
self.params = {}
self.unknowns = {}
self.resid = None
self.params["shearExp"] = 2.0
self.params["Uref"] = 5.0
self.params["zref"] = 3.0
self.params["z0"] = 0.0
self.params["z"] = 9.0 * myones
self.wind = env.PowerWind(nPoints=npts)
def testRegular(self):
self.wind.compute(self.params, self.unknowns)
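        # U = Uref * ((z - z0) / (zref - z0))**shearExp = 5 * (9 / 3)**2 = 45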
expect = 45.0 * myones
npt.assert_equal(self.unknowns["U"], expect)
def testIndex(self):
self.params["z"][1:] = -1.0
self.wind.compute(self.params, self.unknowns)
expect = 45.0 * myones
expect[1:] = 0.0
npt.assert_equal(self.unknowns["U"], expect)
def testZ0(self):
self.params["z0"] = 10.0
self.params["z"] += 10.0
self.params["zref"] += 10.0
self.wind.compute(self.params, self.unknowns)
expect = 45.0 * myones
npt.assert_equal(self.unknowns["U"], expect)
class TestLinearWaves(unittest.TestCase):
def setUp(self):
self.params = {}
self.unknowns = {}
self.resid = None
self.params["rho_water"] = 1e3
self.params["Hsig_wave"] = 2.0
self.params["Uc"] = 5.0
self.params["z_floor"] = -30.0
self.params["z_surface"] = 0.0
self.params["z"] = -2.0 * myones
self.wave = env.LinearWaves(nPoints=npts)
def testRegular(self):
D = np.abs(self.params["z_floor"])
k = 2.5
omega = np.sqrt(g * k * np.tanh(k * D))
self.params["Tsig_wave"] = 2.0 * np.pi / omega
self.wave.compute(self.params, self.unknowns)
a = 1.0 # 0.5*Hsig_wave
z = -2.0
rho = 1e3
U_exp = 5 + omega * a * np.cosh(k * (z + D)) / np.sinh(k * D)
W_exp = -omega * a * np.sinh(k * (z + D)) / np.sinh(k * D)
V_exp = np.sqrt(U_exp ** 2 + W_exp ** 2)
A_exp = omega * omega * a * np.cosh(k * (z + D)) / np.sinh(k * D)
p_exp = -rho * g * (z - a * np.cosh(k * (z + D)) / np.cosh(k * D))
npt.assert_almost_equal(self.unknowns["U"], U_exp)
npt.assert_almost_equal(self.unknowns["W"], W_exp)
npt.assert_almost_equal(self.unknowns["V"], V_exp)
npt.assert_almost_equal(self.unknowns["A"], A_exp)
npt.assert_almost_equal(self.unknowns["p"], p_exp)
# Positive depth input
self.params["z_floor"] = 30.0
self.wave.compute(self.params, self.unknowns)
npt.assert_almost_equal(self.unknowns["U"], U_exp)
npt.assert_almost_equal(self.unknowns["W"], W_exp)
npt.assert_almost_equal(self.unknowns["V"], V_exp)
npt.assert_almost_equal(self.unknowns["A"], A_exp)
npt.assert_almost_equal(self.unknowns["p"], p_exp)
def testPositiveZ(self):
self.params["Tsig_wave"] = 2.0
self.params["z"] = 2.0 * myones
self.wave.compute(self.params, self.unknowns)
npt.assert_equal(self.unknowns["U"], 0.0)
npt.assert_equal(self.unknowns["W"], 0.0)
npt.assert_equal(self.unknowns["V"], 0.0)
npt.assert_equal(self.unknowns["A"], 0.0)
npt.assert_equal(self.unknowns["p"], 0.0)
def testQuiet(self):
self.params["Hsig_wave"] = 0.0
self.params["Tsig_wave"] = 2.0
self.wave.compute(self.params, self.unknowns)
p_exp = 2e3 * g
npt.assert_equal(self.unknowns["U"], 5.0)
npt.assert_equal(self.unknowns["W"], 0.0)
npt.assert_equal(self.unknowns["V"], 5.0)
        npt.assert_equal(self.unknowns["A"], 0.0)
from pathlib import Path
import numpy as np
import gym
from gym import spaces, logger
from gym.utils import seeding
import matplotlib.pyplot as plt
from pycel import ExcelCompiler
class Parameters:
# (Avoid sampling random variables here: they would not be resampled upon reset())
# problem-specific parameters
techs = 3 # number of technologies (Offshore wind power, blue hydrogen, green hydrogen)
# fmt: off
reward_types = 6 # capital expenditure (capex), operating expenditure (opex), revenue, carbon emissions, total jobs supported, total economic impact
steps_per_episode = 20 # number of years in the planning horizon (2031 -> 2050 = 20)
# fmt: on
# This 'Pathways to Net Zero' environment manipulates a spreadsheet loaded in memory. The following 20 columns correspond to years 2031 to 2050 in tabs named 'Outputs' and 'CCUS':
# fmt: off
pathways2Net0ColumnInds = np.array(['P','Q','R','S','T','U','V','W','X','Y','Z','AA','AB','AC','AD','AE','AF','AG','AH','AI'])
# fmt: on
# The following 20 rows correspond to years 2031 to 2050 in tabs named 'BREEZE', 'GALE', and 'STORM':
pathways2Net0RowInds = np.arange(36, 36 + steps_per_episode)
# pathways2Net0ColumnInds[state.step_count] and pathways2Net0RowInds[state.step_count] will locate the current year's column / row respectively
# Multiplicative noise is applied to all costs. The parameters of this randomisation are:
noise_mu = 1.0
noise_sigma = 0.1
noise_clipping = 0.5 # (i.e., costs are reduced by 50% at the most)
noise_sigma_factor = np.sqrt(0.1) # this factor is applied to make CCUS capex & opex less volatile than other costs
# The costs in the Carbon capture utilisation and storage (CCUS) tab to be randomised are capex, opex, and carbon price, with these row numbers:
pathways2Net0RandomRowInds_CCUS = np.array([23, 24, 26])
# The costs in the 'Outputs' tab to be randomised are Offshore wind - Devex, Capex, and Opex, Green Hydrogen - Capex, Fixed Opex, and Variable Opex, Blue Hydrogen - price, Gas feedstock price, Capex, Fixed opex, Variable opex, and Natural gas cost, with these row numbers:
# fmt: off
pathways2Net0RandomRowInds_Outputs = np.array([148, 149, 150, 153, 154, 155, 158, 159, 163, 164, 165, 166])
# fmt: on
# multiplicative noise's mu and sigma, and clipping point:
noise_mu = 1.0
noise_sigma = 0.1 # or try 0.1, 0.0, np.sqrt(0.001), 0.02, np.sqrt(0.0003), 0.015, 0.01, np.sqrt(0.00001), 0.001
noise_clipping = 0.5 # or try 0.001, 0.1, 0.5 (i.e., original costs are reduced by 50% at the most)
noise_sigma_factor = np.sqrt(0.1) # as in https://github.com/rangl-labs/netzerotc/issues/36, CCUS capex & opex (CCUS row 23 and 24) should have smaller standard deviations
stochastic_sigma = False # set to False to use one single noise_sigma; set to True to randomly switch between two different std:
# noise_sigma_low = 0.001
# noise_sigma_high = np.sqrt(0.00001)
# OR, sample a sigma from a uniform distribution centered at noise_sigma with total 2-side range of noise_sigma_range:
noise_sigma_range = 0.002
noise_observability = False # set to True to make the observation_space contain randomized costs/prices; set to False to restrict the observation_space to contain only the state.step_count
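def _sample_noise_factor(param=Parameters()):
    """Illustrative sketch (an assumption, not part of the original
    environment): draw one multiplicative noise factor using the mean,
    standard deviation and lower clipping point described above."""
    factor = np.random.normal(param.noise_mu, param.noise_sigma)
    return max(factor, param.noise_clipping)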
class State:
def __init__(self, seed=None, param=Parameters()):
np.random.seed(seed=seed)
self.initialise_state(param)
def initialise_state(self, param):
# create local copy of spreadsheet model to be manipulated
self.pathways2Net0 = param.pathways2Net0
# create an array of costs for the current year and populate with 2030 costs (column 'O' in 'CCUS' and 'Outputs' tabs):
self.randomized_costs = np.ones(
len(param.pathways2Net0RandomRowInds_CCUS)
+ len(param.pathways2Net0RandomRowInds_Outputs)
)
for costRowID in np.arange(len(param.pathways2Net0RandomRowInds_CCUS)):
self.randomized_costs[costRowID] = param.pathways2Net0.evaluate(
"CCUS!O" + str(param.pathways2Net0RandomRowInds_CCUS[costRowID])
)
for costRowID in np.arange(len(param.pathways2Net0RandomRowInds_Outputs)):
self.randomized_costs[
len(param.pathways2Net0RandomRowInds_CCUS) + costRowID
] = param.pathways2Net0.evaluate(
"Outputs!O" + str(param.pathways2Net0RandomRowInds_Outputs[costRowID])
)
self.noise_observability = param.noise_observability
# time variables
# NOTE: our convention is to update step_count at the beginning of the gym step() function
self.step_count = -1
self.steps_per_episode = param.steps_per_episode
# initial jobs supported in 2030
        self.jobs = np.float32(110484)
# variable to record jobs created each year
self.jobs_increment = np.zeros(1, dtype=np.float32) # initialized as 0
# fmt: off
# initial economic impact in 2030
self.econoImpact = np.float32(49938.9809739566)
# initial technology deployments in 2030
self.deployments = np.array([param.pathways2Net0.evaluate('GALE!P35'),
param.pathways2Net0.evaluate('GALE!X35'),
param.pathways2Net0.evaluate('GALE!Y35')],
dtype=np.float32)
# initial CO2 emissions in 2030
self.emission_amount = np.float32(param.pathways2Net0.evaluate('CCUS!O63'))
# fmt: on
# histories
self.observations_all = []
self.actions_all = []
self.rewards_all = []
self.weightedRewardComponents_all = []
self.deployments_all = []
self.emission_amount_all = []
    def to_observation(self):
        # when noise is observable, the observation includes the randomized
        # costs; otherwise it contains only the step count
        if self.noise_observability:
            observation = (self.step_count,) + tuple(self.randomized_costs)
        else:
            observation = (self.step_count,)
        return observation
def is_done(self):
done = bool(self.step_count >= self.steps_per_episode - 1)
return done
def record(state, action, reward, weightedRewardComponents):
state.observations_all.append(state.to_observation())
state.actions_all.append(action)
state.rewards_all.append(reward)
state.weightedRewardComponents_all.append(weightedRewardComponents)
state.deployments_all.append(state.deployments)
state.emission_amount_all.append(state.emission_amount)
def observation_space(self):
    obs_low = np.full_like(self.state.to_observation(), 0, dtype=np.float32)
    obs_low[0] = -1  # first entry of observation is the timestep
    obs_high = np.full_like(self.state.to_observation(), 1e5, dtype=np.float32)
    obs_high[0] = self.param.steps_per_episode  # first entry of observation is the timestep
    if self.state.noise_observability:
        obs_high[5] = 1e6
        obs_high[7] = 1e6
result = spaces.Box(obs_low, obs_high, dtype=np.float32)
return result
def action_space(self):
# action specifies yearly increments in offshore wind power, blue hydrogen, and green hydrogen respectively
# lower limit on increments is zero
act_low = np.zeros(self.param.techs, dtype=np.float32)
# upper limits on increments depend on the technology
act_high = np.float32([27, 25, 24])
result = spaces.Box(act_low, act_high, dtype=np.float32)
return result
def apply_action(action, state, param):
# copy model from state to param
param.pathways2Net0 = state.pathways2Net0
# each technology gives rewards of various types (ie costs and revenues)
# create an array to hold the reward components (aggregated over all technologies):
weightedRewardComponents = np.zeros(
param.reward_types
)
# read in the current deployment for offshore wind power
offshoreWind = param.pathways2Net0.evaluate(
# "GALE!P" + str(param.pathways2Net0RowInds[state.step_count] - 1)
"GALE!S" + str(param.pathways2Net0RowInds[state.step_count] - 1)
)
# add the increment of offshore wind for this timestep (specified by the action), imposing a maximum deployment
offshoreWind = np.clip(offshoreWind + action[0], offshoreWind, 380)
# similarly for blue and green hydrogen
blueHydrogen = param.pathways2Net0.evaluate(
"GALE!X" + str(param.pathways2Net0RowInds[state.step_count] - 1)
)
blueHydrogen = np.clip(blueHydrogen + action[1], blueHydrogen, 270)
greenHydrogen = param.pathways2Net0.evaluate(
"GALE!Y" + str(param.pathways2Net0RowInds[state.step_count] - 1)
)
greenHydrogen = np.clip(greenHydrogen + action[2], greenHydrogen, 253)
# record the new deployments in an array
state.deployments = np.array(
[offshoreWind, blueHydrogen, greenHydrogen], dtype=np.float32
)
# evaluate the model cells containing the deployment values for the current timestep (for offshore wind power, blue hydrogen and green hydrogen respectively)
# this enables the current timestep's deployment values to be entered into the model
param.pathways2Net0.evaluate(
# "GALE!P" + str(param.pathways2Net0RowInds[state.step_count])
"GALE!S" + str(param.pathways2Net0RowInds[state.step_count])
)
param.pathways2Net0.evaluate(
"GALE!X" + str(param.pathways2Net0RowInds[state.step_count])
)
param.pathways2Net0.evaluate(
"GALE!Y" + str(param.pathways2Net0RowInds[state.step_count])
)
# similarly, evaluate the current timestep's capex, opex, revenue, and emissions values for all technologies
# fmt: off
capex_all = np.float32([param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'24'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'28'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'32'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'36'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'41')])
opex_all = np.float32([param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'25'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'29'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'33'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'37'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'42')])
revenue_all = np.float32([param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'26'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'30'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'34'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'38'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'43')])
# fmt: on
emissions = np.float32(
param.pathways2Net0.evaluate(
"CCUS!" + param.pathways2Net0ColumnInds[state.step_count] + "68"
)
)
# enter the deployment values for this timestep into the model
param.pathways2Net0.set_value(
# "GALE!P" + str(param.pathways2Net0RowInds[state.step_count]), offshoreWind
"GALE!S" + str(param.pathways2Net0RowInds[state.step_count]), offshoreWind
)
param.pathways2Net0.set_value(
"GALE!X" + str(param.pathways2Net0RowInds[state.step_count]), blueHydrogen
)
param.pathways2Net0.set_value(
"GALE!Y" + str(param.pathways2Net0RowInds[state.step_count]), greenHydrogen
)
# re-evaluate the current timestep's capex, opex, revenue, and emissions values for all technologies
# fmt: off
capex_all = np.float32([param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'24'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'28'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'32'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'36'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'41')])
opex_all = np.float32([param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'25'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'29'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'33'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'37'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'42')])
revenue_all = np.float32([param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'26'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'30'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'34'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'38'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'43')])
# fmt: on
# read gross carbon emissions (before CCUS) from model
state.emission_amount = np.float32(
param.pathways2Net0.evaluate(
"CCUS!" + param.pathways2Net0ColumnInds[state.step_count] + "63"
)
)
# read net carbon emissions (after CCUS) from model
emissions = np.float32(
param.pathways2Net0.evaluate(
"CCUS!" + param.pathways2Net0ColumnInds[state.step_count] + "68"
)
)
# calculate the total capex, opex, revenue and emissions
weightedRewardComponents[0] = np.sum(capex_all)
weightedRewardComponents[1] = np.sum(opex_all)
weightedRewardComponents[2] = np.sum(revenue_all)
weightedRewardComponents[3] = emissions
weightedRewardComponents[5] = state.econoImpact
    # calculate number of jobs supported as 0.25 * (capex + opex + 1050) / 0.05:
weightedRewardComponents[4] = (
0.25 * (weightedRewardComponents[0] + weightedRewardComponents[1] + 1050) / 0.05
)
state.jobs_increment = weightedRewardComponents[-2] - state.jobs
state.jobs = weightedRewardComponents[-2]
# calculate reward for this timestep: revenue - (capex + opex + emissions) + timestep * (increment in jobs)
reward = (
weightedRewardComponents[2] - np.sum(weightedRewardComponents[[0, 1, 3]]) + (state.step_count * state.jobs_increment)
)
# copy model from param to state
state.pathways2Net0 = param.pathways2Net0
return state, reward, weightedRewardComponents
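# Hypothetical worked example (not part of the original environment) of the
# reward defined in apply_action():
# revenue - (capex + opex + emissions) + step_count * jobs_increment
def _example_reward(capex=10.0, opex=5.0, revenue=30.0, emissions=2.0,
                    step_count=3, jobs_increment=0.5):
    return revenue - (capex + opex + emissions) + step_count * jobs_increment  # 14.5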
def verify_constraints(state):
verify = True
return verify
def randomise(state, action, param):
# copy model from state to param
param.pathways2Net0 = state.pathways2Net0
# noise will be applied by multiplication
# evaluate capex, opex, revenue, and emissions for each technology:
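    # (the evaluate() calls below appear to be made for their side effect,
    #  mirroring the pattern in apply_action(): the model cells must be
    #  evaluated before set_value() takes effect, so the returned arrays
    #  are deliberately discarded)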
# fmt: off
np.float32([param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'24'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'28'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'32'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'36'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'41')])
np.float32([param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'25'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'29'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'33'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'37'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'42')])
np.float32([param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'26'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'30'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'34'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'38'),
param.pathways2Net0.evaluate('Outputs!'+param.pathways2Net0ColumnInds[state.step_count]+'43')])
np.float32(param.pathways2Net0.evaluate('CCUS!'+param.pathways2Net0ColumnInds[state.step_count]+'68'))
# fmt: on
rowInds_CCUS = param.pathways2Net0RandomRowInds_CCUS
rowInds_Outputs = param.pathways2Net0RandomRowInds_Outputs
# specify smaller standard deviation for CCUS capex & opex
noise_sigma_CCUS = np.full(len(rowInds_CCUS), param.noise_sigma) * np.array([param.noise_sigma_factor, param.noise_sigma_factor, 1.0])
    # generate Gaussian noise N(noise_mu, noise_sigma), clipped below at a (positive) minimum value:
multiplicativeNoise_CCUS = np.maximum(
param.noise_clipping,
np.random.randn(len(rowInds_CCUS)) * noise_sigma_CCUS + param.noise_mu,
)
multiplicativeNoise_Outputs = np.maximum(
param.noise_clipping,
np.random.randn(len(rowInds_Outputs)) * param.noise_sigma + param.noise_mu,
)
# for each technology:
# for each of its costs and revenues:
# multiply the values at the current and all future timesteps by the same (independent) random number
year_counter = 0
# for each year in the model:
for yearColumnID in param.pathways2Net0ColumnInds[state.step_count :]:
# for each of the CCUS and emissions costs and revenues:
for costRowID in np.arange(len(rowInds_CCUS)):
# read the current cost / revenue
currentCost = param.pathways2Net0.evaluate(
"CCUS!" + yearColumnID + str(rowInds_CCUS[costRowID])
)
# apply noise
param.pathways2Net0.set_value(
"CCUS!" + yearColumnID + str(rowInds_CCUS[costRowID]),
multiplicativeNoise_CCUS[costRowID] * currentCost,
)
# for closed-loop mode, record the current timestep's random costs:
if year_counter == 0:
state.randomized_costs[costRowID] = (
multiplicativeNoise_CCUS[costRowID] * currentCost
)
# similarly for all other costs and revenues:
for costRowID in np.arange(len(rowInds_Outputs)):
currentCost = param.pathways2Net0.evaluate(
"Outputs!" + yearColumnID + str(rowInds_Outputs[costRowID])
)
param.pathways2Net0.set_value(
"Outputs!" + yearColumnID + str(rowInds_Outputs[costRowID]),
multiplicativeNoise_Outputs[costRowID] * currentCost,
)
if year_counter == 0:
state.randomized_costs[len(rowInds_CCUS) + costRowID] = (
multiplicativeNoise_Outputs[costRowID] * currentCost
)
# set blue hydrogen price = blue hydrogen gas feedstock price + 20:
param.pathways2Net0.set_value(
"Outputs!" + yearColumnID + "158",
param.pathways2Net0.evaluate("Outputs!" + yearColumnID + "159") + 20.0,
)
if year_counter == 0:
state.randomized_costs[
len(rowInds_CCUS) + 6
] = param.pathways2Net0.evaluate("Outputs!" + yearColumnID + "158")
year_counter = year_counter + 1
# copy model from param to state
state.pathways2Net0 = param.pathways2Net0
return state
def reset_param(param):
# use param.pathways2Net0_reset (the reference model) to reset the randomised costs and revenues in param.pathways2Net0 (the working model)
# tabs to reset:
spreadsheets = np.array(["GALE", "CCUS", "Outputs"])
# columns to reset in each tab:
columnInds_BySheets = np.array(
[
# np.array(["P", "X", "Y"]),
np.array(["S", "X", "Y"]),
param.pathways2Net0ColumnInds,
param.pathways2Net0ColumnInds,
]
)
# rows to reset in each tab:
rowInds_BySheets = np.array(
[
param.pathways2Net0RowInds,
param.pathways2Net0RandomRowInds_CCUS,
param.pathways2Net0RandomRowInds_Outputs,
]
)
# for each tab to reset:
for iSheet in np.arange(len(spreadsheets)):
# for each column to reset:
for iColumn in columnInds_BySheets[iSheet]:
# for each row to reset:
for iRow in rowInds_BySheets[iSheet]:
# reset cell to reference value
param.pathways2Net0.set_value(
spreadsheets[iSheet] + "!" + iColumn + str(iRow),
param.pathways2Net0_reset.evaluate(
spreadsheets[iSheet] + "!" + iColumn + str(iRow)
),
)
return param
def cal_reset_diff(param):
# a helper function to check that reset_param works correctly
abs_diff = 0.0
# reload the model:
workbooks_dir = Path(__file__).resolve().parent.parent / "compiled_workbook_objects"
# pathways2Net0_loaded = ExcelCompiler.from_file(filename=f"{workbooks_dir}/PathwaysToNetZero_Simplified_Anonymized_Compiled")
pathways2Net0_loaded = ExcelCompiler.from_file(filename=f"{workbooks_dir}/PathwaysToNetZero_Simplified_Anonymized_Modified_Compiled")
spreadsheets = np.array(["GALE", "CCUS", "Outputs"])
columnInds_BySheets = np.array(
[
# np.array(["P", "X", "Y"]),
np.array(["S", "X", "Y"]),
param.pathways2Net0ColumnInds,
param.pathways2Net0ColumnInds,
]
)
rowInds_BySheets = np.array(
[
param.pathways2Net0RowInds,
param.pathways2Net0RandomRowInds_CCUS,
param.pathways2Net0RandomRowInds_Outputs,
]
)
for iSheet in np.arange(len(spreadsheets)):
for iColumn in columnInds_BySheets[iSheet]:
for iRow in rowInds_BySheets[iSheet]:
                cell = spreadsheets[iSheet] + "!" + iColumn + str(iRow)
                value_param = param.pathways2Net0.evaluate(cell)
                value_loaded = pathways2Net0_loaded.evaluate(cell)
                if value_param is not None and value_loaded is not None:
                    abs_diff = abs_diff + np.abs(value_param - value_loaded)
                elif value_param is not None:
                    abs_diff = abs_diff + np.abs(value_param)
                elif value_loaded is not None:
                    abs_diff = abs_diff + np.abs(value_loaded)
# abs_diff should be 0 if reset_param works correctly:
return abs_diff
def plot_episode(state, fname):
# a helper function to plot each timestep in the most recent episode
fig, ax = plt.subplots(2, 2)
# plot cumulative total rewards and deployments for the 3 technologies:
ax1 = plt.subplot(221)
plt.plot(np.cumsum(state.rewards_all), label='cumulative reward',color='black')
plt.xlabel("time, avg reward: " + str(np.mean(state.rewards_all)))
plt.ylabel("cumulative reward")
plt.legend(loc='upper left', fontsize='xx-small')
plt.tight_layout()
ax2 = ax1.twinx()
ax2.plot(np.array(state.deployments_all)[:,0],label="offshore wind")
ax2.plot(np.array(state.deployments_all)[:,1],label="blue hydrogen")
ax2.plot(np.array(state.deployments_all)[:,2],label="green hydrogen")
ax2.plot(np.array(state.emission_amount_all),label="CO2 emissions amount")
ax2.set_ylabel("deployments and CO2 emissions")
plt.legend(loc='lower right',fontsize='xx-small')
plt.tight_layout()
# plot a subset of the observations:
plt.subplot(222)
    # the first element of each observation is the step count; when noise is observable, elements 1-5 are the first five randomized costs
plt.plot(np.array(state.observations_all)[:,0], label="step counts", color='black')
if state.noise_observability == True:
plt.plot(np.array(state.observations_all)[:,1], label="CCS Capex £/tonne")
plt.plot(np.array(state.observations_all)[:,2], label="CCS Opex £/tonne")
plt.plot(np.array(state.observations_all)[:,3], label="Carbon price £/tonne")
plt.plot(np.array(state.observations_all)[:,4], label="Offshore wind Devex £/kW")
plt.plot(np.array(state.observations_all)[:,5], label="Offshore wind Capex £/kW")
plt.xlabel("time")
plt.ylabel("observations")
plt.legend(loc='lower right',fontsize='xx-small')
plt.tight_layout()
# plot the agent's actions:
plt.subplot(223)
# plt.plot(np.array(state.actions_all)[:,0],label="offshore wind capacity [GW]")
plt.plot(np.array(state.actions_all)[:,0],label="offshore wind to power [TWh]")
plt.plot(np.array(state.actions_all)[:,1],label="blue hydrogen energy [TWh]")
plt.plot(np.array(state.actions_all)[:,2],label="green hydrogen energy [TWh]")
plt.xlabel("time")
plt.ylabel("actions")
plt.legend(title="increment in",loc='lower right',fontsize='xx-small')
plt.tight_layout()
# plot jobs and increments in jobs:
plt.subplot(224)
    to_plot = np.vstack((np.array(state.weightedRewardComponents_all)
import numpy as np
import netket as nk
import sys
import scipy.optimize as spo
import netket.custom.utils as utls
from netket.utils import (
MPI_comm as _MPI_comm,
n_nodes as _n_nodes,
node_number as _rank
)
import mpi4py.MPI as mpi
from netket.stats import (
statistics as _statistics,
mean as _mean,
sum_inplace as _sum_inplace,
)
import netket.custom.fermionic_hilbert as fermhi
import gpw.utils as utls
import numpy_ml as ml
import netket.custom.hubbard as hub
from netket.custom.fermionic_hopping_sampler import FermionicHoppingSampler,FermionicHoppingKernel
from netket.custom.ab_initio_ham import AbInitio
from pyscf import scf, gto, ao2mo, fci
N = int(sys.argv[1])
U = float(sys.argv[2])
basis_rep = int(sys.argv[3]) # 0: local basis, 1: canonical basis
loss_fun = int(sys.argv[4]) # 0: overlap, 1: squared error
local_samples = int(sys.argv[5])
Lx = 3
Ly = 4
ha = hub.Hubbard(Lx=Lx, Ly=Ly, n_elec=((Lx*Ly)//2,(Lx*Ly)//2), U=U, pbx=-1, pby=-1)
hopping_prefactors = ha._operators[0]._prefactors
hopping_sites = ha._operators[0]._sites
hopping_spins = ha._operators[0]._spins
h1 = np.zeros((Lx*Ly, Lx*Ly))
for i in range(len(hopping_prefactors)):
if hopping_spins[i][0] == 0:
h1[hopping_sites[i][0][0], hopping_sites[i][0][1]] = hopping_prefactors[i].real
h2 = np.zeros((Lx*Ly, Lx*Ly, Lx*Ly, Lx*Ly))
np.fill_diagonal(h2, U)
# Run FCI
cisolver = fci.direct_spin1.FCISolver()
transformation_mat = np.linalg.eigh(h1)[1]
if basis_rep == 1:
h1_canonical = transformation_mat.T.dot(h1.dot(transformation_mat))
h2_canonical = np.einsum("ijkl,ia->ajkl",h2, transformation_mat)
h2_canonical = np.einsum("ajkl,jb->abkl",h2_canonical, transformation_mat)
h2_canonical = np.einsum("abkl,kc->abcl",h2_canonical, transformation_mat)
h2_canonical = np.einsum("abcl,ld->abcd",h2_canonical, transformation_mat)
h1 = h1_canonical
h2 = h2_canonical
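    # the four chained einsum calls above perform one quartic basis rotation;
    # equivalently (a hypothetical one-liner, not in the original script):
    #   h2_canonical = np.einsum("ijkl,ia,jb,kc,ld->abcd", h2, *(4 * [transformation_mat]))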
norb=Lx*Ly
nelec=Lx*Ly
e, c = cisolver.kernel(h1, h2, Lx*Ly, Lx*Ly)
all_configs = []
amps = []
for i in range(c.shape[0]):
for j in range(c.shape[1]):
alpha = np.array(utls.bitfield(fci.cistring.addr2str(norb, nelec//2, i), norb))
beta = np.array(utls.bitfield(fci.cistring.addr2str(norb, nelec//2, j), norb))
all_configs.append(alpha + 2*beta)
amps.append(c[i,j])
all_configs = np.array(all_configs)
amps = np.array(amps)
hi = fermhi.Fermions(norb, n_elec=(nelec//2,nelec//2))
ha = AbInitio(hi, h1, h2)
ma = nk.machine.qgps.QGPSFermionicProdSym(hi, n_bond=N, automorphisms=None, spin_flip_sym=False, dtype=complex)
ma._exp_kern_representation = False
ma._fast_update = False
ma.init_random_parameters(sigma=0.1, start_from_uniform=True)
rng_par = np.random.default_rng(1234)
ma._epsilon = rng_par.normal(scale=0.1, size=ma._epsilon.shape) + 1.j*rng_par.normal(scale=0.5, size=ma._epsilon.shape)
ma._opt_params = ma._epsilon[ma._der_ids >= 0].copy()
amps = amps/np.exp(np.mean(np.log(amps + 0.j)))
indices = np.array_split(np.arange(len(amps)), mpi.COMM_WORLD.size)[_rank]
indices_reduced = indices[:local_samples]
full_basis = all_configs[indices]
basis = all_configs[indices_reduced]
amplitudes = amps[indices_reduced]
weightings = np.ones(len(amplitudes))
learning = nk.custom.QGPSLearningExp(ma, complex_expand=False)
def objective_fun():
if loss_fun == 0:
return -learning.overlap(basis, amplitudes, weightings)
else:
return learning.mean_squared_error(basis, amplitudes, weightings)
if mpi.COMM_WORLD.Get_rank() == 0:
with open("out.txt", "w") as fl:
fl.write("")
count = 0
steps = 1
def callback():
global count
if count % steps == 0:
val = objective_fun()
if loss_fun == 0:
val = -val
        c_predicted_full = np.zeros(c.size, dtype=complex)
c_predicted_full[indices] = np.exp(ma.log_val(full_basis))
c_predicted_full = _sum_inplace(c_predicted_full)
if _rank == 0:
c_predicted_full /= np.sqrt(np.sum(abs(c_predicted_full)**2))
c_predicted = c_predicted_full.reshape(c.shape)
en = cisolver.energy(h1, h2, c_predicted.real, norb, nelec) + cisolver.energy(h1, h2, c_predicted.imag, norb, nelec)
print(val, en, learning.noise_tilde)
with open("out.txt", "a") as fl:
fl.write("{} {}\n".format(en, val))
count += 1
init_noise = learning.mean_squared_error(basis, amplitudes, weightings)/(local_samples * _n_nodes)
learning.noise_tilde = init_noise
best_loss = objective_fun()
no_improvement_count = 0
while no_improvement_count < 100:
callback()
loss = objective_fun()
if loss < best_loss:
best_loss = loss
no_improvement_count = 0
else:
no_improvement_count += 1
for i in rng_par.permutation(Lx*Ly):
if learning.ref_site is not None:
prior_std_dev = np.sqrt(1/(2*learning.alpha_mat[learning.ref_site, 0]))
for n in range(ma._epsilon.shape[1]):
if np.sum(np.abs(ma._epsilon[learning.ref_site, n, :])) < 1.e-10:
ma._epsilon[learning.ref_site, n, :] = rng_par.normal(scale=prior_std_dev, size=4) + 1.j*rng_par.normal(scale=prior_std_dev, size=4)
learning.fit_step(basis, amplitudes, i, opt_alpha=True, opt_noise=False, rvm=False, max_alpha_iterations=2, weightings=weightings)
ma._opt_params = ma._epsilon[ma._der_ids >= 0].copy()
ml_grad = learning.log_marg_lik_noise_der()
        learning.noise_tilde = min(init_noise, np.exp(np.log(learning.noise_tilde)
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
import numpy as np
def decode_image(im_file, im_info):
"""read rgb image
Args:
im_file (str|np.ndarray): input can be image path or np.ndarray
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
if isinstance(im_file, str):
with open(im_file, 'rb') as f:
im_read = f.read()
data = np.frombuffer(im_read, dtype='uint8')
im = cv2.imdecode(data, 1) # BGR mode, but need RGB mode
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
else:
im = im_file
im_info['im_shape'] = np.array(im.shape[:2], dtype=np.float32)
im_info['scale_factor'] = np.array([1., 1.], dtype=np.float32)
return im, im_info
class Resize(object):
"""resize image by target_size and max_size
Args:
target_size (int): the target size of image
keep_ratio (bool): whether keep_ratio or not, default true
interp (int): method of resize
"""
def __init__(self, target_size, keep_ratio=True, interp=cv2.INTER_LINEAR):
if isinstance(target_size, int):
target_size = [target_size, target_size]
self.target_size = target_size
self.keep_ratio = keep_ratio
self.interp = interp
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
assert len(self.target_size) == 2
assert self.target_size[0] > 0 and self.target_size[1] > 0
im_channel = im.shape[2]
im_scale_y, im_scale_x = self.generate_scale(im)
im = cv2.resize(
im,
None,
None,
fx=im_scale_x,
fy=im_scale_y,
interpolation=self.interp)
im_info['im_shape'] = np.array(im.shape[:2]).astype('float32')
im_info['scale_factor'] = np.array(
[im_scale_y, im_scale_x]).astype('float32')
return im, im_info
def generate_scale(self, im):
"""
Args:
im (np.ndarray): image (np.ndarray)
Returns:
im_scale_x: the resize ratio of X
im_scale_y: the resize ratio of Y
"""
origin_shape = im.shape[:2]
im_c = im.shape[2]
if self.keep_ratio:
im_size_min = np.min(origin_shape)
im_size_max = np.max(origin_shape)
target_size_min = np.min(self.target_size)
target_size_max = np.max(self.target_size)
im_scale = float(target_size_min) / float(im_size_min)
if np.round(im_scale * im_size_max) > target_size_max:
im_scale = float(target_size_max) / float(im_size_max)
im_scale_x = im_scale
im_scale_y = im_scale
else:
resize_h, resize_w = self.target_size
im_scale_y = resize_h / float(origin_shape[0])
im_scale_x = resize_w / float(origin_shape[1])
return im_scale_y, im_scale_x
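# Hypothetical worked example (not part of the original module) of the
# keep_ratio branch in Resize.generate_scale(): scale by target_min/origin_min,
# then shrink if the scaled long side would exceed target_max.
def _example_keep_ratio_scale(origin_hw=(480, 640), target_size=(608, 1088)):
    im_size_min, im_size_max = min(origin_hw), max(origin_hw)
    target_size_min, target_size_max = min(target_size), max(target_size)
    im_scale = float(target_size_min) / float(im_size_min)  # 608/480 ~ 1.27
    if round(im_scale * im_size_max) > target_size_max:  # 1.27*640 ~ 811 <= 1088, so no shrink here
        im_scale = float(target_size_max) / float(im_size_max)
    return im_scale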
class NormalizeImage(object):
    """normalize image
    Args:
        mean (list): im - mean
        std (list): im / std
        is_scale (bool): whether to scale the image to [0, 1] by dividing by 255 first
    """
def __init__(self, mean, std, is_scale=True):
self.mean = mean
self.std = std
self.is_scale = is_scale
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
im = im.astype(np.float32, copy=False)
mean = np.array(self.mean)[np.newaxis, np.newaxis, :]
std = np.array(self.std)[np.newaxis, np.newaxis, :]
if self.is_scale:
im = im / 255.0
im -= mean
im /= std
return im, im_info
class Permute(object):
    """permute image layout from HWC to CHW
    """
def __init__(self, ):
super(Permute, self).__init__()
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
im = im.transpose((2, 0, 1)).copy()
return im, im_info
class PadStride(object):
    """padding image for models with FPN, instead of PadBatch(pad_to_stride) in the original config
    Args:
        stride (int): models with FPN need image shape % stride == 0
    """
def __init__(self, stride=0):
self.coarsest_stride = stride
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
coarsest_stride = self.coarsest_stride
if coarsest_stride <= 0:
return im, im_info
im_c, im_h, im_w = im.shape
pad_h = int(np.ceil(float(im_h) / coarsest_stride) * coarsest_stride)
pad_w = int(np.ceil(float(im_w) / coarsest_stride) * coarsest_stride)
padding_im = np.zeros((im_c, pad_h, pad_w), dtype=np.float32)
padding_im[:, :im_h, :im_w] = im
return padding_im, im_info
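# Hypothetical worked example (not part of the original module): PadStride
# zero-pads each spatial dimension up to the next multiple of the stride,
# e.g. a 500x353 image with stride 32 becomes 512x384.
def _example_pad_to_stride(im_h=500, im_w=353, stride=32):
    pad_h = int(np.ceil(float(im_h) / stride) * stride)  # 512
    pad_w = int(np.ceil(float(im_w) / stride) * stride)  # 384
    return pad_h, pad_w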
class WarpAffine(object):
"""Warp affine the image
"""
def __init__(self,
keep_res=False,
pad=31,
input_h=512,
input_w=512,
scale=0.4,
shift=0.1):
self.keep_res = keep_res
self.pad = pad
self.input_h = input_h
self.input_w = input_w
self.scale = scale
self.shift = shift
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
img = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
h, w = img.shape[:2]
if self.keep_res:
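            # (h | pad) + 1 rounds h up to the next multiple of pad + 1;
            # this assumes pad has the form 2**k - 1 (the default 31 pads
            # to multiples of 32)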
input_h = (h | self.pad) + 1
input_w = (w | self.pad) + 1
s = np.array([input_w, input_h], dtype=np.float32)
c = np.array([w // 2, h // 2], dtype=np.float32)
else:
s = max(h, w) * 1.0
input_h, input_w = self.input_h, self.input_w
c = np.array([w / 2., h / 2.], dtype=np.float32)
trans_input = get_affine_transform(c, s, 0, [input_w, input_h])
img = cv2.resize(img, (w, h))
inp = cv2.warpAffine(
img, trans_input, (input_w, input_h), flags=cv2.INTER_LINEAR)
return inp, im_info
class EvalAffine(object):
def __init__(self, size, stride=64):
super(EvalAffine, self).__init__()
self.size = size
self.stride = stride
def __call__(self, image, im_info):
s = self.size
h, w, _ = image.shape
trans, size_resized = get_affine_mat_kernel(h, w, s, inv=False)
image_resized = cv2.warpAffine(image, trans, size_resized)
return image_resized, im_info
def get_affine_mat_kernel(h, w, s, inv=False):
if w < h:
w_ = s
h_ = int(np.ceil((s / w * h) / 64.) * 64)
scale_w = w
scale_h = h_ / w_ * w
else:
h_ = s
w_ = int(np.ceil((s / h * w) / 64.) * 64)
scale_h = h
scale_w = w_ / h_ * h
center = np.array([np.round(w / 2.), np.round(h / 2.)])
size_resized = (w_, h_)
trans = get_affine_transform(
center, np.array([scale_w, scale_h]), 0, size_resized, inv=inv)
return trans, size_resized
def get_affine_transform(center,
input_size,
rot,
output_size,
shift=(0., 0.),
inv=False):
"""Get the affine transform matrix, given the center/scale/rot/output_size.
Args:
        center (np.ndarray[2, ]): Center of the bounding box (x, y).
        input_size (np.ndarray[2, ]): Size of the bounding box
            wrt [width, height].
rot (float): Rotation angle (degree).
output_size (np.ndarray[2, ]): Size of the destination heatmaps.
shift (0-100%): Shift translation ratio wrt the width/height.
Default (0., 0.).
inv (bool): Option to inverse the affine transform direction.
(inv=False: src->dst or inv=True: dst->src)
Returns:
np.ndarray: The transform matrix.
"""
assert len(center) == 2
assert len(output_size) == 2
assert len(shift) == 2
if not isinstance(input_size, (np.ndarray, list)):
input_size = np.array([input_size, input_size], dtype=np.float32)
scale_tmp = input_size
shift = np.array(shift)
src_w = scale_tmp[0]
dst_w = output_size[0]
dst_h = output_size[1]
rot_rad = np.pi * rot / 180
src_dir = rotate_point([0., src_w * -0.5], rot_rad)
dst_dir = np.array([0., dst_w * -0.5])
src = np.zeros((3, 2), dtype=np.float32)
src[0, :] = center + scale_tmp * shift
src[1, :] = center + src_dir + scale_tmp * shift
src[2, :] = _get_3rd_point(src[0, :], src[1, :])
dst = np.zeros((3, 2), dtype=np.float32)
dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
dst[2, :] = _get_3rd_point(dst[0, :], dst[1, :])
if inv:
trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
else:
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
return trans
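# Hypothetical sanity check (not part of the original module): with no rotation,
# no shift, and input_size == output_size centred on the box centre, the source
# and destination triangles coincide, so get_affine_transform reduces to the
# identity map.
def _example_affine_identity():
    trans = get_affine_transform(center=np.array([50., 50.]),
                                 input_size=np.array([100., 100.]),
                                 rot=0,
                                 output_size=np.array([100., 100.]))
    return trans  # ~ [[1, 0, 0], [0, 1, 0]]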
def get_warp_matrix(theta, size_input, size_dst, size_target):
"""This code is based on
https://github.com/open-mmlab/mmpose/blob/master/mmpose/core/post_processing/post_transforms.py
Calculate the transformation matrix under the constraint of unbiased.
Paper ref: Huang et al. The Devil is in the Details: Delving into Unbiased
Data Processing for Human Pose Estimation (CVPR 2020).
Args:
theta (float): Rotation angle in degrees.
size_input (np.ndarray): Size of input image [w, h].
size_dst (np.ndarray): Size of output image [w, h].
size_target (np.ndarray): Size of ROI in input plane [w, h].
Returns:
matrix (np.ndarray): A matrix for transformation.
"""
theta = np.deg2rad(theta)
matrix = np.zeros((2, 3), dtype=np.float32)
scale_x = size_dst[0] / size_target[0]
scale_y = size_dst[1] / size_target[1]
matrix[0, 0] = np.cos(theta) * scale_x
matrix[0, 1] = -np.sin(theta) * scale_x
matrix[0, 2] = scale_x * (
-0.5 * size_input[0] * np.cos(theta) + 0.5 * size_input[1] *
np.sin(theta) + 0.5 * size_target[0])
matrix[1, 0] = np.sin(theta) * scale_y
matrix[1, 1] = np.cos(theta) * scale_y
matrix[1, 2] = scale_y * (
-0.5 * size_input[0] * np.sin(theta) - 0.5 * size_input[1] *
np.cos(theta) + 0.5 * size_target[1])
return matrix
def rotate_point(pt, angle_rad):
"""Rotate a point by an angle.
Args:
pt (list[float]): 2 dimensional point to be rotated
angle_rad (float): rotation angle by radian
Returns:
list[float]: Rotated point.
"""
assert len(pt) == 2
sn, cs = np.sin(angle_rad), np.cos(angle_rad)
new_x = pt[0] * cs - pt[1] * sn
new_y = pt[0] * sn + pt[1] * cs
rotated_pt = [new_x, new_y]
return rotated_pt
def _get_3rd_point(a, b):
"""To calculate the affine matrix, three pairs of points are required. This
function is used to get the 3rd point, given 2D points a & b.
The 3rd point is defined by rotating vector `a - b` by 90 degrees
anticlockwise, using b as the rotation center.
Args:
a (np.ndarray): point(x,y)
b (np.ndarray): point(x,y)
Returns:
np.ndarray: The 3rd point.
"""
assert len(a) == 2
assert len(b) == 2
direction = a - b
third_pt = b + np.array([-direction[1], direction[0]], dtype=np.float32)
return third_pt
class TopDownEvalAffine(object):
"""apply affine transform to image and coords
Args:
trainsize (list): [w, h], the standard size used to train
use_udp (bool): whether to use Unbiased Data Processing.
records(dict): the dict contained the image and coords
Returns:
        records (dict): contains the image and coords after transformation
"""
def __init__(self, trainsize, use_udp=False):
self.trainsize = trainsize
self.use_udp = use_udp
def __call__(self, image, im_info):
rot = 0
imshape = im_info['im_shape'][::-1]
center = im_info['center'] if 'center' in im_info else imshape / 2.
scale = im_info['scale'] if 'scale' in im_info else imshape
if self.use_udp:
trans = get_warp_matrix(
rot, center * 2.0,
[self.trainsize[0] - 1.0, self.trainsize[1] - 1.0], scale)
image = cv2.warpAffine(
image,
trans, (int(self.trainsize[0]), int(self.trainsize[1])),
flags=cv2.INTER_LINEAR)
else:
trans = get_affine_transform(center, scale, rot, self.trainsize)
image = cv2.warpAffine(
image,
trans, (int(self.trainsize[0]), int(self.trainsize[1])),
flags=cv2.INTER_LINEAR)
return image, im_info
def expand_crop(images, rect, expand_ratio=0.3):
imgh, imgw, c = images.shape
label, conf, xmin, ymin, xmax, ymax = [int(x) for x in rect.tolist()]
if label != 0:
return None, None, None
org_rect = [xmin, ymin, xmax, ymax]
h_half = (ymax - ymin) * (1 + expand_ratio) / 2.
w_half = (xmax - xmin) * (1 + expand_ratio) / 2.
if h_half > w_half * 4 / 3:
w_half = h_half * 0.75
center = [(ymin + ymax) / 2., (xmin + xmax) / 2.]
ymin = max(0, int(center[0] - h_half))
ymax = min(imgh - 1, int(center[0] + h_half))
xmin = max(0, int(center[1] - w_half))
xmax = min(imgw - 1, int(center[1] + w_half))
return images[ymin:ymax, xmin:xmax, :], [xmin, ymin, xmax, ymax], org_rect
import os
import glob
import copy
import subprocess
import six
from collections import defaultdict
import re
import xarray as xr
import numpy as np
import datetime
from matplotlib.dates import date2num, num2date
from ... import utils, memoize
#from ..delft import dflow_model as dfm
from .. import hydro_model as hm
from ..delft import dfm_grid
from ...grid import unstructured_grid
from ...spatial import linestring_utils
from . import store_file
import logging as log
try:
import pytz
utc = pytz.timezone('utc')
except ImportError:
log.warning("Couldn't load utc timezone")
utc = None
datenum_precision_per_s = 100 # 10ms - should be evenly divisible into 1e6
def dt_round(dt):
""" Given a datetime or timedelta object, round it to datenum_precision
"""
if isinstance(dt,datetime.timedelta):
td = dt
# days are probably fine
dec_seconds = td.seconds + 1e-6 * td.microseconds
# the correct number of time quanta
quanta = int(round(dec_seconds * datenum_precision_per_s))
# how to get that back to an exact number of seconds?
new_seconds = quanta // datenum_precision_per_s
# careful to keep it integer arithmetic
us_per_quanta = 1000000 // datenum_precision_per_s
new_microseconds = (quanta % datenum_precision_per_s) * us_per_quanta
return datetime.timedelta( days=td.days,
seconds = new_seconds,
microseconds = new_microseconds )
else:
# same deal, but the fields have slightly different names
# And the integer arithmetic cannot be used to count absolute seconds -
# that will overflow 32-bit ints (okay with 64, but better not
# to assume 64-bit ints are available)
dec_seconds = dt.second + 1e-6 * dt.microsecond
quanta = int(round(dec_seconds * datenum_precision_per_s))
# how to get that back to an exact number of seconds?
new_seconds = quanta // datenum_precision_per_s
# careful to keep it integer arithmetic
us_per_quanta = 1000000// datenum_precision_per_s
new_microseconds = (quanta % datenum_precision_per_s) * us_per_quanta
# to handle the carries between microseconds, seconds, days,
# construct an exact timedelta object - also avoids having to do
# int arithmetic with seconds over many days, which could overflow.
td = datetime.timedelta(seconds = new_seconds - dt.second,
microseconds = new_microseconds - dt.microsecond)
return dt + td
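def _example_dt_round():
    """ Hypothetical illustration (not part of the original module): with the
    10ms quantum above, a 0.123456s timedelta rounds to 0.12s.
    """
    td = dt_round(datetime.timedelta(seconds=0.123456))
    return td # datetime.timedelta(microseconds=120000)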
# certainly there is a better way to do this...
MultiBC=hm.MultiBC
StageBC=hm.StageBC
FlowBC=hm.FlowBC
VelocityBC=hm.VelocityBC
ScalarBC=hm.ScalarBC
SourceSinkBC=hm.SourceSinkBC
OTPSStageBC=hm.OTPSStageBC
OTPSFlowBC=hm.OTPSFlowBC
OTPSVelocityBC=hm.OTPSVelocityBC
HycomMultiVelocityBC=hm.HycomMultiVelocityBC
HycomMultiScalarBC=hm.HycomMultiScalarBC
NOAAStageBC=hm.NOAAStageBC
NwisFlowBC=hm.NwisFlowBC
NwisStageBC=hm.NwisStageBC
CdecFlowBC=hm.CdecFlowBC
CdecStageBC=hm.CdecStageBC
class GenericConfig(object):
""" Handles reading and writing of suntans.dat formatted files.
Older code I think was case-insensitive, but seems that it is
now case-sensitive.
"""
keys_are_case_sensitive=True
def __init__(self,filename=None,text=None):
""" filename: path to file to open and parse
text: a string containing the entire file to parse
"""
self.filename = filename
if filename:
fp = open(filename,'rt')
else:
fp = [s+"\n" for s in text.split("\n")]
self.entries = {}
self.originals = []
for line in fp:
# save original text so we can write out a new suntans.dat with
# only minor changes
self.originals.append(line)
i = len(self.originals)-1
m = re.match(r"^\s*((\S+)\s+(\S+))?\s*.*",line)
if m and m.group(1):
key = m.group(2)
if not self.keys_are_case_sensitive:
key=key.lower()
val = m.group(3)
self.entries[key] = [val,i]
if filename:
fp.close()
def copy(self):
# punt copy semantics and handling to copy module
return copy.deepcopy(self)
def conf_float(self,key):
return self.conf_str(key,float)
def conf_int(self,key,default=None):
x=self.conf_str(key,int)
if x is None:
return default
return x
def conf_str(self,key,caster=lambda x:x):
if not self.keys_are_case_sensitive:
key = key.lower()
if key in self.entries:
return caster(self.entries[key][0])
else:
return None
def __setitem__(self,key,value):
self.set_value(key,value)
def __getitem__(self,key):
return self.conf_str(key)
def __delitem__(self,key):
# if the line already exists, it will be written out commented, otherwise
# it won't be written at all.
self.set_value(key,None)
def __contains__(self,key):
return self[key] is not None
def get(self,key,default=None):
if key in self:
return self[key]
else:
return default
def __eq__(self,other):
return self.is_equal(other)
def is_equal(self,other,limit_to_keys=None):
# key by key equality comparison:
log.debug("Comparing two configs")
for k in self.entries.keys():
if limit_to_keys and k not in limit_to_keys:
continue
if k not in other.entries:
log.debug("Other is missing key %s"%k)
return False
elif self.val_to_str(other.entries[k][0]) != self.val_to_str(self.entries[k][0]):
log.debug("Different values key %s => %s, %s"%(k,self.entries[k][0],other.entries[k][0]))
return False
for k in other.entries.keys():
if limit_to_keys and k not in limit_to_keys:
continue
if k not in self.entries:
log.debug("other has extra key %s"%k)
return False
return True
def disable_value(self,key):
if not self.keys_are_case_sensitive:
key = key.lower()
if key not in self.entries:
return
old_val,i = self.entries[key]
self.originals[i] = "# %s"%(self.originals[i])
self.entries[key][0] = None
def val_to_str(self,value):
# make sure that floats are formatted with plenty of digits:
# and handle annoyance of standard Python types vs. numpy types
# But None stays None, as it gets handled specially elsewhere
if value is None:
return None
if isinstance(value,float) or isinstance(value,np.floating):
value = "%.12g"%value
else:
value = str(value)
return value
def set_value(self,key,value):
""" Update a value in the configuration. Setting an item to None will
comment out the line if it already exists, and omit the line if it does
not yet exist.
"""
if not self.keys_are_case_sensitive:
key = key.lower()
else:
if (key not in self.entries):
for other in self.entries:
if key.lower()==other.lower():
raise Exception("Probably a case-sensitive error: %s vs %s"%(key,other))
if key not in self.entries:
if value is None:
return
self.originals.append("# blank #")
i = len(self.originals) - 1
self.entries[key] = [None,i]
old_val,i = self.entries[key]
value = self.val_to_str(value)
if value is not None:
self.originals[i] = "%s %s # from sunreader code\n"%(key,value)
else:
self.originals[i] = "# " + self.originals[i]
self.entries[key][0] = value
def write_config(self,filename=None,check_changed=True,backup=True):
"""
Write this config out to a text file
filename: defaults to self.filename
check_changed: if True, and the file already exists and is not materially different,
then do nothing. Good for avoiding unnecessary changes to mtimes.
backup: if true, copy any existing file to <filename>.bak
"""
filename = filename or self.filename
if filename is None:
raise Exception("No clue about the filename for writing config file")
if check_changed:
if os.path.exists(filename):
existing_conf = self.__class__(filename)
if existing_conf == self:
log.debug("No change in config")
return
if os.path.exists(filename) and backup:
filename_bak = filename + ".bak"
os.rename(filename,filename_bak)
fp = open(filename,'wt')
for line in self.originals:
fp.write(line)
fp.close()
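# Usage sketch (hypothetical file name, not part of the original module):
# conf = SunConfig('suntans.dat')
# conf['dt'] = 30.0 # values are normalised via val_to_str before writing
# conf.write_config() # rewrites in place, backing up to suntans.dat.bak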
class SunConfig(GenericConfig):
def time_zero(self):
""" return python datetime for the when t=0 is"""
# try the old way, where these are separate fields:
start_year = self.conf_int('start_year')
start_day = self.conf_float('start_day')
if start_year is not None:
# Note: we're dealing with 0-based start days here.
start_datetime = datetime.datetime(start_year,1,1,tzinfo=utc) + dt_round(datetime.timedelta(start_day))
return start_datetime
# That failed, so try the other way
log.debug("Trying the new way of specifying t0")
s = self.conf_str('TimeZero') # 1999-01-01-00:00
start_datetime = datetime.datetime.strptime(s,'%Y-%m-%d-%H:%M')
start_datetime = start_datetime.replace(tzinfo=utc)
return start_datetime
def simulation_seconds(self):
return self.conf_float('dt') * self.conf_int('nsteps')
def timestep(self):
""" Return a timedelta object for the timestep - should be safe from roundoff.
"""
return dt_round( datetime.timedelta(seconds=self.conf_float('dt')) )
def simulation_period(self):
""" This is more naive than the SunReader simulation_period(), in that
it does *not* look at any restart information, just start_year, start_day,
dt, and nsteps
WARNING: this used to add an extra dt to start_date - maybe trying to make it
the time of the first profile output?? this seems like a bad idea. As of
Nov 18, 2012, it does not do that (and at the same time, moves to datetime
arithmetic)
return a pair of python datetime objects for the start and end of the simulation.
"""
t0 = self.time_zero()
# why did it add dt here???
# start_date = t0 + datetime.timedelta( self.conf_float('dt') / (24.*3600) )
# simulation_days = self.simulation_seconds() / (24.*3600)
# end_date = start_date + datetime.timedelta(simulation_days)
start_date = t0
end_date = start_date + self.conf_int('nsteps')*self.timestep()
return start_date,end_date
def copy_t0(self,other):
self.set_value('start_year',other.conf_int('start_year'))
self.set_value('start_day',other.conf_float('start_day'))
# def set_simulation_period(self,start_date,end_date):
# """ Based on the two python datetime instances given, sets
# start_day, start_year and nsteps
# """
# self.set_value('start_year',start_date.year)
# t0 = datetime.datetime( start_date.year,1,1,tzinfo=utc )
# self.set_value('start_day',date2num(start_date) - date2num(t0))
#
# # roundoff dangers here -
# # self.set_simulation_duration_days( date2num(end_date) - date2num(start_date))
# self.set_simulation_duration(delta=(end_date - start_date))
#
# def set_simulation_duration_days(self,days):
# self.set_simulation_duration(days=days)
# def set_simulation_duration(self,
# days=None,
# delta=None,
# seconds = None):
# """ Set the number of steps for the simulation - exactly one of the parameters should
# be specified:
# days: decimal number of days - DANGER - it's very easy to get some round-off issues here
# delta: a datetime.timedelta object.
# hopefully safe, as long as any differencing between dates was done with UTC dates
# (or local dates with no daylight savings transitions)
# seconds: total number of seconds - this should be safe, though there are some possibilities for
# roundoff.
#
# """
# print("Setting simulation duration:")
# print(" days=",days)
# print(" delta=",delta)
# print(" seconds=",seconds)
#
# # convert everything to a timedelta -
# if (days is not None) + (delta is not None) + (seconds is not None) != 1:
# raise Exception("Exactly one of days, delta, or seconds must be specified")
# if days is not None:
# delta = datetime.timedelta(days=days)
# elif seconds is not None:
# delta = datetime.timedelta(seconds=seconds)
#
# # assuming that dt is also a multiple of the precision (currently 10ms), this is
# # safe
# delta = dt_round(delta)
# print(" rounded delta = ",delta)
# timestep = dt_round(datetime.timedelta(seconds=self.conf_float('dt')))
# print(" rounded timestep =",timestep)
#
# # now we have a hopefully exact simulation duration in integer days, seconds, microseconds
# # and a similarly exact timestep
# # would like to do this:
# # nsteps = delta / timestep
# # but that's not supported until python 3.3 or so
# def to_quanta(td):
# """ return integer number of time quanta in the time delta object
# """
# us_per_quanta = 1000000 // datenum_precision_per_s
# return (td.days*86400 + td.seconds)*datenum_precision_per_s + \
# int( round( td.microseconds/us_per_quanta) )
# quanta_timestep = to_quanta(timestep)
# quanta_delta = to_quanta(delta)
#
# print(" quanta_timestep=",quanta_timestep)
# print(" quanta_delta=",quanta_delta)
# nsteps = quanta_delta // quanta_timestep
#
# print(" nsteps = ",nsteps)
# # double-check, going back to timedelta objects:
# err = nsteps * timestep - delta
# self.set_value('nsteps',int(nsteps))
# print("Simulation duration requires %i steps (rounding error=%s)"%(self.conf_int('nsteps'),err))
def is_grid_compatible(self,other):
""" Compare two config's, and return False if any parameters which would
affect grid partitioning/celldata/edgedata/etc. are different.
Note that differences in other input files can also cause two grids to be different,
esp. vertspace.dat
"""
# keep all lowercase
keys = ['Nkmax',
'stairstep',
'rstretch',
'CorrectVoronoi',
'VoronoiRatio',
'vertgridcorrect',
'IntDepth',
'pslg',
'points',
'edges',
'cells',
'depth',
# 'vertspace.dat.in' if rstretch==0
'topology.dat',
'edgedata',
'celldata',
'vertspace.dat']
return self.is_equal(other,limit_to_keys=keys)
class SuntansModel(hm.HydroModel):
# Annoying, but suntans doesn't like signed elevations
# this offset will be applied to grid depths and freesurface boundary conditions.
# This is error prone, though, and makes it difficult to "round-trip"
# grid information. In particular, if a new run is created by loading an old
# run, there will be an issue where the grid may get z_offset applied twice.
# This should be reimplemented as a z_datum. So no behind-the-scenes offsets,
# just have a standardized place for saying that my model's z=0 is z_offset
# from z_datum, e.g. z_datum='NAVD88' and z_offset.
# maybe the appropriate thing is a dictionary, mapping datum names to offsets.
# like z_datum['NAVD88']=-5.
z_offset=0.0
ic_ds=None
met_ds=None
# None: not a restart, or
# path to suntans.dat for the run being restarted, or True if this is
# a restart but we don't we have a separate directory for the restart,
# just StartFiles
restart=None
restart_model=None # model instance being restarted
restart_symlink=True # default to symlinking restarts
# for partition, run, etc.
sun_bin_dir=None
mpi_bin_dir=None
# 'auto': the grid and projection information will be used to
# update the coriolis parameter.
# None: leave whatever value is in the template
# <float>: use that as the coriolis parameter
coriolis_f='auto'
# experimental -- not yet working.
# the suntans code does not yet remap edge data from the original
# order to the -g ordering (which is different, even when running
# single-core).
use_edge_depths=False # write depth data per-edge in a separate file.
def __init__(self):
super(SuntansModel,self).__init__()
self.load_template(os.path.join(os.path.dirname(__file__),"data","suntans.dat"))
@property
def time0(self):
dt=datetime.datetime.strptime(self.config['starttime'],
"%Y%m%d.%H%M%S")
return utils.to_dt64(dt)
def create_restart(self,symlink=True):
new_model=self.__class__() # in case of subclassing
# SuntansModel()
new_model.config=self.config.copy()
# things that have to match up, but are not part of the config:
new_model.num_procs=self.num_procs
new_model.restart=self.config_filename
new_model.restart_model=self
new_model.restart_symlink=symlink
# There is some extra machinery in load_grid(...) to get the right cell and
# edge depths -- this call would lose those
# new_model.set_grid(unstructured_grid.UnstructuredGrid.read_suntans(self.run_dir))
# So copy the grid we already have.
# UnstructuredGrid.copy() is naive and doesn't get all the depth fields, so
# here just pass self.grid, even though it may get mutated.
new_model.set_grid(self.grid)
new_model.run_start=self.restartable_time()
return new_model
@classmethod
def run_completed(cls,fn):
"""
fn: path to either folder containing suntans.dat, or path
to suntans.dat itself.
returns: True if the file exists and the folder contains a run which
ran to completion. Otherwise False.
"""
if not os.path.exists(fn):
return False
if os.path.isdir(fn):
fn=os.path.join(fn,"suntans.dat")
if not os.path.exists(fn):
return False
model=cls.load(fn)
if model is None:
return False
return model.is_completed()
def is_completed(self):
step_fn=os.path.join(self.run_dir,self.config['ProgressFile'])
if not os.path.exists(step_fn):
return False
with open(step_fn,'rt') as fp:
progress=fp.read()
return "100% Complete" in progress
def set_grid(self,grid):
"""
read/load grid, check for depth data and edge marks.
This does not apply the z_offset -- that is only
applied during writing out the rundata.
"""
if isinstance(grid,six.string_types):
# step in and load as suntans, rather than generic
grid=unstructured_grid.SuntansGrid(grid)
# depending on the source of the grid, it may need edges flipped
# to be consistent with suntans expectations that nc1 is always into
# the domain, and nc2 may be external
grid.orient_edges()
super(SuntansModel,self).set_grid(grid)
# 2019-05-29: trying to transition to using z for elevation, since
# 'depth' has a positive-down connotation
# make sure we have the fields expected by suntans
if 'z_bed' not in grid.cells.dtype.names:
if 'depth' in grid.cells.dtype.names:
self.log.warning("For now, assuming that cells['depth'] is positive up")
cell_z_bed=grid.cells['depth']
elif 'z_bed' in grid.nodes.dtype.names:
cell_z_bed=grid.interp_node_to_cell(grid.nodes['z_bed'])
# and avoid overlapping names
grid.delete_node_field('z_bed')
elif 'depth' in grid.nodes.dtype.names:
cell_z_bed=grid.interp_node_to_cell(grid.nodes['depth'])
self.log.warning("For now, assuming that nodes['depth'] is positive up")
else:
self.log.warning("No depth information in grid nodes or cells. Creating zero-depth")
cell_z_bed=np.zeros(grid.Ncells(),np.float64)
grid.add_cell_field('z_bed',cell_z_bed)
# with the current suntans version, depths are on cells, but model driver
# code in places wants an edge depth. so copy those here.
e2c=grid.edge_to_cells() # this is assumed in other parts of the code that do not recalculate it.
nc1=e2c[:,0].copy() ; nc2=e2c[:,1].copy()
nc1[nc1<0]=nc2[nc1<0] ; nc2[nc2<0]=nc1[nc2<0]
# edge depth is shallower of neighboring cells
# these depths are still positive up, though.
edge_z_bed=np.maximum(grid.cells['z_bed'][nc1],grid.cells['z_bed'][nc2])
if 'edge_z_bed' in grid.edges.dtype.names:
deep_edges=(grid.edges['edge_z_bed']<edge_z_bed)
if np.any(deep_edges):
self.log.info("%d edges had a specified depth deeper than neighboring cells. Replaced them"%
deep_edges.sum())
grid.edges['edge_z_bed'][deep_edges]=edge_z_bed[deep_edges]
else:
grid.add_edge_field('edge_z_bed',edge_z_bed)
if 'mark' not in grid.edges.dtype.names:
mark=np.zeros( grid.Nedges(), np.int32)
grid.add_edge_field('mark',mark)
self.grid=grid
self.set_default_edge_marks()
def set_default_edge_marks(self):
# update marks to a reasonable starting point
e2c=self.grid.edge_to_cells()
bc_edge=e2c.min(axis=1)<0
mark=self.grid.edges['mark']
mark[mark<0] = 0
mark[ (mark==0) & bc_edge ] = 1
# allow other marks to stay
self.grid.edges['mark'][:]=mark
def edge_depth(self,j,datum=None):
"""
Return the bed elevation for edge j, in meters, positive=up.
Suntans implementation relies on set_grid() having set edge depths
to the shallower (max z_bed, since values are positive-up) of the neighboring cells
"""
z=self.grid.edges['edge_z_bed'][j]
if datum is not None:
if datum=='eta0':
z+=self.initial_water_level()
return z
@classmethod
def load(cls,fn,load_grid=True,load_met=False,load_ic=False,load_bc=False):
"""
Open an existing model setup, from path to its suntans.dat
return None if run could not be loaded.
load_met: if true, load an existing Met netcdf file to self.met_ds
load_ic: likewise for initial conditions
load_bc: likewise for boundary conditions
"""
model=cls()
if os.path.isdir(fn):
fn=os.path.join(fn,'suntans.dat')
if not os.path.exists(fn):
return None
model.load_template(fn)
model.set_run_dir(os.path.dirname(fn),mode='existing')
# infer number of processors based on celldata files
# for restarts, this is overridden in infer_restart() by looking
# at the number of restart files, since in some scripts those
# are created earlier, while the celldata files aren't created until
# partition is called.
sub_cells=glob.glob( os.path.join(model.run_dir,'celldata.dat.*') )
if len(sub_cells)>0:
model.num_procs=len(sub_cells)
else:
# probably better to test whether it has even been processed
model.num_procs=1
model.infer_restart()
model.set_times_from_config()
# This will need some tweaking to fail gracefully
if load_grid:
try:
model.load_grid()
except OSError:
# this may be too strict -- a multiproc run could be fine but not
# necessarily have the global grid.
return None
if load_met:
model.load_met_ds()
if load_ic:
model.load_ic_ds()
if load_bc:
model.load_bc_ds()
return model
def load_grid(self):
"""
Set self.grid from existing suntans-format grid in self.run_dir.
"""
g=unstructured_grid.UnstructuredGrid.read_suntans(self.run_dir)
# hacked in support to read cell depths
cell_depth_fn=self.file_path('depth')+"-voro"
if ( ('z_bed' not in g.cells.dtype.names)
and
(os.path.exists(cell_depth_fn)) ):
self.log.debug("Will read cell depths, too")
cell_xyz=np.loadtxt(cell_depth_fn)
assert cell_xyz.shape[0]==g.Ncells(),"%s didn't have the right number of cells (%d vs %d)"%(cell_depth_fn,
cell_xyz.shape[0],
g.Ncells())
# cell centers can be a bit lenient in case there are centroid vs. circumcenter vs nudged
# differences.
if not np.allclose(cell_xyz[:,:2], g.cells_center()):
self.log.warning("%s cell locations don't match grid"%cell_depth_fn)
self.log.warning("Will forge ahead nevertheless")
# on disk these are positive down, but model driver convention is positive up
# (despite being called depth...)
g.add_cell_field('z_bed',-cell_xyz[:,2])
g.add_cell_field('depth',-cell_xyz[:,2]) # will be phased out
# hacked in support to read depth on edges
edge_depth_fn=self.file_path('depth')+"-edge"
if ( ('edge_z_bed' not in g.edges.dtype.names)
and
(os.path.exists(edge_depth_fn)) ):
self.log.debug("Will read edge depths, too")
edge_xyz=np.loadtxt(edge_depth_fn)
assert edge_xyz.shape[0]==g.Nedges(),"%s didn't have the right number of edges (%d vs %d)"%(edge_depth_fn,
edge_xyz.shape[0],
g.Nedges())
assert np.allclose(edge_xyz[:,:2], g.edges_center()),"%s edge locations don't match"%edge_depth_fn
# on disk these are positive down, but model driver convention is positive up
# (despite being called depth...) -- in the process of using edge_z_bed in the
# driver script to make the sign convention more apparent.
# g.add_edge_field('edge_depth',-edge_xyz[:,2]) # being phased out
g.add_edge_field('edge_z_bed',-edge_xyz[:,2])
self.set_grid(g)
return g
def infer_restart(self):
"""
See if this run is a restart.
Sets self.restart to:
None: not a restart
True: is a restart, but insufficient information to find the parent run
string: path to suntans.dat for the parent run
"""
if self.config['StartFile'] is None:
# Possibly not a valid config file
self.restart=None
return
start_path=os.path.join(self.run_dir,self.config['StartFile']+".0")
if os.path.exists(start_path):
log.debug("Looks like a restart")
self.restart=True
# Get num_procs from the number of restart files.
for proc in range(1024):
fn=os.path.join(self.run_dir,self.config['StartFile']+".%d"%proc)
if not os.path.exists(fn):
break
self.num_procs=proc
log.debug("Restart appears to have %d subdomains"%self.num_procs)
if os.path.islink(start_path):
start_path=os.path.realpath(start_path)
parent_dir=os.path.dirname(start_path)
assert not os.path.samefile(parent_dir,self.run_dir)
parent_sun=os.path.join(parent_dir,"suntans.dat")
if os.path.exists(parent_sun):
log.debug("And the previous suntans.dat: %s"%parent_sun)
self.restart=parent_sun
else:
log.info("Checked for %s but no luck"%parent_sun)
else:
log.info("Restart file %s is not a link"%start_path)
else:
log.debug("Does not look like a restart based on %s"%start_path)
self.restart=None
def chain_restarts(self,count=None,load_grid=False):
"""
return a list of up to count (None: unlimited) Model instances
in forward chronological order of consecutive restarts.
load_grid: defaults to *not* loading the grid of the earlier runs.
The last item is always self.
count: either the count of how many runs to return, or a np.datetime64
such that we'll go back to a run covering that date if possible.
if this is a tuple of datetimes, only return the runs covering that time
range.
"""
runs=[self]
run=self
while 1:
if isinstance(count,np.datetime64):
if runs[0].run_start <=count:
break
elif isinstance(count,tuple):
if runs[0].run_start < count[0]:
break
elif count and len(runs)>=count:
break
run.infer_restart()
if run.restart and run.restart is not True:
run=SuntansModel.load(run.restart,load_grid=load_grid)
runs.insert(0,run)
else:
break
if isinstance(count,tuple):
# Trim runs coming after the requested period
runs=[run for run in runs if run.run_start<count[1]]
if len(runs)==0:
log.warning("chain_restarts wound up with zero runs for count=%s"%str(count))
return runs
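# A minimal sketch (hypothetical dates) of the count variants accepted above:
#   >>> model.chain_restarts(count=3)  # self plus up to two parent runs
#   >>> model.chain_restarts(count=np.datetime64("2018-01-01"))  # back to a date
#   >>> model.chain_restarts(count=(np.datetime64("2018-01-01"),
#   ...                             np.datetime64("2018-02-01")))  # covering a window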
def chain_start(self,count=None):
"""
Analog of run_start, but across chained restarts.
count is passed to chain_restarts().
"""
runs=self.chain_restarts(count=count)
return runs[0].run_start
def chain_stop(self,count=None):
"""
Analog of run_stop, but across chained restarts.
Included for completeness, but this is always the same
as self.run_stop (since we don't chain forward in time).
"""
return self.run_stop
def load_template(self,fn):
self.template_fn=fn
self.config=SunConfig(fn)
def set_run_dir(self,path,mode='create'):
assert mode!='clean',"Suntans driver doesn't know what clean is"
return super(SuntansModel,self).set_run_dir(path,mode)
def file_path(self,key,proc=None):
fn=os.path.join(self.run_dir,self.config[key])
if proc is not None:
fn+=".%d"%proc
return fn
@property
def config_filename(self):
return os.path.join(self.run_dir,"suntans.dat")
def write_config(self):
log.info("Writing config to %s"%self.config_filename)
self.config.write_config(self.config_filename)
def write_monitor(self):
if not self.mon_points: return
xys=[ np.array(feat['geom']) for feat in self.mon_points]
valid_xys=[xy
for xy in xys
if self.grid.select_cells_nearest(xy,inside=True) is not None]
np.savetxt( os.path.join(self.run_dir,self.config['DataLocations']),
np.array(valid_xys) )
def write(self):
self.update_config()
self.write_config()
self.write_monitor()
self.write_extra_files()
self.write_forcing()
# Must come after write_forcing() to allow BCs to modify grid
self.write_grid()
# Must come after write_forcing(), to get proper grid and to
# have access to freesurface BCs
if self.restart:
self.log.info("Even though this is a restart, write IC")
# There are times that it is useful to be able to read the IC
# back in, e.g. to set a boundary condition equal to its initial
# condition. For a restart, this would ideally be the same state
# as in the StartFiles. That's going to take some work for
# relatively little gain. So just create the IC as if this was
# not a restart.
self.write_ic()
if self.restart:
self.write_startfiles()
def initialize_initial_condition(self):
"""
Populate self.ic_ds with a baseline initial condition.
This should be called after all boundary conditions are in place.
"""
self.ic_ds=self.zero_initial_condition()
self.set_initial_h_from_bc()
def write_ic(self):
"""
Will have to think about how best to order this -- really need
to set this as a zero earlier on, and then have some known time
for the script to modify it, before finally writing it out here.
"""
# Creating an initial condition netcdf file:
if self.ic_ds is None:
self.initialize_initial_condition()
self.write_ic_ds()
def write_startfiles(self):
src_base=os.path.join(os.path.dirname(self.restart),
self.restart_model.config['StoreFile'])
dst_base=os.path.join(self.run_dir,self.config['StartFile'])
for proc in range(self.num_procs):
src=src_base+".%d"%proc
dst=dst_base+".%d"%proc
self.restart_copier(src,dst)
def copy_ic_to_bc(self,ic_var,bc_var):
"""
Copy IC values to the boundary conditions
Copies data for the given IC variable (e.g. 'salt'), to
open and flow boundaries for bc_var (e.g. 'S').
for flow boundaries, 'boundary_' is prepended to bc_var.
The initial condition is copied into bc_ds for all time steps,
and all layers.
"""
# Open boundaries
for ci,c in enumerate(utils.progress(self.bc_ds.cellp.values,msg="IC=>Open BCs")):
ic_values = self.ic_ds[ic_var].values[0,:,c]
self.bc_ds[bc_var].isel(Ntype3=ci).values[:,:]=ic_values[None,:]
# Flow boundaries
for ei,e in enumerate(utils.progress(self.bc_ds.edgep.values,msg="IC=>Flow BCs")):
c=self.grid.edges['cells'][e,0]
assert c>=0,"Is this edge flipped"
ic_values=self.ic_ds[ic_var].values[0,:,c]
self.bc_ds["boundary_"+bc_var].isel(Ntype2=ei).values[:,:]=ic_values[None,:]
def write_ic_ds(self):
self.ic_ds.to_netcdf( os.path.join(self.run_dir,self.config['initialNCfile']) )
def load_ic_ds(self):
fn=os.path.join(self.run_dir,self.config['initialNCfile'])
if not os.path.exists(fn): return False
self.ic_ds=xr.open_dataset(fn)
def set_initial_h_from_bc(self):
"""
prereq: self.bc_ds has been set.
"""
if len(self.bc_ds.Ntype3)==0:
log.warning("Cannot set initial h from BC because there are no type 3 edges")
return
time_i=np.searchsorted(self.bc_ds.time.values,self.run_start)
# both bc_ds and ic_ds should already incorporate the depth offset, so
# no further adjustment here.
h=self.bc_ds.h.isel(Nt=time_i).mean().values
# this is positive down, already shifted, clipped.
#cell_depths=self.ic_ds['dv'].values
# This led to drying issues in 3D, and ultimately was not the fix
# for issues in 2D
#self.ic_ds.eta.values[:]=np.maximum(h,-cell_depths)
self.ic_ds.eta.values[...]=h
log.info("Setting initial eta from BCs, value=max(z_bed,%.4f) (including z_offset of %.2f)"%(h,self.z_offset))
def write_forcing(self,overwrite=True):
# these map to lists of BCs, in case there are BC with mode='add'
# map edge to BC data
self.bc_type2=defaultdict(lambda: defaultdict(list)) # [<edge index>][<variable>]=>[DataArray,...]
# map cell to BC data
self.bc_type3=defaultdict(lambda: defaultdict(list)) # [<cell index>][<variable>]=>[DataArray,...]
# Flow BCs are handled specially since they apply across a group of edges
# Each participating edge should have an entry in bc_type2,
# [<edge index>]["Q"]=>"segment_name"
# and a corresponding entry in here:
self.bc_type2_segments=defaultdict(lambda: defaultdict(list)) # [<segment name>][<variable>]=>[DataArray,...]
# point sources.
# indexed by a tuple of (cell,k)
# [(cell,k][<variable>] => [DataArray]
self.bc_point_sources=defaultdict(lambda: defaultdict(list))
super(SuntansModel,self).write_forcing()
# Get a time series that's the superset of all given timeseries
all_times=[]
# edge, cells, groups of edges
for bc_typ in [self.bc_type2,self.bc_type3,self.bc_type2_segments]:
for bc in bc_typ.values(): # each edge idx/cell idx/segment name
for vlist in bc.values(): # each variable on that edge/cell/segment
for v in vlist: #list of BCs for this variable on this element
if isinstance(v,six.string_types):
# type2 edges which reference a segment have no
# time series of their own.
continue
if 'time' in v.dims:
all_times.append( v['time'].values )
if all_times:
common_time=np.unique(np.concatenate(all_times))
else:
# no boundary conditions have times, so fabricate.
common_time=np.array( [self.run_start,self.run_stop] )
# Make sure that brackets the run:
pad=np.timedelta64(1,'D')
if common_time[0]>=self.run_start:
common_time=np.concatenate(( [self.run_start-pad],
common_time ))
# make sure there are *two* times beyond the end for quadratic
# interpolation
while len(common_time)<3 or common_time[-2]<=self.run_stop:
if common_time[-1]<self.run_stop+pad:
new_time=self.run_stop+pad
else:
new_time=common_time[-1]+pad
common_time=np.concatenate((common_time,[new_time]))
# SUNTANS applies quadratic interpolation in time, so it requires at least
# 3 time values - seems that it wants one time before and two times after
# the current time.
assert len(common_time)>2
self.bc_time=common_time
self.bc_ds=self.compile_bcs()
self.write_bc_ds()
if self.met_ds is None:
self.met_ds=self.zero_met()
self.write_met_ds()
def ds_time_units(self):
"""
setting for how to write time to netcdf
specifically as suntans expects. pass as
...
encoding=dict(time={'units':self.ds_time_units()}),
...
in xarray dataset to_netcdf(..)
"""
basetime=self.config['basetime']
assert len(basetime)==15 # YYYYMMDD.hhmmss
time_units="seconds since %s-%s-%s %s:%s:%s"%(basetime[0:4],
basetime[4:6],
basetime[6:8],
basetime[9:11],
basetime[11:13],
basetime[13:15])
return time_units
def write_bc_ds(self):
self.bc_ds.to_netcdf( os.path.join(self.run_dir,
self.config['netcdfBdyFile']),
encoding=dict(time={'units':self.ds_time_units()}))
def load_bc_ds(self):
fn=os.path.join(self.run_dir,
self.config['netcdfBdyFile'])
if not os.path.exists(fn): return False
self.bc_ds=xr.open_dataset(fn)
return self.bc_ds
def write_met_ds(self):
fn=os.path.join(self.run_dir,
self.config['metfile'])
if os.path.exists(fn):
log.info("Will replace %s"%fn)
os.unlink(fn)
else:
log.debug("Writing met ds to %s"%fn)
log.debug(str(self.met_ds))
self.met_ds.to_netcdf( fn,
encoding=dict(nt={'units':self.ds_time_units()},
Time={'units':self.ds_time_units()}) )
def load_met_ds(self):
fn=os.path.join(self.run_dir,
self.config['metfile'])
if not os.path.exists(fn): return False
self.met_ds=xr.open_dataset(fn)
def layer_data(self,with_offset=False,edge_index=None,cell_index=None,z_bed=None):
"""
Returns layer data without z_offset applied, and
positive up.
with no additional arguments, returns global information. edge_index or
cell_index will use a z_bed based on that element. z_bed is used to clip
z layers. z_bed should be a positive-up quantity. A specified z_bed
takes precedence over edge_index or cell_index.
Returns a xr.Dataset
with z_min, z_max, Nk, z_interface, z_mid.
z_interface and z_mid are ordered surface to bed.
if with_offset is True, the z_offset is included, which yields
more accurate (i.e. similar to suntans) layers when there is stretching
"""
if z_bed is None:
if edge_index is not None:
z_bed=self.grid.edge_depths()[edge_index]
elif cell_index is not None:
z_bed=self.grid.cell_depths()[cell_index]
Nk=int(self.config['Nkmax'])
z_min=self.grid.cells['z_bed'].min() # bed
z_max=self.grid.cells['z_bed'].max() # surface
r=float(self.config['rstretch'])
if with_offset:
z_min-=self.z_offset
z_max=0
depth=-z_min # positive:down
dzs=np.zeros(Nk, np.float64)
if r>1.0:
dzs[0]=depth*(r-1)/(r**Nk-1)
for k in range(1,Nk):
dzs[k]=r*dzs[k-1]
else:
dzs[:]=depth/float(Nk)
z_interface=np.concatenate( ( [z_max],
z_max-np.cumsum(dzs) ) )
z_mid=0.5*(z_interface[:-1]+z_interface[1:])
ds=xr.Dataset()
ds['z_min']=(),z_min
ds['z_max']=(),z_max
ds['z_interface']=('Nkp1',),z_interface
ds['z_mid']=('Nk',),z_mid
for v in ['z_min','z_max','z_interface','z_mid']:
ds[v].attrs['positive']='up'
return ds
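# A standalone sketch of the stretched-layer recurrence above, with assumed
# values Nk=3, r=1.1, depth=10.0 (illustrative only, not from a real run):
#   >>> Nk, r, depth = 3, 1.1, 10.0
#   >>> dz0 = depth*(r-1)/(r**Nk-1)          # thinnest (surface) layer
#   >>> dzs = [dz0*r**k for k in range(Nk)]  # each layer r times the previous
#   >>> abs(sum(dzs) - depth) < 1e-12        # geometric series sums to depth
#   True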
def compile_bcs(self):
"""
Postprocess the information from write_forcing()
to create the BC netcdf dataset.
Note that bc_ds includes the z_offset.
"""
ds=xr.Dataset()
layers=self.layer_data()
Nk=layers.dims['Nk']
ds['z']=('Nk',),-(layers.z_mid.values + self.z_offset)
# suntans assumes that this dimension is Nt, not time
Nt=len(self.bc_time)
ds['time']=('Nt',),self.bc_time
# Scalars will introduce type3 and type2 because they may not know
# what type of flow forcing is there. Here we skim out scalars that
# do not have an associated h (type3) or flow (type2) boundary
# the list(...keys()) part is to make a copy, so the del's
# don't upset the iteration
for cell in list(self.bc_type3.keys()):
if 'h' not in self.bc_type3[cell]:
del self.bc_type3[cell]
# 'u' 'v' and 'Q' for type2
for edge in list(self.bc_type2.keys()):
if not ( ('u' in self.bc_type2[edge]) or
('v' in self.bc_type2[edge]) or
('Q' in self.bc_type2[edge])):
del self.bc_type2[edge]
Ntype3=len(self.bc_type3)
ds['cellp']=('Ntype3',),np.zeros(Ntype3,np.int32)-1
ds['xv']=('Ntype3',),np.zeros(Ntype3,np.float64)
ds['yv']=('Ntype3',),np.zeros(Ntype3,np.float64)
# the actual data variables for type 3:
ds['uc']=('Nt','Nk','Ntype3',),np.zeros((Nt,Nk,Ntype3),np.float64)
ds['vc']=('Nt','Nk','Ntype3',),np.zeros((Nt,Nk,Ntype3),np.float64)
ds['wc']=('Nt','Nk','Ntype3',),np.zeros((Nt,Nk,Ntype3),np.float64)
ds['T']=('Nt','Nk','Ntype3',),20*np.ones((Nt,Nk,Ntype3),np.float64)
ds['S']=('Nt','Nk','Ntype3',),np.zeros((Nt,Nk,Ntype3),np.float64)
ds['h']=('Nt','Ntype3'),np.zeros( (Nt, Ntype3), np.float64 )
def interp_time(da):
if 'time' not in da.dims: # constant value
# this should do the right thing for both scalar and vector
# values
return da.values * np.ones( (Nt,)+da.values.shape )
if da.ndim==2:
assert da.dims[0]=='time'
# recursively call per-layer, which is assumed to be the second
# dimension
profiles=[ interp_time(da[:,i]) for i in range(da.shape[1]) ]
return np.vstack(profiles).T
return np.interp( utils.to_dnum(ds.time.values),
utils.to_dnum(da.time.values), da.values )
import time
elapsed=[0.0]
def combine_items(values,bc_items,offset=0.0):
base_item=None
# include the last mode='overwrite' bc, and sum the mode='add'
# bcs.
values[:]=offset
# aside from Q and h, other variables are 3D, which means
# that if the data comes back 2D, pad out the layer dimension
def pad_dims(data):
if values.ndim==2 and data.ndim==1:
return data[:,None] # broadcastable vertical dimension
else:
return data
for bc_item in bc_items:
if bc_item.mode=='add':
t0=time.time()
values[:] += pad_dims(interp_time(bc_item))
elapsed[0]+=time.time()-t0
else:
base_item=bc_item
if base_item is None:
self.log.warning("BC for cell %d has no overwrite items"%type3_cell)
else:
t0=time.time()
values[:] += pad_dims(interp_time(base_item))
elapsed[0]+=time.time()-t0
cc=self.grid.cells_center()
for type3_i,type3_cell in enumerate(self.bc_type3): # each edge/cell
ds['cellp'].values[type3_i]=type3_cell
ds['xv'].values[type3_i]=cc[type3_cell,0]
ds['yv'].values[type3_i]=cc[type3_cell,1]
bc=self.bc_type3[type3_cell]
for v in bc.keys(): # each variable on that edge/cell
if v=='h':
offset=self.z_offset
else:
offset=0
# will set bc values in place
combine_items(ds[v].isel(Ntype3=type3_i).values,
bc[v],
offset=offset)
Ntype2=len(self.bc_type2)
Nseg=len(self.bc_type2_segments)
ds['edgep']=('Ntype2',),np.zeros(Ntype2,np.int32)-1
ds['xe']=('Ntype2',),np.zeros(Ntype2,np.float64)
ds['ye']=('Ntype2',),np.zeros(Ntype2,np.float64)
ds['boundary_h']=('Nt','Ntype2'),np.zeros( (Nt, Ntype2), np.float64) + self.z_offset
ds['boundary_u']=('Nt','Nk','Ntype2'),np.zeros( (Nt, Nk, Ntype2), np.float64)
ds['boundary_v']=('Nt','Nk','Ntype2'),np.zeros( (Nt, Nk, Ntype2), np.float64)
ds['boundary_w']=('Nt','Nk','Ntype2'),np.zeros( (Nt, Nk, Ntype2), np.float64)
ds['boundary_T']=('Nt','Nk','Ntype2'),np.zeros( (Nt, Nk, Ntype2), np.float64)
ds['boundary_S']=('Nt','Nk','Ntype2'),np.zeros( (Nt, Nk, Ntype2), np.float64)
ds['boundary_Q']=('Nt','Nseg'),np.zeros( (Nt, Nseg), np.float64)
# Iterate over segments first, so that edges below can grab the correct
# index.
segment_names=list(self.bc_type2_segments.keys()) # this establishes the order of the segments
# make this distinct from 0 or 1 to aid debugging
segment_ids=100 + np.arange(len(segment_names))
ds['seg_name']=('Nseg',),segment_names # not read by suntans, but maybe helps debugging
ds['segedgep']=('Ntype2',),np.zeros(Ntype2,np.int32)-1
ds['segp']=('Nseg',),segment_ids # np.arange(Nseg,dtype=np.int32)
for seg_i,seg_name in enumerate(segment_names):
bc=self.bc_type2_segments[seg_name]
for v in bc.keys(): # only Q, but stick to the same pattern
combine_items(ds['boundary_'+v].isel(Nseg=seg_i).values,
bc[v])
ec=self.grid.edges_center()
for type2_i,type2_edge in enumerate(self.bc_type2): # each edge
ds['edgep'].values[type2_i]=type2_edge
ds['xe'].values[type2_i]=ec[type2_edge,0]
ds['ye'].values[type2_i]=ec[type2_edge,1]
bc=self.bc_type2[type2_edge]
for v in bc.keys(): # each variable on that edge/cell
if v=='h':
offset=self.z_offset
else:
offset=0.0
if v!='Q':
combine_items(ds['boundary_'+v].isel(Ntype2=type2_i).values,
bc[v],offset)
else:
seg_name=bc[v]
# too lazy to work through the right way to deal with combined
# bcs for Q right now, so just warn the user that it may be
# a problem.
if len(seg_name)!=1:
log.warning("Only tested with a single value, but got %s"%str(seg_name))
seg_name=seg_name[0]
seg_idx=segment_ids[segment_names.index(seg_name)]
ds['segedgep'].values[type2_i] = seg_idx
# -- Set grid marks --
for c in ds.cellp.values:
assert c>=0
for j in self.grid.cell_to_edges(c):
j_cells=self.grid.edge_to_cells(j)
if j_cells.min()<0:# boundary
self.grid.edges['mark'][j]=3 # set to type 3
for j in ds.edgep.values:
assert j>=0,"Some edge pointers did not get set"
self.grid.edges['mark'][j]=2
# --- Point source code ---
Npoint=len(self.bc_point_sources)
ds['point_cell']=('Npoint',), np.zeros(Npoint,np.int32) # point_cell
ds['point_layer']=('Npoint',), np.zeros(Npoint,np.int32) # point_layer
ds['point_Q']=('Nt','Npoint'), np.zeros( (Nt,Npoint), np.float64)
import numpy as np
import matplotlib.pyplot as plt
import sympy
from sympy import *
import sys
sys.path.append(r'C:\Users\elira\Google Drive\butools2\Python')
sys.path.append('/home/d/dkrass/eliransc/Python')
from tqdm import tqdm
from butools.ph import *
from butools.map import *
from butools.queues import *
from butools.mam import *
from butools.dph import *
from scipy.linalg import expm, sinm, cosm
from sympy import Symbol
from sympy.physics.quantum import TensorProduct
import pickle as pkl
import pandas as pd
from sympy import diff, sin, exp
from numpy.linalg import matrix_power
def busy(s, lam2, mu2):
return ((lam2 + mu2 + s) - ((lam2 + mu2 + s) ** 2 - 4 * lam2 * mu2) ** 0.5) / (2 * lam2)
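# busy(s, lam2, mu2) is the Laplace-Stieltjes transform of the M/M/1 busy
# period: the smaller root B(s) of lam2*B**2 - (lam2 + mu2 + s)*B + mu2 = 0.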
def ser_lap(s, mu):
return mu / (s + mu)
def hyper(s, lam1, lam2, mu1, mu2):
return ser_lap(s, mu1) * lam1 / (lam1 + lam2) + ser_lap(s, mu2) * lam2 / (lam1 + lam2)
def rho(lam1, lam2, mu1, mu2):
return (lam1 + lam2) * ((lam1 / ((lam1 + lam2) * mu1)) + (lam2 / ((lam1 + lam2) * mu2)))
def w_lap(s, lam1, lam2, mu1, mu2):
return ((1 - rho(lam1, lam2, mu1, mu2)) * s) / (s - (lam1 + lam2) * (1 - hyper(s, lam1, lam2, mu1, mu2)))
def F(s, lam1, lam2, mu1, mu2):
return w_lap(s, lam1, lam2, mu1, mu2) * ser_lap(s, mu1)
def A(s, lam1, lam2, mu2):
return (lam1 / (lam1 + lam2 - lam2 * (ser_lap(s, mu2))))
def beta(s, lam1, lam2, mu1, mu2):
return (lam1 / (lam1 + lam2 + s) + ((A(s, lam1, lam2, mu2) * lam2) / (lam1 + lam2 + s)) * (
ser_lap(s, mu2) - busy(s + lam1, lam2, mu2))) / (
1 - ((lam2 * busy(s + lam1, lam2, mu2)) / (lam1 + lam2 + s)))
def tau(s, lam1, lam2, mu1, mu2):
return ser_lap(s, mu1) * (A(s, lam1, lam2, mu2) * (
1 - F(lam1 + lam2 - lam2 * busy(s + lam1, lam2, mu2), lam1, lam2, mu1, mu2)) + F(
lam1 + lam2 - lam2 * busy(s + lam1, lam2, mu2), lam1, lam2, mu1, mu2) * beta(s, lam1, lam2, mu1, mu2))
def get_var(lam1, lam2, mu1, mu2):
s = Symbol('s')
y = tau(s, lam1, lam2, mu1, mu2)
dx = diff(y, s)
dxdx = diff(dx, s)
return dxdx.subs(s, 0) - (dx.subs(s, 0)) ** 2
def get_nth_moment(lam1, lam2, mu1, mu2, n):
s = Symbol('s')
y = tau(s, lam1, lam2, mu1, mu2)
for i in range(n):
if i == 0:
dx = diff(y, s)
else:
dx = diff(dx, s)
return dx.subs(s, 0)
def get_first_n_moments(parameters, n=5):
lam1, lam2, mu1, mu2 = parameters
moments = []
for n in range(1, n + 1):
moments.append(get_nth_moment(lam1, lam2, mu1, mu2, n) * (-1) ** n)
moments = np.array([moments], dtype='float')
return moments
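# A minimal sketch with assumed rates (illustrative only); stability requires
# rho(lam1, lam2, mu1, mu2) < 1 for the moments to be finite:
#   >>> get_first_n_moments((0.4, 0.3, 1.0, 1.5), n=3).shape
#   (1, 3)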
def kroneker_sum(G, H):
size_g = G.shape[0]
size_h = H.shape[0]
return np.kron(G, np.identity(size_h)) + np.kron(np.identity(size_g), H)
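# A quick check (hypothetical 2x2 and 1x1 inputs) of the Kronecker sum
# G (+) H = kron(G, I_h) + kron(I_g, H) implemented above:
#   >>> G = np.array([[-1., 1.], [0., -2.]]); H = np.array([[-3.]])
#   >>> kroneker_sum(G, H)
#   array([[-4.,  1.],
#          [ 0., -5.]])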
def give_boundry_probs(R, A0, A1, A, B, C0, ro):
p00, p01, p02, p100, p110, p120, p101, p111, p121 = symbols('p00 p01 p02 p100 p110 p120 p101 p111 p121')
eqns = [np.dot(np.array([p00, p01, p02]), np.ones((A0.shape[0]))) - (1 - ro)]
eq3 = np.dot(np.array([p00, p01, p02]), A0) + np.dot(np.array([p100, p110, p120, p101, p111, p121]), A1)
eq1 = np.dot(np.array([p00, p01, p02]), C0)
eq2 = np.dot(np.array([p100, p110, p120, p101, p111, p121]), B + np.dot(R, A))
for eq_ind in range(B.shape[0]):
eqns.append(eq1[0, eq_ind] + eq2[0, eq_ind])
for eq_ind in range(A0.shape[0]):
eqns.append(eq3[0, eq_ind])
A_mat, b = linear_eq_to_matrix(eqns[:-1], [p00, p01, p02, p100, p110, p120, p101, p111, p121])
return A_mat, b
def get_expect_gph_system(R, p1_arr, xm_max=5000):
expected = 0
for pi_val in range(1, xm_max):
ui = p1_arr.reshape((1, R.shape[0]))
Ri = np.linalg.matrix_power(R, pi_val - 1)
expected += np.dot(np.dot(ui, Ri), np.ones((R.shape[0], 1))) * pi_val
return expected[0, 0]
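# The loop above evaluates the matrix-geometric mean system size
# E[N] = sum_{i>=1} i * u1 * R**(i-1) * 1, truncated after xm_max terms.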
def get_A0(Ts):
krom_sum = kroneker_sum(Ts[0], Ts[1])
if len(Ts) > 2:
for T_ind in range(2, len(Ts)):
krom_sum = kroneker_sum(krom_sum, Ts[T_ind])
return krom_sum
def get_C_first(T0s, Ts, s):
krom_sum = kroneker_sum(T0s[0], T0s[1])
if len(Ts) > 2:
for T_ind in range(2, len(Ts)):
krom_sum = kroneker_sum(krom_sum, T0s[T_ind])
return krom_sum
def get_B(Ts, s):
krom_sum = kroneker_sum(Ts[0], Ts[1])
if len(Ts) > 2:
for T_ind in range(2, len(Ts)):
krom_sum = kroneker_sum(krom_sum, Ts[T_ind])
return kroneker_sum(krom_sum, s)
def get_A(Ts, new_beta, s0):
# zero blocks must be square so the Kronecker products conform
kron_sum = kroneker_sum(np.zeros((Ts[0].shape[0], Ts[0].shape[0])), np.zeros((Ts[1].shape[0], Ts[1].shape[0])))
if len(Ts) > 2:
for T_ind in range(2, len(Ts)):
kron_sum = kroneker_sum(kron_sum, np.zeros((Ts[T_ind].shape[0], Ts[T_ind].shape[0])))
kron_sum = kroneker_sum(kron_sum, np.dot(s0, new_beta))
return kron_sum
def compute_s_beta(r, mu, num_stations=2, station_ind=0):
s_ = np.array([])
total_arrivals_to_station = np.sum(r[:, station_ind]) + np.sum(r[station_ind, :]) - np.sum(
r[station_ind, station_ind])
beta = np.array([])
for stream_ind in range(r.shape[0]):
if r[station_ind, stream_ind] > 0:
beta = np.append(beta, r[station_ind, stream_ind] / total_arrivals_to_station)
s_ = np.append(s_, -mu[station_ind, stream_ind])
for out_station in range(num_stations):
if out_station != station_ind:
if r[out_station, station_ind] > 0:
beta = np.append(beta, r[out_station, station_ind] / total_arrivals_to_station)
s_ = np.append(s_, -mu[station_ind, station_ind])
new_beta = np.array([])
new_s_ = np.unique(s_)
for val in new_s_:
new_beta = np.append(new_beta, np.sum(beta[np.argwhere(s_ == val)]))
new_beta = new_beta.reshape((1, new_beta.shape[0]))
s = np.identity(new_s_.shape[0]) * new_s_
return s, new_beta, new_s_
def compute_curr_t(curr_ind, r, mu):
r_mismatched = np.sum(r[curr_ind, :]) - r[curr_ind, curr_ind]
r_matched = r[curr_ind, curr_ind]
mu_mismatched = np.mean(np.delete(mu[curr_ind, :], curr_ind, 0))
mu_matched = mu[curr_ind, curr_ind]
parameters = (r_mismatched, r_matched, mu_mismatched, mu_matched)
moments = get_first_n_moments(parameters)
return moments
def get_Ts_alphas(r, mu, station_ind):
alphas = []
Ts = []
T0s = []
for curr_ind in range(r.shape[0]):
if curr_ind != station_ind:
mome = compute_curr_t(curr_ind, r, mu)
curr_alpha, curr_T = PH3From5Moments(mome[0])
alphas.append(curr_alpha)
Ts.append(curr_T)
T0s.append(-np.dot(np.dot(curr_T, np.ones((curr_T.shape[0], 1))), curr_alpha))
for stream_ind in range(r[station_ind, :].shape[0]):
Ts.append(np.array(-r[station_ind, stream_ind]).reshape((1, 1)))
alphas.append(1.)
T0s.append(-np.dot(np.dot(Ts[-1], np.ones(1)), alphas[-1]))
return Ts, T0s, alphas
def total_arrivals_to_station(r, station_ind=0):
return np.sum(r[:, station_ind]) + np.sum(r[station_ind, :]) - np.sum(r[station_ind, station_ind])
def get_ro(r, mu, new_beta, new_s_, station_ind=0):
return np.sum(new_beta * total_arrivals_to_station(r, station_ind) * (-1 / new_s_))
def get_ro_2(lam_0, lam_1, new_beta, s0):
return (lam_0 + lam_1) * np.dot(new_beta, 1 / s0)
def get_bound_steady_state(R, A0, A1, AA, B, C0, ro):
u0, u10, u11 = symbols('u0 u10 u11')
eqns = [u0 - (1 - ro[0][0])]
for ind in range(2):
eqns.append(np.dot(u0, C0)[0][ind] + np.dot(np.array([u10, u11]), B)[ind] +
np.dot(np.dot(np.array([u10, u11]), R), AA)[0][0, ind])
A_mat, b = linear_eq_to_matrix(eqns, [u0, u10, u11])
u0, u10, u11 = np.linalg.solve(np.array(A_mat, dtype=np.float), np.array(b, dtype=np.float))
return u0[0], u10[0], u11[0]
def get_Avg_system(R, u10, u11):
p1 = np.array([u10, u11])
total_avg = 0
for ind in range(1, 500):
total_avg += ind * np.sum(np.dot(p1, matrix_power(R, ind - 1)))
return total_avg
def get_steady(lam_0, lam_1, mu_0, mu_1):
T0 = np.array([-lam_0])
T1 = np.array([-lam_1])
Ts = [T0, T1]
T00 = np.array([-np.dot(T0, np.ones(1))])
T10 = np.array([-np.dot(T1, np.ones(1))])
T0s = [T00, T10]
alphas = [np.array(1.), np.array(1.), ]
new_beta = np.array([lam_0 / (lam_0 + lam_1), lam_1 / (lam_0 + lam_1)]).reshape(1, 2)
s = np.array([[-mu_0, 0], [0, -mu_1]])
s0 = -np.dot(s, np.ones((s.shape[0], 1)))
A0 = get_A0(Ts)
A1 = np.kron(np.identity(A0.shape[0]), s0)
AA = get_A(Ts, new_beta, s0)
B = get_B(Ts, s)
C = kroneker_sum(get_C_first(T0s, Ts, s), np.zeros(s.shape))
C0 = np.kron(get_C_first(T0s , Ts, s), new_beta)
R = QBDFundamentalMatrices(AA, B, C, "R")
ro = get_ro_2(lam_0, lam_1, new_beta, s0)
u0, u10, u11 = get_bound_steady_state(R, A0, A1, AA, B, C0, ro)
u1 = u10 + u11
return u0, u10, u11, R
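# A minimal sketch with assumed rates (illustrative only); get_steady needs
# the utilization ro < 1 so the QBD fundamental matrix R exists:
#   >>> u0, u10, u11, R = get_steady(lam_0=0.3, lam_1=0.2, mu_0=1.0, mu_1=1.0)
#   >>> avg_system = get_Avg_system(R, u10, u11)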
def geometric_pdf(lam0,lam1,n):
p = lam1/(lam1+lam0)
return p*((1-p)**(n))
def get_steady_for_given_v(u0, u10, u11, R, v):
steady = [u0, u10+u11]
for steady_prob in range(2, v+2):
steady.append(np.sum(np.dot(np.array([u10, u11]), matrix_power(R, steady_prob-1))))
steady = np.array(steady)
steady = np.append(steady, 1-np.sum(steady))
return steady
def create_ph_matrix_for_each_case(event_list, lam_0, lam_1, mu_0, mu_1):
size, size_arr = get_matrix_size(event_list)
s = np.zeros((size, size))
a = np.zeros(size)
import matplotlib.pyplot as plt
import pydicom
import numpy as np
from skimage.measure import label
import cv2 as cv
from scipy.signal import argrelextrema
from scipy import ndimage
import cv2
try:
from utils.LUT_table_codes import extract_parameters, get_name_from_df
except:
from LUT_table_codes import extract_parameters, get_name_from_df
import pandas as pd
import matplotlib as mpl
mpl.rcParams['figure.dpi']= 72
def getLargestCC(segmentation):
'''
Parameters
----------
segmentation: binary image
Returns
-------
largestCC: Largest connected component
'''
labels = label(segmentation)
largestCC = labels == np.argmax(np.bincount(labels.flat))
return largestCC
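# A toy check (assumed binary mask, not imaging data) of the helper above;
# note that if background (label 0) is the largest region it wins the argmax:
#   >>> seg = np.array([[1, 1, 0, 0],
#   ...                 [1, 1, 0, 1]])
#   >>> getLargestCC(seg).astype(int)
#   array([[1, 1, 0, 0],
#          [1, 1, 0, 0]])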
def fillhole(input_image):
'''
Fill holes in a gray/binary image by the flood-fill method.
Note: only holes fully enclosed by connected regions will be filled.
:param input_image: binary image
:return: filled image
'''
im_flood_fill = input_image.copy()
h, w = input_image.shape[:2]
mask = np.zeros((h + 2, w + 2), np.uint8)
im_flood_fill = im_flood_fill.astype("uint8")
cv.floodFill(im_flood_fill, mask, (0, 0), 255)
im_flood_fill_inv = cv.bitwise_not(im_flood_fill)
img_out = input_image | im_flood_fill_inv
return img_out
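# A toy example (assumed 0/255 uint8 input, as cv.floodFill expects):
#   >>> ring = np.zeros((5, 5), np.uint8)
#   >>> ring[1:4, 1:4] = 255; ring[2, 2] = 0   # one fully enclosed hole
#   >>> fillhole(ring)[2, 2]
#   255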
def rgb2gray(rgb):
'''
Parameters
----------
rgb : RGB - image
Returns
-------
gray : Grayscale image
'''
r, g, b = rgb[:,:,0], rgb[:,:,1], rgb[:,:,2]
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
return gray
def crop_US_im(im, crop2half=True):
'''
This function crops the ultrasound image to the image content ie. extracts the outer regions of the US dicom image.
Parameters
----------
crop2half: flag for additional half row crop
im : Ultrasound image in RGB or grayscale
Returns
-------
im_crop : Cropped image to analysis content
'''
if len(im.shape) == 3: #Change to grayscale
im = rgb2gray(im)
#--- Pre-process ---
BW = im > 0 #Threshold image to find the largest element.
label_im, nb_labels = ndimage.label(BW) #label components
#Check the upper half of image:
sizes = ndimage.sum(BW[0:int(BW.shape[0]/2),:], label_im[0:int(BW.shape[0]/2),:], range(nb_labels + 1))
loc = np.argmax(sizes)
if (loc==0).all(): #if nothing is found in the upper half then assign loc to 1
loc=1
BW = label_im == loc
## Sometimes the upper border may be the largest element, which is incorrect
#--> Check if that was selected and correct:
vals = np.where(BW==1) #locations for largest elements
y_vals=vals[0]
if (y_vals == 10).any(): #Row-10 pixels still belong to the image header --> the border is the largest component, so the next biggest component is the actual image region
sizes[loc]=0
loc = np.argmax(sizes) #Find next largest component
BW = label_im==loc
L = BW.astype(float)
# Crop in row direction to content:
vals = np.argwhere(L==1)
x = vals[:,0]
x_min = np.min(x)
x_max = L.shape[0] #np.max(x)
im_crop = im[x_min:x_max,:]
BW = BW[x_min:x_max,:]
#Crop in column direction to content:
BW = BW[0:100,:] #take first 100 rows to avoid the colorbar
L = BW.astype(float)
vals = np.argwhere(L==1)
y = vals[:,1]
y_min = np.min(y) #find the locations
y_max = np.max(y)
im_crop = im[x_min:x_max,y_min:y_max] #cropped image
if crop2half==True: #Reverberations are present in the upper half of the ultrasound image
#Vertical crop to half:
x = np.round(im_crop.shape[0]/2)
im_crop = im_crop[0 : x.astype(int), :]
return im_crop
def smooth(y, box_pts):
'''
smooths vector y with box convolution length box_pts
Parameters
----------
y : vector profile
box_pts : box vector length
Returns
-------
y_smooth : smoothed vector
'''
box = np.ones(box_pts)/box_pts
y_smooth = np.convolve(y, box, mode='same')
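# A toy example of the box smoothing above (assumed values):
#   >>> smooth(np.array([0., 0., 3., 0., 0.]), 3)
#   array([0., 1., 1., 1., 0.])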
import warnings
warnings.simplefilter('ignore', category=DeprecationWarning)
warnings.simplefilter('ignore', category=FutureWarning)
import numpy as np
import pandas as pd
import time
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel, ConstantKernel as C
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from paper_egp.utils import plot_gp, r_assessment
from paper_egp.egp import NIGP
from scipy import interpolate
import matplotlib.pyplot as plt
try:
from gp_extras import HeteroscedasticKernel
from sklearn.cluster import KMeans
extras_install = True
except ImportError:
print("GP Extras file not found. Won't do Example")
extras_install = False
class Example1D(object):
def __init__(self, func=1, x_cov=0.3):
self.func = func
self.x_cov = x_cov
self.models = None
self.data = None
self.n_restarts = 10
self.models_fitted = None
self.empirical_variance_fitted = None
self.average_scores_fitted = None
self.fig_save_1d = "/figures/experiment_1d"
self.fig_emp_error = "/home/emmanuel/projects/error_propagation/figures/paper/experiment_1d/"
def get_data(self, func=None, x_error=None):
if func is None:
func = self.func
if x_error is None:
x_error = self.x_cov
X, y, error_params = example_error_1d(func, x_error)
self.X = X
self.y = y
self.error_params = error_params
self.x_cov = error_params['x']
self.sigma_y = error_params['y']
self.f = error_params['f']
self.data = True
return self
def get_gp_models(self):
if self.data is not True:
self.get_data()
self.models = get_models(self.X['train'], self.y['train'], x_cov=self.x_cov)
return self
def fit_gps(self):
if self.models is None:
self.get_gp_models()
df = pd.DataFrame(columns=['model', 'mae', 'mse', 'rmse', 'r2'])
for imodel in self.models.keys():
# Make Predictions
y_pred = self.models[imodel].predict(self.X['test'])
# Get Error Stats
mae, mse, rmse, r2 = r_assessment(y_pred, self.y['test'], verbose=0)
df = df.append({
'model': imodel,
'mae': mae,
'mse': mse,
'rmse': rmse,
'r2': r2
}, ignore_index=True)
self.results = df
self.models = self.models
self.models_fitted = True
return self
def show_gp_fit(self, show=True):
if self.models_fitted is not True:
self.fit_gps()
for imodel in self.models.keys(): # Plot
# Get plot data
mean, std = self.models[imodel].predict(self.X['plot'], return_std=True)
save_name = self.fig_save_1d + '/gp_' + imodel + '.png'
plot_gp(self.X['plot'], mean,
std=std, xtrain=self.X['train'],
ytrain=self.y['train'],
save_name=save_name)
return self
def get_empirical_variance(self, n_points=1000, n_trials=100):
if self.models_fitted is not True:
self.fit_gps()
rng = np.random.RandomState(None)
#
mae_score = {ikey: list() for ikey in self.models.keys()}
mse_score = {ikey: list() for ikey in self.models.keys()}
abs_error = {ikey: list() for ikey in self.models.keys()}
squared_error = {ikey: list() for ikey in self.models.keys()}
x = np.linspace(self.X['plot'].min(), self.X['plot'].max(), n_points)
# Testing set (noise-less)
ytest = self.f(x)
ytest += self.sigma_y * rng.randn(n_points)
ytest = ytest.reshape(-1, 1)
# loop through trials
for itrial in range(n_trials):
if itrial % 10 == 0:
print('Trial: {}'.format(itrial + 1))
# Generate x samples with random error
xtest = x + self.x_cov * rng.randn(n_points)
xtest = xtest.reshape(-1, 1)
# Loop through model
for imodel in self.models.keys():
mean = self.models[imodel].predict(xtest)
abs_error[imodel].append(np.abs(mean.squeeze() - ytest.squeeze()))
squared_error[imodel].append((mean.squeeze() - ytest.squeeze())**2)
mae_score[imodel].append(
mean_absolute_error(mean.squeeze(), ytest.squeeze()))
mse_score[imodel].append(
mean_squared_error(mean.squeeze(), ytest.squeeze()))
# Convert to arrays
for imodel in self.models.keys():
abs_error[imodel] = np.array(abs_error[imodel])
squared_error[imodel] = np.array(squared_error[imodel])
mae_score[imodel] = np.array(mae_score[imodel])
mse_score[imodel] = np.array(mse_score[imodel])
self.abs_error = abs_error
self.squared_error = squared_error
self.mae_score = mae_score
self.mse_score = mse_score
self.empirical_variance_fitted = True
return self
def get_average_empirical(self):
if self.empirical_variance_fitted is not True:
self.get_empirical_variance()
avg_abs_error = dict()
avg_squared_error = dict()
avg_mae_score = dict()
avg_mse_score = dict()
for imodel in self.models.keys():
avg_abs_error[imodel] = np.mean(
np.array(self.abs_error[imodel]).squeeze(), axis=0)
avg_squared_error[imodel] = np.mean(
np.array(self.squared_error[imodel]).squeeze(), axis=0)
avg_mae_score[imodel] = np.mean(np.array(self.mae_score[imodel]))
avg_mse_score[imodel] = np.mean(np.array(self.mse_score[imodel]))
import matplotlib.pyplot as plt
import numpy as np
import param_gedi as param
import imageio
import os
class Plotty:
def __init__(self, model_timestamp):
self.p = param.Param()
self.model_timestamp = model_timestamp
def show_batch(self, image_batch, label_batch):
"""Vgg montage"""
plt.figure(figsize=(4, 4))
for n in range(self.p.BATCH_SIZE):
ax = plt.subplot(5, 5, n + 1)
img = image_batch[n]
print('mx', np.max(img))
print(np.min(img))
b, g, r = img[..., 0], img[..., 1], img[..., 2]
b = b + self.p.VGG_MEAN[0]
g = g + self.p.VGG_MEAN[1]
r = r + self.p.VGG_MEAN[2]
rgb = np.dstack((r, g, b))
print('mx rgb', np.max(rgb))
print(np.min(rgb))
rgb = np.uint8(rgb)
plt.imshow(rgb)
# plt.title(CLASS_NAMES[label_batch[n]==1][0].title())
plt.axis('off')
plt.show()
def make_montage(self, im_lbl_lst, title, size=16):
side = int(np.sqrt(size))
montage = np.zeros((self.p.target_size[0] * side,
self.p.target_size[1] * side,
self.p.target_size[2]), dtype=np.uint8)
step = self.p.target_size[0]
chklbls = []
partitions = len(im_lbl_lst) // size
for k in range(partitions):
savepath = os.path.join(self.p.confusion_dir, self.model_timestamp + '_' + title + '_' + str(k) + '.tif')
im_split = im_lbl_lst[k * size: (k + 1) * size]
for cnt, lst in enumerate(im_split):
i = int(cnt % np.sqrt(size))
j = int(cnt // np.sqrt(size))
img = lst[0]
lbl = lst[1]
chklbls.append(np.argmax(lbl))
b, g, r = img[..., 0], img[..., 1], img[..., 2]
b = b + self.p.VGG_MEAN[0]
g = g + self.p.VGG_MEAN[1]
r = r + self.p.VGG_MEAN[2]
rgb = np.dstack((r, g, b))
"""ImbalancedEnsembleClassifierMixin: mixin class for all
imbalanced ensemble estimators.
BaseImbalancedEnsemble: a general base class for imbalanced ensemble.
"""
# Authors: <NAME> <<EMAIL>>
# License: MIT
from abc import ABCMeta, abstractmethod
import numpy as np
from collections import Counter
from joblib import Parallel
from sklearn.base import ClassifierMixin, clone
from sklearn.ensemble import BaseEnsemble
from sklearn.ensemble._base import _set_random_states
from sklearn.ensemble._base import _partition_estimators
from sklearn.ensemble._bagging import _parallel_predict_proba
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_random_state
from sklearn.utils import check_array
from sklearn.utils.fixes import delayed
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.validation import (_check_sample_weight,
check_random_state,
check_is_fitted,
column_or_1d,
check_array,
has_fit_parameter,)
from ..base import TRAINING_TYPES
from ..utils._validation import _deprecate_positional_args
from ..utils._docstring import Substitution, _get_parameter_docstring
# # For local test
# import sys
# sys.path.append("..")
# from base import TRAINING_TYPES
# from utils._validation import _deprecate_positional_args
# from utils._docstring import Substitution, _get_parameter_docstring
TRAINING_LOG_HEAD_TITLES = {
'iter': '#Estimators',
'class_distr': 'Class Distribution',
'datasets': 'Datasets',
'metrics': 'Metrics',
}
MAX_INT = np.iinfo(np.int32).max
def sort_dict_by_key(d):
"""Sort a dict by key, return sorted dict."""
return dict(sorted(d.items(), key=lambda k: k[0]))
class ImbalancedEnsembleClassifierMixin(ClassifierMixin):
"""Mixin class for all ensemble classifiers in imbalanced-ensemble.
This class is essential for a derived class to be identified by the
sklearn and imbalanced-ensemble package. Additionally, it provides
several utilities for formatting training logs of imbalanced-ensemble
classifiers.
Attributes
----------
_estimator_type : ``'classifier'``
scikit-learn use this attribute to identify a classifier.
_estimator_ensemble_type : ``'imbalanced_ensemble_classifier'``
imbalanced-ensemble use this attribute to identify a classifier.
"""
_estimator_type = "classifier"
_estimator_ensemble_type = "imbalanced_ensemble_classifier"
def _evaluate(self,
dataset_name:str,
eval_metrics:dict=None,
return_header:bool=False,
return_value_dict:bool=False,) -> str or dict:
"""Private function for performance evaluation during the
ensemble training process.
"""
eval_datasets_ = self.eval_datasets_
classes_ = self.classes_
verbose_format_ = self.train_verbose_format_
# Temporarily disable verbose
support_verbose = hasattr(self, 'verbose')
if support_verbose:
verbose, self.verbose = self.verbose, 0
# If no eval_metrics is given, use self.eval_metrics_
if eval_metrics == None:
eval_metrics = self.eval_metrics_
# If return numerical results
if return_value_dict == True:
value_dict = {}
for data_name, (X_eval, y_eval) in eval_datasets_.items():
y_predict_proba = self.predict_proba(X_eval)
data_value_dict = {}
for metric_name, (metric_func, kwargs, ac_proba, ac_labels) \
in eval_metrics.items():
if ac_labels: kwargs['labels'] = classes_
if ac_proba: # If the metric take predict probabilities
score = metric_func(y_eval, y_predict_proba, **kwargs)
else: # If the metric do not take predict probabilities
y_predict = classes_.take(np.argmax(
y_predict_proba, axis=1), axis=0)
score = metric_func(y_eval, y_predict, **kwargs)
data_value_dict[metric_name] = score
value_dict[data_name] = data_value_dict
out = value_dict
# If return string
else:
eval_info = ""
if return_header == True:
for metric_name in eval_metrics.keys():
eval_info = self._training_log_add_block(
eval_info, metric_name, "", "", " ",
verbose_format_['len_metrics'][metric_name], strip=False)
else:
(X_eval, y_eval) = eval_datasets_[dataset_name]
y_predict_proba = self.predict_proba(X_eval)
for metric_name, (metric_func, kwargs, ac_proba, ac_labels) \
in eval_metrics.items():
if ac_labels: kwargs['labels'] = classes_
if ac_proba: # If the metric take predict probabilities
score = metric_func(y_eval, y_predict_proba, **kwargs)
else: # If the metric do not take predict probabilities
y_predict = classes_.take(np.argmax(
y_predict_proba, axis=1), axis=0)
score = metric_func(y_eval, y_predict, **kwargs)
eval_info = self._training_log_add_block(
eval_info, "{:.3f}".format(score), "", "", " ",
verbose_format_['len_metrics'][metric_name], strip=False)
out = eval_info[:-1]
# Recover verbose state
if support_verbose:
self.verbose = verbose
return out
def _init_training_log_format(self):
"""Private function for initialization of the training verbose format"""
if self.train_verbose_:
len_iter = max(
len(str(self.n_estimators)),
len(TRAINING_LOG_HEAD_TITLES['iter'])) + 2
if self.train_verbose_['print_distribution']:
len_class_distr = max(
len(str(self.target_distr_)),
len(str(self.origin_distr_)),
len(TRAINING_LOG_HEAD_TITLES['class_distr'])) + 2
else: len_class_distr = 0
len_metrics = {
metric_name: max(len(metric_name), 5) + 2
for metric_name in self.eval_metrics_.keys()
}
metrics_total_length = sum(len_metrics.values()) + len(len_metrics) - 1
len_datasets = {
dataset_name: max(metrics_total_length, len("Data: "+dataset_name)+2)
for dataset_name in self.eval_datasets_.keys()
}
self.train_verbose_format_ = {
'len_iter': len_iter,
'len_class_distr': len_class_distr,
'len_metrics': len_metrics,
'len_datasets': len_datasets,}
return
def _training_log_add_block(self, info, text, sta_char, fill_char,
end_char, width, strip=True):
"""Private function for adding a block to training log."""
info = info.rstrip(end_char) if strip else info
info += "{}{:{fill}^{width}s}{}".format(
sta_char, text, end_char,
fill=fill_char, width=width)
return info
def _training_log_add_line(self, info="", texts=None, tabs=None,
widths=None, flags=None):
"""Private function for adding a line to training log."""
if texts == None:
texts = ("", "", tuple("" for _ in self.eval_datasets_.keys()))
if tabs == None:
tabs = ("┃", "┃", "┃", " ")
if widths == None:
widths = (
self.train_verbose_format_['len_iter'],
self.train_verbose_format_['len_class_distr'],
tuple(self.train_verbose_format_['len_datasets'].values())
)
if flags == None:
flags = (True, self.train_verbose_['print_distribution'], self.train_verbose_['print_metrics'])
(sta_char, mid_char, end_char, fill_char) = tabs
(flag_iter, flag_distr, flag_metric) = flags
(text_iter, text_distr, text_metrics) = texts
(width_iter, width_distr, width_metrics) = widths
if flag_iter:
info = self._training_log_add_block(
info, text_iter, sta_char, fill_char, end_char, width_iter)
if flag_distr:
info = self._training_log_add_block(
info, text_distr, mid_char, fill_char, end_char, width_distr)
if flag_metric:
for text_metric, width_metric in zip(text_metrics, width_metrics):
info = self._training_log_add_block(
info, text_metric, mid_char, fill_char, end_char, width_metric)
return info
def _training_log_to_console_head(self):
"""Private function for printing a table header."""
# line 1
info = self._training_log_add_line(
tabs=("┏", "┳", "┓", "━"),
)+"\n"
# line 2
info = self._training_log_add_line(info,
texts=("", "", tuple("Data: "+data_name
for data_name in self.eval_datasets_.keys()))
)+"\n"
# line 3
info = self._training_log_add_line(info,
texts=(
TRAINING_LOG_HEAD_TITLES['iter'],
TRAINING_LOG_HEAD_TITLES['class_distr'],
tuple("Metric" for data_name in self.eval_datasets_.keys())
)
)+"\n"
# line 4
info = self._training_log_add_line(info,
texts=("", "", tuple(
self._evaluate('', return_header=True)
for data_name in self.eval_datasets_.keys()))
)+"\n"
# line 5
info = self._training_log_add_line(info,
tabs=("┣", "╋", "┫", "━"))
return info
def _training_log_to_console(self, i_iter=None, y=None):
"""Private function for printing training log to sys.stdout."""
if self.train_verbose_:
if not hasattr(self, '_properties'):
raise AttributeError(
f"All imbalanced-ensemble estimators should" + \
f" have a `_properties` attribute to specify" + \
f" the method family they belong to."
)
try:
training_type = self._properties['training_type']
except Exception as e:
e_args = list(e.args)
e_args[0] += \
f" The key 'training_type' does not exist in" + \
f" the `_properties` attribute, please check" + \
f" your usage."
e.args = tuple(e_args)
raise e
if training_type not in TRAINING_TYPES:
raise ValueError(f"'training_type' should be in {TRAINING_TYPES}")
if training_type == 'iterative':
self._training_log_to_console_iterative(i_iter, y)
elif training_type == 'parallel':
self._training_log_to_console_parallel()
else: raise NotImplementedError(
f"'_training_log_to_console' for 'training_type' = {training_type}"
f" needs to be implemented."
)
def _training_log_to_console_iterative(self, i_iter, y_resampled):
"""Private function for printing training log to sys.stdout.
(for ensemble classifiers that train in an iterative manner)"""
if i_iter == 0:
print(self._training_log_to_console_head())
eval_data_names = self.eval_datasets_.keys()
if (i_iter+1) % self.train_verbose_['granularity'] == 0 or i_iter == 0:
print(self._training_log_add_line(texts=(
f"{i_iter+1}", f"{sort_dict_by_key(Counter(y_resampled))}",
tuple(self._evaluate(data_name) for data_name in eval_data_names)
)))
if (i_iter+1) == self.n_estimators:
print(self._training_log_add_line(tabs=("┣", "╋", "┫", "━")))
print(self._training_log_add_line(texts=(
"final", f"{sort_dict_by_key(Counter(y_resampled))}",
tuple(self._evaluate(data_name) for data_name in eval_data_names)
)))
print(self._training_log_add_line(tabs=("┗", "┻", "┛", "━")))
def _training_log_to_console_parallel(self):
"""Private function for printing training log to sys.stdout.
(for ensemble classifiers that train in a parallel manner)"""
eval_data_names = self.eval_datasets_.keys()
print(self._training_log_to_console_head())
print(self._training_log_add_line(texts=(
str(self.n_estimators), "",
tuple(self._evaluate(data_name) for data_name in eval_data_names)
)))
print(self._training_log_add_line(tabs=("┗", "┻", "┛", "━")))
_properties = {
'ensemble_type': 'general',
}
@Substitution(
random_state=_get_parameter_docstring('random_state'),
n_jobs=_get_parameter_docstring('n_jobs', **_properties),
)
class BaseImbalancedEnsemble(ImbalancedEnsembleClassifierMixin,
BaseEnsemble, metaclass=ABCMeta):
"""Base class for all imbalanced-ensemble classes that are
NOT based an existing ensemble learning framework like Boosting,
Bagging or RandomForest.
Warning: This class should not be used directly. Use derived classes
instead.
Parameters
----------
base_estimator : object
The base estimator from which the ensemble is built.
n_estimators : int, default=50
The number of estimators in the ensemble.
estimator_params : list of str, default=tuple()
The list of attributes to use as parameters when instantiating a
new base estimator. If none are given, default parameters are used.
{n_jobs}
{random_state}
verbose : int, default=0
Controls the verbosity when predicting.
Attributes
----------
base_estimator_ : estimator
The base estimator from which the ensemble is grown.
estimators_ : list of estimators
The collection of fitted base estimators.
"""
def __init__(self,
base_estimator,
n_estimators:int=50,
estimator_params=tuple(),
random_state=None,
n_jobs=None,
verbose=0,):
self.random_state = random_state
self.n_jobs = n_jobs
self.verbose = verbose
self.check_x_y_args = {
'accept_sparse': ['csr', 'csc'],
'force_all_finite': False,
'dtype': None,
}
super(BaseImbalancedEnsemble, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
)
self._properties = _properties
def _validate_y(self, y):
"""Validate the label vector."""
y = column_or_1d(y, warn=True)
check_classification_targets(y)
self.classes_, y_encoded = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def _validate_estimator(self, default):
"""Check the estimator, sampler and the n_estimator attribute.
Sets the base_estimator_` and base_sampler_` attributes.
"""
# validate estimator using
# sklearn.ensemble.BaseEnsemble._validate_estimator
super()._validate_estimator(default=default)
if hasattr(self, 'base_sampler'):
# validate sampler and sampler_kwargs
# validated sampler stored in self.base_sampler_
try:
self.base_sampler_ = clone(self.base_sampler)
except Exception as e:
e_args = list(e.args)
e_args[0] = "Exception occurs when trying to validate" + \
" base_sampler: " + e_args[0]
e.args = tuple(e_args)
raise e
def _make_sampler(self, append=True, random_state=None, **overwrite_kwargs):
"""Make and configure a copy of the `base_sampler_` attribute.
Warning: This method should be used to properly instantiate new
sub-samplers.
"""
sampler = clone(self.base_sampler_)
if hasattr(self, 'sampler_kwargs_'):
sampler.set_params(**self.sampler_kwargs_)
# Arguments passed to _make_sampler function have higher priority,
# they will overwrite the self.sampler_kwargs_
sampler.set_params(**overwrite_kwargs)
if random_state is not None:
_set_random_states(sampler, random_state)
if append:
self.samplers_.append(sampler)
return sampler
@_deprecate_positional_args
def fit(self, X, y, *, sample_weight=None, **kwargs):
"""Build the ensemble classifier from the training set (X, y)."""
# Check random state
self.random_state = check_random_state(self.random_state)
# Convert data (X is required to be 2d and indexable)
X, y = self._validate_data(X, y, **self.check_x_y_args)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=np.float64)
sample_weight /= sample_weight.sum()
if np.any(sample_weight < 0):
raise ValueError("sample_weight cannot contain negative weights")
# Remap output
n_samples, self.n_features_ = X.shape
self.features_ = np.arange(self.n_features_)
self._n_samples = n_samples
y = self._validate_y(y)
self._encode_map = {c: np.where(self.classes_==c)[0][0] for c in self.classes_}
# lyft dataset process module
# modiflied from nuscenes_dataset.py
import json
import pickle
import time
import random
from copy import deepcopy
from functools import partial
from pathlib import Path
import subprocess
import fire
import numpy as np
from second.core import box_np_ops
from second.core import preprocess as prep
from second.data import kitti_common as kitti
from second.data.dataset import Dataset, register_dataset
from second.utils.eval import get_coco_eval_result, get_official_eval_result
from second.utils.progress_bar import progress_bar_iter as prog_bar
from second.utils.timer import simple_timer
# @register_dataset
class LyftDataset(Dataset):
NumPointFeatures = 4
NameMapping = {
'animal': 'animal',
'bicycle': 'bicycle',
'bus': 'bus',
'car': 'car',
'emergency_vehicle': 'emergency_vehicle',
'motorcycle': 'motorcycle',
'other_vehicle': 'other_vehicle',
'pedestrian': 'pedestrian',
'truck': 'truck'
}
DefaultAttribute = {
"car": "object_action_parked",
"pedestrain": "object_action_walking",
"bicycle": "object_action_driving_straight_forward",
"motorcycle": "object_action_parked",
"other_vehicle": "object_action_driving_straight_forward",
"emergency_vehicle": "object_action_driving_straight_forward",
"truck": "object_action_parked",
"animal": "",
"bus": "object_action_driving_straight_forward",
}
def __init__(self,
root_path,
info_path,
class_names=None,
prep_func=None,
num_point_features=None):
self._root_path = Path(root_path)
with open(info_path, 'rb') as f:
data = pickle.load(f)
self._lyft_infos = data["infos"]
self._lyft_infos = list(
sorted(self._lyft_infos, key=lambda e: e["timestamp"])
)
self._metadata = data["metadata"]
self._class_names = class_names
self._prep_func = prep_func
self._kitti_name_mapping = {
"car": "car",
"pedestrain": "pedestrain",
}
self.version = self._metadata["version"]
self.eval_version = "ICLR 2019"
self._with_velocity = False
def __len__(self):
return len(self._lyft_infos)
@property
def ground_truth_annotations(self):
pass
def __getitem__(self, idx):
input_dict = self.get_sensor_data(idx)
example = self._prep_func(input_dict=input_dict)
example["metadata"] = input_dict["metadata"]
if "anchors_mask" in example:
example["anchors_mask"] = example["anchors_mask"].astype(np.uint8)
return example
def get_sensor_data(self, query):
idx = query
read_test_image = False
if isinstance(query, dict):
assert "lidar" in query
idx = query["lidar"]["idx"]
read_test_image = "cam" in query
info = self._lyft_infos[idx]
res = {
"lidar": {
"type": "lidar",
"points": None,
},
"metadata": {
"token": info["token"]
},
}
lidar_path = Path(info['lidar_path'])
spec_lidar_path = '/home/muzi2045/nvme0/lyft_dataset/v1.01-train/lidar/host-a011_lidar1_1233090652702363606.bin'
if str(lidar_path) != spec_lidar_path:
# print("read lidar path:", str(lidar_path))
points = np.fromfile(
str(lidar_path), dtype=np.float32).reshape((-1,5))[:, :5]
points[:, 3] /= 255
points[:, 4] = 0
sweep_points_list = [points]
ts = info["timestamp"] / 1e6
# print("info sweeps:", len(info["sweeps"]))
for sweep in info["sweeps"]:
if str(sweep["lidar_path"]) == spec_lidar_path:
continue
points_sweep = np.fromfile(
str(sweep["lidar_path"]), dtype=np.float32,
count=-1).reshape([-1, 5])[:, :5]
sweep_ts = sweep["timestamp"] / 1e6
points_sweep[:, 3] /= 255
points_sweep[:, :3] = points_sweep[:, :3] @ sweep[
"sweep2lidar_rotation"].T
points_sweep[:, :3] += sweep["sweep2lidar_translation"]
points_sweep[:, 4] = ts - sweep_ts
sweep_points_list.append(points_sweep)
points = np.concatenate(sweep_points_list, axis=0)[:, [0, 1, 2, 4]]
res["lidar"]["points"] = points
else:
points = np.fromfile(str(lidar_path), dtype=np.float32).reshape((-1,4))[:, :4]
points[:, 3] /= 255
res["lidar"]["points"] = points
if read_test_image:
if Path(info["cam_front_path"]).exists():
with open(str(info["cam_front_path"]), 'rb') as f:
image_str = f.read()
else:
image_str = None
res["cam"] = {
"type": "camera",
"data": image_str,
"datatype": Path(info["cam_front_path"]).suffix[1:],
}
if 'gt_boxes' in info:
# mask = info["num_lidar_pts"] > 0
# gt_boxes = info["gt_boxes"][mask]
gt_boxes = info["gt_boxes"]
# print("gt_boxes:", gt_boxes)
if self._with_velocity:
# gt_velocity = info["gt_velocity"][mask]
gt_velocity = info["gt_velocity"]
nan_mask = np.isnan(gt_velocity[:, 0])
gt_velocity[nan_mask] = [0.0, 0.0]
gt_boxes = np.concatenate([gt_boxes, gt_velocity], axis=-1)
res["lidar"]["annotations"] = {
'boxes': gt_boxes,
# 'names': info["gt_names"][mask],
'names': info['gt_names']
}
return res
def evaluation_kitti(self, detections, output_dir):
pass
def evaluation_nusc(self, detections, output_dir):
pass
def evaluation_lyft(self, detections, output_dir):
pass
def evaluation(self, detections, output_dir):
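        # Note: evaluation_nusc() above is still a stub; it must return a dict
        # shaped like {"result": {"nusc": ...}, "detail": {"nusc": ...}} before
        # this method can run.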
res_nusc = self.evaluation_nusc(detections, output_dir)
res = {
"results": {
"nusc": res_nusc["result"]["nusc"],
},
"detail": {
"eval.nusc": res_nusc["detail"]["nusc"],
},
}
return res
def _second_det_to_nusc_box(detection):
from lyft_dataset_sdk.utils.data_classes import Box
import pyquaternion
box3d = detection["box3d_lidar"].detach().cpu().numpy()
scores = detection["scores"].detach().cpu().numpy()
labels = detection["label_preds"].detach().cpu().numpy()
box3d[:, 6] = -box3d[:, 6] - np.pi/2
box_list = []
for i in range(box3d.shape[0]):
quat = pyquaternion.Quaternion(axis=[0, 0, 1], radians=box3d[i,6])
velocity = (np.nan, np.nan, np.nan)
if box3d.shape[1] == 9:
velocity = (*box3d[i, 7:9], 0.0)
box = Box(
box3d[i, :3],
box3d[i, 3:6],
quat,
label=labels[i],
score=scores[i],
velocity=velocity)
box_list.append(box)
return box_list
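# Illustrative call (hypothetical tensors; shapes follow the SECOND detector
# output convention assumed above):
#
#     boxes = _second_det_to_nusc_box({
#         "box3d_lidar": torch.zeros((10, 7)),   # x, y, z, w, l, h, yaw
#         "scores": torch.ones(10),
#         "label_preds": torch.zeros(10, dtype=torch.int64),
#     })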
def _lidar_nusc_box_to_global(info, boxes, classes, eval_version="ICLR 2019"):
import pyquaternion
box_list = []
for box in boxes:
box.rotate(pyquaternion.Quaternion(info['lidar2ego_rotation']))
box.translate(np.array(info['lidar2ego_translation']))
# from lyft_dataset_sdk.eval.detection.mAP_eva
#filter det in ego
box.rotate(pyquaternion.Quaternion(info['ego2global_rotation']))
box.translate(np.array(info['ego2global_translation']))
box_list.append(box)
return box_list
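# The two rotate/translate pairs above implement, for each box center p,
#     p_global = R_e2g @ (R_l2e @ p_lidar + t_l2e) + t_e2g
# i.e. the lidar -> ego -> global chain, with rotations taken from the stored
# quaternions.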
def _get_available_scenes(lyft):
available_scenes = []
print("total scene num:", len(lyft.scene))
for scene in lyft.scene:
scene_token = scene["token"]
scene_rec = lyft.get('scene', scene_token)
sample_rec = lyft.get('sample', scene_rec['first_sample_token'])
sd_rec = lyft.get('sample_data', sample_rec['data']["LIDAR_TOP"])
has_more_frames = True
scene_not_exist = False
while has_more_frames:
lidar_path, boxes, _ = lyft.get_sample_data(sd_rec['token'])
if not Path(lidar_path).exists():
                scene_not_exist = True
break
else:
break
if not sd_rec['next'] == "":
sd_rec = lyft.get('sample_data', sd_rec['next'])
else:
has_more_frames = False
if scene_not_exist:
continue
available_scenes.append(scene)
print("exist scene num:", len(available_scenes))
return available_scenes
def _fill_train_infos(lyft,
train_scenes,
test = False,
max_sweeps=10):
train_lyft_infos = []
from pyquaternion import Quaternion
print("sample number:", len(lyft.sample))
for sample in prog_bar(lyft.sample):
lidar_token = sample["data"]["LIDAR_TOP"]
cam_front_token = sample["data"]["CAM_FRONT"]
sd_rec = lyft.get('sample_data', sample['data']["LIDAR_TOP"])
cs_record = lyft.get('calibrated_sensor',
sd_rec['calibrated_sensor_token'])
pose_record = lyft.get('ego_pose', sd_rec['ego_pose_token'])
lidar_path, boxes, _ = lyft.get_sample_data(lidar_token)
cam_path, _, cam_intrinsic = lyft.get_sample_data(cam_front_token)
assert Path(lidar_path).exists()
info = {
"lidar_path": lidar_path,
"cam_front_path": cam_path,
"token": sample["token"],
"sweeps": [],
"lidar2ego_translation": cs_record['translation'],
"lidar2ego_rotation": cs_record['rotation'],
"ego2global_translation": pose_record['translation'],
"ego2global_rotation": pose_record['rotation'],
"timestamp": sample["timestamp"],
}
# print("info:", info)
l2e_r = info["lidar2ego_rotation"]
l2e_t = info["lidar2ego_translation"]
e2g_r = info["ego2global_rotation"]
e2g_t = info["ego2global_translation"]
l2e_r_mat = Quaternion(l2e_r).rotation_matrix
e2g_r_mat = Quaternion(e2g_r).rotation_matrix
sd_rec = lyft.get('sample_data', sample['data']["LIDAR_TOP"])
sweeps = []
while len(sweeps) < max_sweeps:
if not sd_rec['prev'] == "":
sd_rec = lyft.get('sample_data', sd_rec['prev'])
cs_record = lyft.get('calibrated_sensor',
sd_rec['calibrated_sensor_token'])
pose_record = lyft.get('ego_pose', sd_rec['ego_pose_token'])
lidar_path = lyft.get_sample_data_path(sd_rec['token'])
sweep = {
"lidar_path": lidar_path,
"sample_data_token": sd_rec['token'],
"lidar2ego_translation": cs_record['translation'],
"lidar2ego_rotation": cs_record['rotation'],
"ego2global_translation": pose_record['translation'],
"ego2global_rotation": pose_record['rotation'],
"timestamp": sd_rec["timestamp"]
}
l2e_r_s = sweep["lidar2ego_rotation"]
l2e_t_s = sweep["lidar2ego_translation"]
e2g_r_s = sweep["ego2global_rotation"]
e2g_t_s = sweep["ego2global_translation"]
# sweep->ego->global->ego'->lidar
l2e_r_s_mat = Quaternion(l2e_r_s).rotation_matrix
e2g_r_s_mat = Quaternion(e2g_r_s).rotation_matrix
R = (l2e_r_s_mat.T @ e2g_r_s_mat.T) @ (
                    np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T)
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Additive State Space Model Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow as tf
from tensorflow_probability import distributions as tfd
from tensorflow_probability.python.sts import AdditiveStateSpaceModel
from tensorflow_probability.python.sts import LocalLinearTrendStateSpaceModel
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
tfl = tf.linalg
class _AdditiveStateSpaceModelTest(tf.test.TestCase):
def test_identity(self):
# Test that an additive SSM with a single component defines the same
# distribution as the component model.
y = self._build_placeholder([1.0, 2.5, 4.3, 6.1, 7.8])
local_ssm = LocalLinearTrendStateSpaceModel(
num_timesteps=5,
level_scale=0.3,
slope_scale=0.6,
observation_noise_scale=0.1,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=self._build_placeholder([1., 1.])))
additive_ssm = AdditiveStateSpaceModel([local_ssm])
local_lp = local_ssm.log_prob(y[:, np.newaxis])
additive_lp = additive_ssm.log_prob(y[:, np.newaxis])
self.assertAllClose(self.evaluate(local_lp), self.evaluate(additive_lp))
def test_nesting_additive_ssms(self):
ssm1 = self._dummy_model(batch_shape=[1, 2])
ssm2 = self._dummy_model(batch_shape=[3, 2])
observation_noise_scale = 0.1
additive_ssm = AdditiveStateSpaceModel(
[ssm1, ssm2],
observation_noise_scale=observation_noise_scale)
nested_additive_ssm = AdditiveStateSpaceModel(
[AdditiveStateSpaceModel([ssm1]),
AdditiveStateSpaceModel([ssm2])],
observation_noise_scale=observation_noise_scale)
# Test that both models behave equivalently.
y = self.evaluate(nested_additive_ssm.sample())
additive_lp = additive_ssm.log_prob(y)
nested_additive_lp = nested_additive_ssm.log_prob(y)
self.assertAllClose(self.evaluate(additive_lp),
self.evaluate(nested_additive_lp))
additive_mean = additive_ssm.mean()
nested_additive_mean = nested_additive_ssm.mean()
self.assertAllClose(
self.evaluate(additive_mean),
self.evaluate(nested_additive_mean))
additive_variance = additive_ssm.variance()
nested_additive_variance = nested_additive_ssm.variance()
self.assertAllClose(
self.evaluate(additive_variance),
self.evaluate(nested_additive_variance))
def test_sum_of_local_linear_trends(self):
# We know analytically that the sum of two local linear trends is
# another local linear trend, with means and variances scaled
# accordingly, so the additive model should match this behavior.
level_scale = 0.5
slope_scale = 1.1
initial_level = 3.
initial_slope = -2.
observation_noise_scale = 0.
num_timesteps = 5
y = self._build_placeholder([1.0, 2.5, 4.3, 6.1, 7.8])
# Combine two local linear trend models, one a full model, the other
# with just a moving mean (zero slope).
local_ssm = LocalLinearTrendStateSpaceModel(
num_timesteps=num_timesteps,
level_scale=level_scale,
slope_scale=slope_scale,
initial_state_prior=tfd.MultivariateNormalDiag(
loc=self._build_placeholder([initial_level, initial_slope]),
scale_diag=self._build_placeholder([1., 1.])))
second_level_scale = 0.3
second_initial_level = 1.1
moving_level_ssm = LocalLinearTrendStateSpaceModel(
num_timesteps=num_timesteps,
level_scale=second_level_scale,
slope_scale=0.,
initial_state_prior=tfd.MultivariateNormalDiag(
loc=self._build_placeholder([second_initial_level, 0.]),
scale_diag=self._build_placeholder([1., 0.])))
additive_ssm = AdditiveStateSpaceModel(
[local_ssm, moving_level_ssm],
observation_noise_scale=observation_noise_scale)
# Build the analytical sum of the two processes.
target_ssm = LocalLinearTrendStateSpaceModel(
num_timesteps=num_timesteps,
level_scale=np.float32(np.sqrt(level_scale**2 + second_level_scale**2)),
slope_scale=np.float32(slope_scale),
observation_noise_scale=observation_noise_scale,
initial_state_prior=tfd.MultivariateNormalDiag(
loc=self._build_placeholder(
[initial_level + second_initial_level, initial_slope + 0.]),
scale_diag=self._build_placeholder(
np.sqrt([2., 1.]))))
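    # Independent Gaussian level noises add in quadrature, hence the
    # sqrt(level_scale**2 + second_level_scale**2) level scale above; the slope
    # noise comes only from the first component, and the initial-state
    # variances (1 + 1 and 1 + 0) give the sqrt([2., 1.]) prior scales.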
# Test that both models behave equivalently.
additive_mean = additive_ssm.mean()
target_mean = target_ssm.mean()
self.assertAllClose(
self.evaluate(additive_mean), self.evaluate(target_mean))
additive_variance = additive_ssm.variance()
target_variance = target_ssm.variance()
self.assertAllClose(
self.evaluate(additive_variance), self.evaluate(target_variance))
additive_lp = additive_ssm.log_prob(y[:, np.newaxis])
target_lp = target_ssm.log_prob(y[:, np.newaxis])
self.assertAllClose(self.evaluate(additive_lp), self.evaluate(target_lp))
def test_batch_shape(self):
batch_shape = [3, 2]
ssm = self._dummy_model(batch_shape=batch_shape)
additive_ssm = AdditiveStateSpaceModel([ssm, ssm])
y = additive_ssm.sample()
if self.use_static_shape:
self.assertAllEqual(additive_ssm.batch_shape.as_list(), batch_shape)
self.assertAllEqual(y.shape.as_list()[:-2], batch_shape)
else:
self.assertAllEqual(self.evaluate(additive_ssm.batch_shape_tensor()),
batch_shape)
self.assertAllEqual(self.evaluate(tf.shape(input=y))[:-2], batch_shape)
def test_multivariate_observations(self):
# since STS components are scalar by design, we manually construct
# a multivariate-output model to verify that the additive SSM handles
# this case.
num_timesteps = 5
observation_size = 2
multivariate_ssm = self._dummy_model(num_timesteps=num_timesteps,
observation_size=observation_size)
# Note it would not work to specify observation_noise_scale here;
# multivariate observations need to derive the (multivariate)
# observation noise distribution from their components.
combined_ssm = AdditiveStateSpaceModel([multivariate_ssm,
multivariate_ssm])
y = combined_ssm.sample()
expected_event_shape = [num_timesteps, observation_size]
if self.use_static_shape:
self.assertAllEqual(combined_ssm.event_shape.as_list(),
expected_event_shape)
self.assertAllEqual(y.shape.as_list()[-2:], expected_event_shape)
else:
self.assertAllEqual(self.evaluate(combined_ssm.event_shape_tensor()),
expected_event_shape)
self.assertAllEqual(
self.evaluate(tf.shape(input=y))[-2:], expected_event_shape)
def test_mismatched_num_timesteps_error(self):
ssm1 = self._dummy_model(num_timesteps=10)
ssm2 = self._dummy_model(num_timesteps=8)
with self.assertRaisesWithPredicateMatch(
ValueError, 'same number of timesteps'):
# In the static case, the constructor should raise an exception.
additive_ssm = AdditiveStateSpaceModel(
component_ssms=[ssm1, ssm2])
# In the dynamic case, the exception is raised at runtime.
_ = self.evaluate(additive_ssm.sample())
def test_broadcasting_batch_shape(self):
# Build three SSMs with broadcast batch shape.
ssm1 = self._dummy_model(batch_shape=[2])
ssm2 = self._dummy_model(batch_shape=[3, 2])
ssm3 = self._dummy_model(batch_shape=[1, 2])
additive_ssm = AdditiveStateSpaceModel(
component_ssms=[ssm1, ssm2, ssm3])
y = additive_ssm.sample()
broadcast_batch_shape = [3, 2]
if self.use_static_shape:
self.assertAllEqual(additive_ssm.batch_shape.as_list(),
broadcast_batch_shape)
self.assertAllEqual(y.shape.as_list()[:-2],
broadcast_batch_shape)
else:
self.assertAllEqual(self.evaluate(additive_ssm.batch_shape_tensor()),
broadcast_batch_shape)
self.assertAllEqual(
self.evaluate(tf.shape(input=y))[:-2], broadcast_batch_shape)
def test_broadcasting_correctness(self):
# This test verifies that broadcasting of component parameters works as
# expected. We construct a SSM with no batch shape, and test that when we
# add it to another SSM of batch shape [3], we get the same model
# as if we had explicitly broadcast the parameters of the first SSM before
# adding.
num_timesteps = 5
transition_matrix = np.random.randn(2, 2)
transition_noise_diag = np.exp(np.random.randn(2))
observation_matrix = np.random.randn(1, 2)
    observation_noise_diag = np.exp(np.random.randn(1))
# Copyright 2019 Jij Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import cxxjij
import openjij
import warnings
class BinaryQuadraticModel:
"""Represents Binary quadratic model
Attributes:
var_type (openjij.VariableType): variable type SPIN or BINARY
linear (dict): represents linear term
quad (dict): represents quadratic term
indices (list): labels of each variables sorted by results variables
energy_bias (float): represents constant energy term when convert to SPIN from BINARY
size (int): number of variables
"""
def __init__(self, h=None, J=None, Q=None, var_type=openjij.SPIN):
self.var_type = openjij.cast_var_type(var_type)
if self.var_type == openjij.SPIN:
if (h is None) or (J is None):
raise ValueError('Input h and J.')
self.linear = h
self.quad = J
elif self.var_type == openjij.BINARY:
            if Q is None or not isinstance(Q, dict):
                raise ValueError('Q should be a dictionary.')
self.linear = {}
self.quad = {}
for (i, j), qij in Q.items():
if i == j:
self.linear[i] = qij
else:
self.quad[(i, j)] = qij
index_set = set(self.linear.keys())
for v1, v2 in self.quad.keys():
index_set.add(v1)
index_set.add(v2)
self.indices = list(index_set)
        if self.var_type == openjij.SPIN:
self.energy_bias = 0.0
else: # BINARY
self.energy_bias = (sum(list(self.linear.values()))
* 2 + sum(list(self.quad.values())))/4
self._interaction_matrix = None # calculated at interactions()
self.size = len(self.indices)
def get_cxxjij_ising_graph(self, sparse=False):
"""
        Convert the Python dictionaries (h, J) or Q into a cxxjij.graph.Dense or cxxjij.graph.Sparse object
        Args:
            sparse (bool): if true, returns a sparse graph
        Returns:
            cxxjij.graph.Dense or cxxjij.graph.Sparse
"""
if not sparse:
GraphClass = cxxjij.graph.Dense
else:
GraphClass = cxxjij.graph.Sparse
cxxjij_graph = GraphClass(self.size)
ising_int = self.ising_interactions()
# cxxjij.graph.dense
for i in range(self.size):
if ising_int[i, i] != 0.0:
cxxjij_graph[i, i] = ising_int[i, i]
for j in range(i+1, self.size):
if ising_int[i, j] != 0.0:
cxxjij_graph[i, j] = ising_int[i, j]
return cxxjij_graph
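    # Usage sketch (a hypothetical SPIN-typed model, built as in __init__):
    #
    #     bqm = BinaryQuadraticModel(h={0: -1.0}, J={(0, 1): 0.5})
    #     graph = bqm.get_cxxjij_ising_graph(sparse=False)
    #     # graph[i, j] now holds the couplings from ising_interactions().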
def ising_interactions(self):
""" Interactions in the Ising representation
QUBO formulation to the Ising formulation
We assumption Q is a triangular matrix.
H = q^T Q q
= 1/4 (1+s)^T Q (1+s)
= s^T Q/4 s + 1^T Q/4 s + s^T Q/4 1 + 1^T Q/4 1
= s^T nondiag(Q)/4 s + 1^T Q/4 s + s^T Q/4 1 + 1^T (Q + diag(Q))/4 1
= \sum_{i<j} Q_{ij}/4 s_i s_j
+ \sum{i<=j} (Q_{ij} + Q_{ji}) s_i
+ sum_{i<=j} (Q_{ij} + Q_{ii})/4
Therefore
J_{ij} = Q_{ij}/4
h_i = \sum{i<=j} (Q_{ij} + Q_{ji})/4
constant_term = sum_{i<=j} Q_{ij}/4 + Tr(Q) (energy bias)
"""
if self.var_type == openjij.SPIN:
return self.interactions()
interactions = self.interactions()/4
if self.var_type == openjij.BINARY:
# convert to the Ising interaction
self.energy_bias = (
np.sum(np.triu(interactions)) + np.trace(interactions))
for i in range(len(interactions)):
interactions[i, i] = np.sum(
interactions[i, :]) + interactions[i, i]
return interactions
def interactions(self, re_calculate=False):
"""make interaction matrix
The Ising model: E = ΣJ_ij σiσj + Σhiσi
Interaction matrix -> H_ij = J_ij + J_ji, H_ii = hi
QUBO: E = Σ1/2Q_ij q_iq_j + ΣQ_ii q_i
Args:
re_calculate (bool): Whether to force a recalculation
Returns:
numpy.ndarray: interactioin matrix H_{ij} or Q_{ij}
"""
if (self._interaction_matrix is not None) and (not re_calculate):
return self._interaction_matrix
system_size = len(self.indices)
interactions = np.zeros((system_size, system_size))
for i, i_index in enumerate(self.indices):
interactions[i, i] = self.linear[i_index] if i_index in self.linear else 0.0
for j, j_index in enumerate(self.indices[i+1:]):
j += i+1
jval = 0.0
if (i_index, j_index) in self.quad:
jval += self.quad[(i_index, j_index)]
if (j_index, i_index) in self.quad:
jval += self.quad[(j_index, i_index)]
interactions[i, j] = jval
interactions[j, i] = jval
self._interaction_matrix = interactions
return self._interaction_matrix
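    # For example (a sketch), h = {0: 1.0} and J = {(0, 1): -1.0} yield
    #     [[ 1.0, -1.0],
    #      [-1.0,  0.0]]
    # since off-diagonal entries hold J_ij + J_ji and the diagonal holds h_i.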
def calc_energy(self, state, need_to_convert_from_spin=False):
"""calculate energy from state
Args:
state (list, numpy.array): BINARY or SPIN state
            need_to_convert_from_spin (bool): set True if state is SPIN and
                                              needs to be converted to BINARY
"""
        _state = np.array(state)
# -*- coding: utf-8 -*-
"""Testing the functions in typhon.atmosphere.
"""
import numpy as np
import pytest
from typhon.physics import atmosphere
class TestAtmosphere:
"""Testing the atmosphere functions."""
def test_integrate_water_vapor_hse(self):
"""Test the IWV calculation in hydrostatic equilibrium."""
p = np.linspace(1000e2, 500e2, 10)
vmr = np.linspace(0.025, 0.0025, p.size)
iwv = atmosphere.integrate_water_vapor(vmr, p)
assert np.allclose(iwv, 43.8845)
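        # Under hydrostatic equilibrium the column integral reduces to a
        # pressure integral, roughly IWV = -(1/g) * integral(q dp), with q the
        # specific humidity derived from the VMR profile (a sketch of the
        # relation under test, not typhon's exact implementation).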
def test_integrate_water_vapor_nonhse(self):
"""Test the IWV calculation in non-hydrostatic atmosphere."""
p = np.linspace(1000e2, 500e2, 10)
T = 288 * np.ones(p.size)
z = np.linspace(0, 5000, p.size)
vmr = np.linspace(0.025, 0.0025, p.size)
iwv = atmosphere.integrate_water_vapor(vmr, p, T, z)
assert np.allclose(iwv, 42.4062)
    def test_integrate_water_vapor_invalid_input(self):
"""Test invalid number of arguments to IWV calculation."""
        dummy = np.ones(10)
import numpy as np
import random
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
class MatchTurn:
def __init__(self):
self.epochs = [100, 1000, 10000]
self.nrange = 51
self.E_Rn_x = [[], [], []]
self.E_Rn_y = [[], [], []]
# print(self.E_Rn_x)
def Q1(self):
for n in range(2, self.nrange):
print(n)
for e_index, epoch in enumerate(self.epochs):
print(e_index, epoch)
average_Rn = 0
for i in range(epoch):
init = [i for i in range(0, n)]
# print(init)
iter_nums = 0
# print(len(init))
while len(init) != 0:
old_init = init.copy()
# print('old', old_init)
random.shuffle(init)
# print(init)
init = [old_init[i] for i in range(len(init)) if init[i] != old_init[i]]
iter_nums += 1
# print(init)
average_Rn += iter_nums / epoch
self.E_Rn_x[e_index].append(n)
self.E_Rn_y[e_index].append(average_Rn)
x = range(2, self.nrange)
y = [x_ for x_ in x]
fig, ax = plt.subplots(1, 1)
handle_1, = plt.plot(x, y, lw=6, color='navajowhite')
handle_2, = plt.plot(self.E_Rn_x[0], self.E_Rn_y[0], color='tomato', linestyle='--')
handle_3, = plt.plot(self.E_Rn_x[1], self.E_Rn_y[1], color='violet', linestyle='--')
handle_4, = plt.plot(self.E_Rn_x[2], self.E_Rn_y[2], color='aqua', linestyle='--')
ax.legend(handles=[handle_1, handle_2, handle_3, handle_4],
labels=[' Theoretical value ', 'simulate: epoch=100', 'simulate: epoch=1000',
'simulate: epoch=10000'], loc='best')
# plt.plot(self.E_Rn_x, self.E_Rn_y)
        # Embed an inset axes for the zoomed-in view
axins = inset_axes(ax, width="40%", height="30%", loc='lower left',
bbox_to_anchor=(0.5, 0.1, 1, 1),
bbox_transform=ax.transAxes)
axins.plot(x, y, lw=6, color='navajowhite')
axins.plot(self.E_Rn_x[0], self.E_Rn_y[0], color='tomato', linestyle='--')
axins.plot(self.E_Rn_x[1], self.E_Rn_y[1], color='violet', linestyle='--')
axins.plot(self.E_Rn_x[2], self.E_Rn_y[2], color='aqua', linestyle='--')
        # Set the zoom interval
zone_left = 45
zone_right = 47
        # Axis expansion ratios (adjust based on the actual data)
        x_ratio = 0.5  # expansion ratio of the x-axis display range
        y_ratio = 1  # expansion ratio of the y-axis display range
        # X-axis display range
xlim0 = x[zone_left] - (x[zone_right] - x[zone_left]) * x_ratio
xlim1 = x[zone_right] + (x[zone_right] - x[zone_left]) * x_ratio
        # Y-axis display range
y = np.hstack((self.E_Rn_y[2][zone_left:zone_right], self.E_Rn_y[2][zone_left:zone_right]))
ylim0 = np.min(y) - (np.max(y) - np.min(y)) * y_ratio
ylim1 = np.max(y) + (np.max(y) - np.min(y)) * y_ratio
        # Adjust the display range of the inset axes
axins.set_xlim(xlim0, xlim1)
axins.set_ylim(ylim0, ylim1)
        # Draw connector lines between the parent axes and the inset axes
        # loc1, loc2: the four corners of the axes
        # 1 (upper right) 2 (upper left) 3 (lower left) 4 (lower right)
mark_inset(ax, axins, loc1=3, loc2=1, fc="none", ec='k', lw=1)
self.plot_config(True, 'Num of People', 'E(Rn)', 'E(Rn)', '3_14_1_epoch10000.pdf')
plt.savefig('3_14_1_epoch.pdf')
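        # The theoretical curve y = n plotted above reflects E(Rn) = n: each
        # round of this matching process produces one match in expectation,
        # so on average n rounds are needed for n people.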
def Q2(self):
for n in range(2, self.nrange):
print(n)
for e_index, epoch in enumerate(self.epochs):
print(e_index, epoch)
average_Sn = 0
for i in range(epoch):
init = [i for i in range(0, n)]
iter_nums = 0
while len(init) != 0:
old_init = init.copy()
iter_nums += len(old_init)
random.shuffle(init)
init = [old_init[i] for i in range(len(init)) if init[i] != old_init[i]]
average_Sn += iter_nums / epoch
self.E_Rn_x[e_index].append(n)
self.E_Rn_y[e_index].append(average_Sn)
fig, ax = plt.subplots(1, 1)
x = range(2, self.nrange)
y = [x_ + x_ * x_ / 2 for x_ in x]
handle_1, = plt.plot(x, y, lw=6, color='navajowhite')
handle_2, = plt.plot(self.E_Rn_x[0], self.E_Rn_y[0], color='tomato', linestyle='--')
handle_3, = plt.plot(self.E_Rn_x[1], self.E_Rn_y[1], color='violet', linestyle='--')
handle_4, = plt.plot(self.E_Rn_x[2], self.E_Rn_y[2], color='aqua', linestyle='--')
ax.legend(handles=[handle_1, handle_2, handle_3, handle_4],
labels=[' Theoretical value ', 'simulate: epoch=100', 'simulate: epoch=1000',
'simulate: epoch=10000'], loc='best')
self.plot_config(True, 'Num of People', 'E(Sn)', 'E(Sn)', '3_14_2_epoch.pdf')
# plt.plot(self.E_Rn_x, self.E_Rn_y)
        # Embed an inset axes for the zoomed-in view
axins = inset_axes(ax, width="40%", height="30%", loc='lower left',
bbox_to_anchor=(0.5, 0.1, 1, 1),
bbox_transform=ax.transAxes)
axins.plot(x, y, lw=6, color='navajowhite')
axins.plot(self.E_Rn_x[0], self.E_Rn_y[0], color='tomato', linestyle='--')
axins.plot(self.E_Rn_x[1], self.E_Rn_y[1], color='violet', linestyle='--')
axins.plot(self.E_Rn_x[2], self.E_Rn_y[2], color='aqua', linestyle='--')
        # Set the zoom interval
zone_left = 45
zone_right = 47
        # Axis expansion ratios (adjust based on the actual data)
        x_ratio = 0.5  # expansion ratio of the x-axis display range
        y_ratio = 1  # expansion ratio of the y-axis display range
        # X-axis display range
xlim0 = x[zone_left] - (x[zone_right] - x[zone_left]) * x_ratio
xlim1 = x[zone_right] + (x[zone_right] - x[zone_left]) * x_ratio
        # Y-axis display range
y = np.hstack((self.E_Rn_y[2][zone_left:zone_right], self.E_Rn_y[2][zone_left:zone_right]))
ylim0 = np.min(y) - (np.max(y) - np.min(y)) * y_ratio
        ylim1 = np.max(y) + (np.max(y) - np.min(y)) * y_ratio
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 9 11:06:16 2018
Test for the function of the chi2 script of the omnitool package.
@author: misiak
"""
import sys
from os import path
import numpy as np
import scipy.signal as sgl
import matplotlib.pyplot as plt
import mcmc_red as mcr
plt.close('all')
def butter_lowpass(cutoff, fs, order=5):
"""
Design a low-pass filter.
"""
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
b, a = sgl.butter(order, normal_cutoff, btype='low', analog=False)
return b, a
def butter_lowpass_filter(data, cutoff, fs, order=5):
    """
    Filter the input data with a low-pass Butterworth filter.
    """
    b, a = butter_lowpass(cutoff, fs, order=order)
y = sgl.lfilter(b, a, data)
return y
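# Optional sanity check of the designed filter (a sketch):
#
#     w, h = sgl.freqz(*butter_lowpass(10, 1e3, order=2))
#     # |h| should be close to 1 below 10 Hz and roll off above it.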
fs = 1e3
T = 2.
time = np.arange(0, T, fs**-1)
noise = np.random.normal(0, 1.0, size=int(T*fs))
lnoise = butter_lowpass_filter(noise, 10, 1000, order=2)
plt.figure('Temporal')
plt.plot(time, noise, alpha=0.2)
plt.plot(time, lnoise)
plt.grid()
fft = np.fft.fft(lnoise)
from itertools import product
import numpy as np
from numpy import ma
import pandas as pd
import pytest
from scipy import sparse as sp
from scipy.sparse import csr_matrix, issparse
from anndata import AnnData
from anndata.tests.helpers import assert_equal, gen_adata
# some test objects that we use below
adata_dense = AnnData(np.array([[1, 2], [3, 4]]))
adata_dense.layers["test"] = adata_dense.X
adata_sparse = AnnData(
csr_matrix([[0, 2, 3], [0, 5, 6]]),
dict(obs_names=["s1", "s2"], anno1=["c1", "c2"]),
dict(var_names=["a", "b", "c"]),
)
def test_creation():
AnnData(np.array([[1, 2], [3, 4]]))
AnnData(np.array([[1, 2], [3, 4]]), {}, {})
AnnData(ma.array([[1, 2], [3, 4]]), uns=dict(mask=[0, 1, 1, 0]))
AnnData(sp.eye(2))
X = np.array([[1, 2, 3], [4, 5, 6]])
adata = AnnData(
X=X,
obs=dict(Obs=["A", "B"]),
var=dict(Feat=["a", "b", "c"]),
obsm=dict(X_pca=np.array([[1, 2], [3, 4]])),
raw=dict(X=X, var=dict(var_names=["a", "b", "c"])),
)
assert adata.raw.X.tolist() == X.tolist()
assert adata.raw.var_names.tolist() == ["a", "b", "c"]
with pytest.raises(ValueError):
AnnData(np.array([[1, 2], [3, 4]]), dict(TooLong=[1, 2, 3, 4]))
# init with empty data matrix
shape = (3, 5)
adata = AnnData(None, uns=dict(test=np.array((3, 3))), shape=shape)
assert adata.X is None
assert adata.shape == shape
assert "test" in adata.uns
def test_create_with_dfs():
X = np.ones((6, 3))
obs = pd.DataFrame(dict(cat_anno=pd.Categorical(["a", "a", "a", "a", "b", "a"])))
obs_copy = obs.copy()
adata = AnnData(X=X, obs=obs)
assert obs.index.equals(obs_copy.index)
assert obs.index.astype(str).equals(adata.obs.index)
def test_create_from_df():
df = pd.DataFrame(np.ones((3, 2)), index=["a", "b", "c"], columns=["A", "B"])
ad = AnnData(df)
assert df.values.tolist() == ad.X.tolist()
assert df.columns.tolist() == ad.var_names.tolist()
assert df.index.tolist() == ad.obs_names.tolist()
def test_create_from_sparse_df():
s = sp.random(20, 30, density=0.2)
obs_names = [f"obs{i}" for i in range(20)]
var_names = [f"var{i}" for i in range(30)]
df = pd.DataFrame.sparse.from_spmatrix(s, index=obs_names, columns=var_names)
a = AnnData(df)
b = AnnData(s, obs=pd.DataFrame(index=obs_names), var=pd.DataFrame(index=var_names))
assert_equal(a, b)
assert issparse(a.X)
def test_create_from_df_with_obs_and_var():
df = pd.DataFrame(np.ones((3, 2)), index=["a", "b", "c"], columns=["A", "B"])
obs = pd.DataFrame(np.ones((3, 1)), index=df.index, columns=["C"])
var = pd.DataFrame(np.ones((2, 1)), index=df.columns, columns=["D"])
ad = AnnData(df, obs=obs, var=var)
assert df.values.tolist() == ad.X.tolist()
assert df.columns.tolist() == ad.var_names.tolist()
assert df.index.tolist() == ad.obs_names.tolist()
assert obs.equals(ad.obs)
assert var.equals(ad.var)
with pytest.raises(ValueError, match=r"Index of obs must match index of X."):
AnnData(df, obs=obs.reset_index())
with pytest.raises(ValueError, match=r"Index of var must match columns of X."):
AnnData(df, var=var.reset_index())
def test_from_df_and_dict():
df = pd.DataFrame(dict(a=[0.1, 0.2, 0.3], b=[1.1, 1.2, 1.3]))
adata = AnnData(df, dict(species=pd.Categorical(["a", "b", "a"])))
assert adata.obs["species"].values.tolist() == ["a", "b", "a"]
def test_df_warnings():
df = pd.DataFrame(dict(A=[1, 2, 3], B=[1.0, 2.0, 3.0]), index=["a", "b", "c"])
with pytest.warns(UserWarning, match=r"X.*dtype float64"):
adata = AnnData(df)
with pytest.warns(UserWarning, match=r"X.*dtype float64"):
adata.X = df
def test_attr_deletion():
full = gen_adata((30, 30))
# Empty has just X, obs_names, var_names
empty = AnnData(None, obs=full.obs[[]], var=full.var[[]])
for attr in ["X", "obs", "var", "obsm", "varm", "obsp", "varp", "layers", "uns"]:
delattr(full, attr)
assert_equal(getattr(full, attr), getattr(empty, attr))
assert_equal(full, empty, exact=True)
def test_names():
adata = AnnData(
np.array([[1, 2, 3], [4, 5, 6]]),
dict(obs_names=["A", "B"]),
dict(var_names=["a", "b", "c"]),
)
assert adata.obs_names.tolist() == "A B".split()
assert adata.var_names.tolist() == "a b c".split()
adata = AnnData(np.array([[1, 2], [3, 4], [5, 6]]), var=dict(var_names=["a", "b"]))
assert adata.var_names.tolist() == ["a", "b"]
@pytest.mark.parametrize(
"names,after",
[
pytest.param(["a", "b"], None, id="list"),
pytest.param(
pd.Series(["AAD", "CCA"], name="barcodes"), "barcodes", id="Series-str"
),
pytest.param(pd.Series(["x", "y"], name=0), None, id="Series-int"),
],
)
@pytest.mark.parametrize("attr", ["obs_names", "var_names"])
def test_setting_index_names(names, after, attr):
adata = adata_dense.copy()
assert getattr(adata, attr).name is None
setattr(adata, attr, names)
assert getattr(adata, attr).name == after
if hasattr(names, "name"):
assert names.name is not None
# Testing for views
new = adata[:, :]
assert new.is_view
setattr(new, attr, names)
assert_equal(new, adata, exact=True)
assert not new.is_view
@pytest.mark.parametrize("attr", ["obs_names", "var_names"])
def test_setting_index_names_error(attr):
orig = adata_sparse[:2, :2]
adata = adata_sparse[:2, :2]
assert getattr(adata, attr).name is None
with pytest.raises(ValueError, match=fr"AnnData expects \.{attr[:3]}\.index\.name"):
setattr(adata, attr, pd.Index(["x", "y"], name=0))
assert adata.is_view
assert getattr(adata, attr).tolist() != ["x", "y"]
assert getattr(adata, attr).tolist() == getattr(orig, attr).tolist()
assert_equal(orig, adata, exact=True)
@pytest.mark.parametrize("dim", ["obs", "var"])
def test_setting_dim_index(dim):
index_attr = f"{dim}_names"
mapping_attr = f"{dim}m"
orig = gen_adata((5, 5))
orig.raw = orig
curr = orig.copy()
view = orig[:, :]
new_idx = pd.Index(list("abcde"), name="letters")
setattr(curr, index_attr, new_idx)
pd.testing.assert_index_equal(getattr(curr, index_attr), new_idx)
pd.testing.assert_index_equal(getattr(curr, mapping_attr)["df"].index, new_idx)
pd.testing.assert_index_equal(getattr(curr, mapping_attr).dim_names, new_idx)
pd.testing.assert_index_equal(curr.obs_names, curr.raw.obs_names)
# Testing view behaviour
setattr(view, index_attr, new_idx)
assert not view.is_view
pd.testing.assert_index_equal(getattr(view, index_attr), new_idx)
pd.testing.assert_index_equal(getattr(view, mapping_attr)["df"].index, new_idx)
pd.testing.assert_index_equal(getattr(view, mapping_attr).dim_names, new_idx)
with pytest.raises(AssertionError):
pd.testing.assert_index_equal(
getattr(view, index_attr), getattr(orig, index_attr)
)
assert_equal(view, curr, exact=True)
# test case in #459
fake_m = pd.DataFrame(curr.X.T, index=getattr(curr, index_attr))
getattr(curr, mapping_attr)["df2"] = fake_m
def test_indices_dtypes():
adata = AnnData(
np.array([[1, 2, 3], [4, 5, 6]]),
dict(obs_names=["A", "B"]),
dict(var_names=["a", "b", "c"]),
)
adata.obs_names = ["ö", "a"]
assert adata.obs_names.tolist() == ["ö", "a"]
def test_slicing():
adata = AnnData(np.array([[1, 2, 3], [4, 5, 6]]))
# assert adata[:, 0].X.tolist() == adata.X[:, 0].tolist() # No longer the case
assert adata[0, 0].X.tolist() == np.reshape(1, (1, 1)).tolist()
assert adata[0, :].X.tolist() == np.reshape([1, 2, 3], (1, 3)).tolist()
assert adata[:, 0].X.tolist() == np.reshape([1, 4], (2, 1)).tolist()
assert adata[:, [0, 1]].X.tolist() == [[1, 2], [4, 5]]
assert adata[:, np.array([0, 2])].X.tolist() == [[1, 3], [4, 6]]
assert adata[:, np.array([False, True, True])].X.tolist() == [
[2, 3],
[5, 6],
]
assert adata[:, 1:3].X.tolist() == [[2, 3], [5, 6]]
assert adata[0:2, :][:, 0:2].X.tolist() == [[1, 2], [4, 5]]
assert adata[0:1, :][:, 0:2].X.tolist() == np.reshape([1, 2], (1, 2)).tolist()
assert adata[0, :][:, 0].X.tolist() == np.reshape(1, (1, 1)).tolist()
assert adata[:, 0:2][0:2, :].X.tolist() == [[1, 2], [4, 5]]
assert adata[:, 0:2][0:1, :].X.tolist() == np.reshape([1, 2], (1, 2)).tolist()
assert adata[:, 0][0, :].X.tolist() == np.reshape(1, (1, 1)).tolist()
def test_boolean_slicing():
adata = AnnData(np.array([[1, 2, 3], [4, 5, 6]]))
obs_selector = np.array([True, False], dtype=bool)
vars_selector = np.array([True, False, False], dtype=bool)
assert adata[obs_selector, :][:, vars_selector].X.tolist() == [[1]]
assert adata[:, vars_selector][obs_selector, :].X.tolist() == [[1]]
assert adata[obs_selector, :][:, 0].X.tolist() == [[1]]
assert adata[:, 0][obs_selector, :].X.tolist() == [[1]]
assert adata[0, :][:, vars_selector].X.tolist() == [[1]]
assert adata[:, vars_selector][0, :].X.tolist() == [[1]]
obs_selector = np.array([True, False], dtype=bool)
vars_selector = np.array([True, True, False], dtype=bool)
assert adata[obs_selector, :][:, vars_selector].X.tolist() == [[1, 2]]
assert adata[:, vars_selector][obs_selector, :].X.tolist() == [[1, 2]]
assert adata[obs_selector, :][:, 0:2].X.tolist() == [[1, 2]]
assert adata[:, 0:2][obs_selector, :].X.tolist() == [[1, 2]]
assert adata[0, :][:, vars_selector].X.tolist() == [[1, 2]]
assert adata[:, vars_selector][0, :].X.tolist() == [[1, 2]]
obs_selector = np.array([True, True], dtype=bool)
vars_selector = np.array([True, True, False], dtype=bool)
assert adata[obs_selector, :][:, vars_selector].X.tolist() == [
[1, 2],
[4, 5],
]
assert adata[:, vars_selector][obs_selector, :].X.tolist() == [
[1, 2],
[4, 5],
]
assert adata[obs_selector, :][:, 0:2].X.tolist() == [[1, 2], [4, 5]]
assert adata[:, 0:2][obs_selector, :].X.tolist() == [[1, 2], [4, 5]]
assert adata[0:2, :][:, vars_selector].X.tolist() == [[1, 2], [4, 5]]
assert adata[:, vars_selector][0:2, :].X.tolist() == [[1, 2], [4, 5]]
def test_oob_boolean_slicing():
len1, len2 = np.random.choice(100, 2, replace=False)
with pytest.raises(IndexError) as e:
AnnData(np.empty((len1, 100)))[np.random.randint(0, 2, len2, dtype=bool), :]
assert str(len1) in str(e.value)
assert str(len2) in str(e.value)
len1, len2 = np.random.choice(100, 2, replace=False)
with pytest.raises(IndexError) as e:
AnnData(np.empty((100, len1)))[:, np.random.randint(0, 2, len2, dtype=bool)]
assert str(len1) in str(e.value)
assert str(len2) in str(e.value)
def test_slicing_strings():
adata = AnnData(
np.array([[1, 2, 3], [4, 5, 6]]),
dict(obs_names=["A", "B"]),
dict(var_names=["a", "b", "c"]),
)
assert adata["A", "a"].X.tolist() == [[1]]
assert adata["A", :].X.tolist() == [[1, 2, 3]]
assert adata[:, "a"].X.tolist() == [[1], [4]]
assert adata[:, ["a", "b"]].X.tolist() == [[1, 2], [4, 5]]
assert adata[:, np.array(["a", "c"])].X.tolist() == [[1, 3], [4, 6]]
assert adata[:, "b":"c"].X.tolist() == [[2, 3], [5, 6]]
with pytest.raises(KeyError):
_ = adata[:, "X"]
with pytest.raises(KeyError):
_ = adata["X", :]
with pytest.raises(KeyError):
_ = adata["A":"X", :]
with pytest.raises(KeyError):
_ = adata[:, "a":"X"]
# Test if errors are helpful
with pytest.raises(KeyError, match=r"not_in_var"):
adata[:, ["A", "B", "not_in_var"]]
with pytest.raises(KeyError, match=r"not_in_obs"):
adata[["A", "B", "not_in_obs"], :]
def test_slicing_graphs():
# Testing for deprecated behaviour of connectivity matrices in .uns["neighbors"]
with pytest.warns(FutureWarning, match=r".obsp\['connectivities'\]"):
adata = AnnData(
np.array([[1, 2], [3, 4], [5, 6]]),
uns=dict(neighbors=dict(connectivities=sp.csr_matrix(np.ones((3, 3))))),
)
adata_sub = adata[[0, 1], :]
with pytest.warns(FutureWarning):
assert adata_sub.uns["neighbors"]["connectivities"].shape[0] == 2
assert adata.uns["neighbors"]["connectivities"].shape[0] == 3
assert adata_sub.copy().uns["neighbors"]["connectivities"].shape[0] == 2
def test_slicing_series():
adata = AnnData(
np.array([[1, 2], [3, 4], [5, 6]]),
dict(obs_names=["A", "B", "C"]),
dict(var_names=["a", "b"]),
)
df = pd.DataFrame(dict(a=["1", "2", "2"]))
df1 = pd.DataFrame(dict(b=["1", "2"]))
assert adata[df["a"].values == "2"].X.tolist() == adata[df["a"] == "2"].X.tolist()
assert (
adata[:, df1["b"].values == "2"].X.tolist()
== adata[:, df1["b"] == "2"].X.tolist()
)
def test_strings_to_categoricals():
adata = AnnData(
np.array([[1, 2], [3, 4], [5, 6], [7, 8]]), dict(k=["a", "a", "b", "b"])
)
adata.strings_to_categoricals()
assert adata.obs["k"].cat.categories.tolist() == ["a", "b"]
def test_slicing_remove_unused_categories():
adata = AnnData(
np.array([[1, 2], [3, 4], [5, 6], [7, 8]]), dict(k=["a", "a", "b", "b"])
)
adata._sanitize()
assert adata[2:4].obs["k"].cat.categories.tolist() == ["b"]
def test_get_subset_annotation():
adata = AnnData(
np.array([[1, 2, 3], [4, 5, 6]]),
dict(S=["A", "B"]),
dict(F=["a", "b", "c"]),
)
assert adata[0, 0].obs["S"].tolist() == ["A"]
assert adata[0, 0].var["F"].tolist() == ["a"]
def test_append_col():
adata = AnnData(np.array([[1, 2, 3], [4, 5, 6]]))
adata.obs["new"] = [1, 2]
# this worked in the initial AnnData, but not with a dataframe
# adata.obs[['new2', 'new3']] = [['A', 'B'], ['c', 'd']]
with pytest.raises(ValueError):
adata.obs["new4"] = "far too long".split()
def test_delete_col():
adata = AnnData(np.array([[1, 2, 3], [4, 5, 6]]), dict(o1=[1, 2], o2=[3, 4]))
assert ["o1", "o2"] == adata.obs_keys()
del adata.obs["o1"]
assert ["o2"] == adata.obs_keys()
assert [3, 4] == adata.obs["o2"].tolist()
def test_set_obs():
adata = AnnData(np.array([[1, 2, 3], [4, 5, 6]]))
adata.obs = pd.DataFrame(dict(a=[3, 4]))
assert adata.obs_names.tolist() == [0, 1]
with pytest.raises(ValueError):
adata.obs = pd.DataFrame(dict(a=[3, 4, 5]))
adata.obs = dict(a=[1, 2])
def test_multicol():
adata = AnnData(np.array([[1, 2, 3], [4, 5, 6]]))
# 'c' keeps the columns as should be
adata.obsm["c"] = np.array([[0.0, 1.0], [2, 3]])
assert adata.obsm_keys() == ["c"]
assert adata.obsm["c"].tolist() == [[0.0, 1.0], [2, 3]]
def test_n_obs():
adata = AnnData(np.array([[1, 2], [3, 4], [5, 6]]))
assert adata.n_obs == 3
adata1 = adata[:2]
assert adata1.n_obs == 2
def test_equality_comparisons():
adata1 = AnnData(np.array([[1, 2], [3, 4], [5, 6]]))
adata2 = AnnData(np.array([[1, 2], [3, 4], [5, 6]]))
with pytest.raises(NotImplementedError):
adata1 == adata1
with pytest.raises(NotImplementedError):
adata1 == adata2
with pytest.raises(NotImplementedError):
adata1 != adata2
with pytest.raises(NotImplementedError):
adata1 == 1
with pytest.raises(NotImplementedError):
adata1 != 1
def test_rename_categories():
X = np.ones((6, 3))
obs = pd.DataFrame(dict(cat_anno=pd.Categorical(["a", "a", "a", "a", "b", "a"])))
adata = AnnData(X=X, obs=obs)
adata.uns["tool"] = {}
adata.uns["tool"]["cat_array"] = np.rec.fromarrays(
[np.ones(2) for cat in adata.obs["cat_anno"].cat.categories],
dtype=[(cat, "float32") for cat in adata.obs["cat_anno"].cat.categories],
)
adata.uns["tool"]["params"] = dict(groupby="cat_anno")
new_categories = ["c", "d"]
adata.rename_categories("cat_anno", new_categories)
assert list(adata.obs["cat_anno"].cat.categories) == new_categories
assert list(adata.uns["tool"]["cat_array"].dtype.names) == new_categories
def test_pickle():
import pickle
adata = AnnData()
adata2 = pickle.loads(pickle.dumps(adata))
assert adata2.obsm.parent is adata2
def test_to_df_dense():
X_df = adata_dense.to_df()
layer_df = adata_dense.to_df(layer="test")
np.testing.assert_array_equal(adata_dense.layers["test"], layer_df.values)
np.testing.assert_array_equal(adata_dense.X, X_df.values)
pd.testing.assert_index_equal(X_df.columns, layer_df.columns)
pd.testing.assert_index_equal(X_df.index, layer_df.index)
def test_convenience():
adata = adata_sparse.copy()
adata.layers["x2"] = adata.X * 2
adata.var["anno2"] = ["p1", "p2", "p3"]
adata.raw = adata
adata.X = adata.X / 2
adata_dense = adata.copy()
adata_dense.X = adata_dense.X.toarray()
def assert_same_op_result(a1, a2, op):
r1 = op(a1)
r2 = op(a2)
assert np.all(r1 == r2)
assert type(r1) is type(r2)
assert np.allclose(adata.obs_vector("b"), np.array([1.0, 2.5]))
assert np.allclose(adata.raw.obs_vector("c"), np.array([3, 6]))
assert np.all(adata.obs_vector("anno1") == np.array(["c1", "c2"]))
assert np.allclose(adata.var_vector("s1"), np.array([0, 1.0, 1.5]))
assert np.allclose(adata.raw.var_vector("s2"), np.array([0, 5, 6]))
for obs_k, layer in product(["a", "b", "c", "anno1"], [None, "x2"]):
assert_same_op_result(
adata, adata_dense, lambda x: x.obs_vector(obs_k, layer=layer)
)
for obs_k in ["a", "b", "c"]:
assert_same_op_result(adata, adata_dense, lambda x: x.raw.obs_vector(obs_k))
for var_k, layer in product(["s1", "s2", "anno2"], [None, "x2"]):
assert_same_op_result(
adata, adata_dense, lambda x: x.var_vector(var_k, layer=layer)
)
for var_k in ["s1", "s2", "anno2"]:
assert_same_op_result(adata, adata_dense, lambda x: x.raw.var_vector(var_k))
def test_1d_slice_dtypes():
N, M = 10, 20
obs_df = pd.DataFrame(
dict(
cat=pd.Categorical(np.arange(N, dtype=int)),
int=np.arange(N, dtype=int),
float=np.arange(N, dtype=float),
obj=[str(i) for i in np.arange(N, dtype=int)],
),
index=[f"cell{i}" for i in np.arange(N, dtype=int)],
)
var_df = pd.DataFrame(
dict(
cat=pd.Categorical(np.arange(M, dtype=int)),
int=np.arange(M, dtype=int),
float=np.arange(M, dtype=float),
obj=[str(i) for i in np.arange(M, dtype=int)],
),
index=[f"gene{i}" for i in np.arange(M, dtype=int)],
)
    adata = AnnData(X=np.random.random((N, M)), obs=obs_df, var=var_df)
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Utility code for converting between protein representations.
Note that several methods here are no longer used in any of the training routines.
However, they were quite useful to us during the course of research,
so we are releasing them here in case they help others.
"""
import collections
import os
import os.path as osp
import pickle
import random
from itertools import product
from multiprocessing import Pool
import numpy as np
import pandas as pd
import gemmi
from amino_acid_config import kvs, res_atoms, res_children, res_chis, res_parents
from config import MMCIF_PATH, ROTAMER_LIBRARY_PATH
from constants import atom_names, residue_names
from math_utils import rotate_v1_v2, rotate_v1_v2_vec
def parse_dense_format(node_embed):
"""
In protein-ebm, we represent amino acids in two different formats.
This method converts from the dense format to a sparse format.
===============
==== Dense ====
===============
    The dense format represents a protein as a D x 6 dimensional representation.
Each 6 dimensional vector represents an atom, following this scheme:
[1]: amino acid identity of the amino acid the atom is part of (residue_idx)
    [2]: element identity of the atom (atom_idx)
[3]: positional location of atom in the amino acid (atom_num)
[4..6]: x,y,z coordinates
The dense format is useful for feeding data into a neural network.
===============
==== Sparse ===
===============
    The sparse format represents data based on its topology (parent/child/etc.).
    It follows this scheme:
    amino_name: amino acid to substitute
par: A N x 20 encoding of the relative offset of the parent of each atom. For example,
the amino acid glycine would be represented as [-18 -1 -1 -1 0, ...]
child: A N x 20 encoding of the child of each atom. For example, the amino acid glycine
would be represented as [1 1 18 0 0 0 ..]
pos_exist: A N x 20 mask encoding of which atoms are valid for each amino acid so for
example the amino acid glycine would be represented as [1 1 1 1 0 0 ...]
    chi_valid: An N x 20 mask (only the first five entries are meaningful)
        encoding which chi angles are valid; for example glycine would
        be represented as [0 0 0 0 0 ...]
pos: A N x 20 x 3 encoding the (x, y, z) coordinates of each atom per amino acid in a protein
i: amino acid position to substitute
sequence_map: map from amino acid to structure
rotate_matrix: matrix of rotation to amino acid position
This format is easier for manipulating the proteins, e.g changing the rotamers
during negative sampling.
See comments in the implementation below for more details.
"""
# The input is a list of atoms. We keep track of how many we have processed.
start = 0
# Construct amino acid-level information from the atomic inputs
# Each amino acid is described on the atomic-level by 20-dim lists
pars = [] # ordinal distance of parent atoms
    childs = []  # ordinal distance of children atoms
pos = [] # 3d translations of each atom
pos_exists = [] # whether a position exists or not
residues = [] # the name of the amino acid
chis_valid = [] # a 20-dim list describing which atoms are part of the chi angle
# consume all of the atoms in the input
while start < node_embed.shape[0]:
idx = int(node_embed[start, 0])
residue = residue_names[idx]
# Get the parent and child representation (see amino_acid_config.py)
par = res_parents[residue].copy()
child = res_children[residue].copy()
n = len(par)
# 20-dim mask of which positions encode meaningful values
pos_exist = [1] * n + [0] * (20 - n) # this is the mask
# pad up to 20-dim with 0s
par = par + [0] * (20 - n)
child = child + [0] * (20 - len(child))
# x,y,z coordinates for each of the atoms in the amino acid, padded to 20-dim
pos_temp = np.concatenate(
[node_embed[start : start + n, -3:], np.zeros((20 - n, 3))], axis=0
)
# If we can fit these n atom in, then record the information
if start + n <= node_embed.shape[0]:
pars.append(par)
childs.append(child)
pos.append(pos_temp)
pos_exists.append(pos_exist)
chis = res_chis[residue]
chis_valid.append([1] * len(chis) + [0] * (20 - len(chis)))
residues.append(residue.lower())
# All atoms from start <-> start+n should belong to the same amino acid
if not (node_embed[start : start + n, 0] == idx).all():
return None, None, None, None, None, None
        # keep track of the number of atoms consumed
start = start + n
    # Don't process single-amino-acid proteins
if len(pos) < 2:
return None, None, None, None, None, None
# Wrap the results in numpy arrays
pars, childs, pos, pos_exists, chis_valid = (
np.array(pars),
np.array(childs),
np.stack(pos, axis=0),
np.array(pos_exists),
np.array(chis_valid),
)
# The code above assumes that each nitrogen is connected to previous carbon
# and each carbon is connected to the next nitrogen. This is not the case
# for the N-terminus and C-terminus, so we need to override those cases.
pars[0, 0] = 0
childs[-1, 2] = 0
# return the new encoding in amino acid form
return pars, childs, pos, pos_exists, residues, chis_valid
def reencode_dense_format(node_embed, pos_new, pos_exist):
"""Updates x,y,z positions in dense encoding with new positions"""
node_embed_new = node_embed.copy()
pos_mask = pos_exist.astype(np.bool)
elem_num = pos_mask.sum()
node_embed_new[:elem_num, -3:] = pos_new[pos_mask]
return node_embed_new
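# Illustrative round trip between the two representations (node_embed is a
# hypothetical (D, 6) dense array as described above):
#
#     pars, childs, pos, pos_exist, residues, chis_valid = \
#         parse_dense_format(node_embed)
#     node_embed_same = reencode_dense_format(node_embed, pos, pos_exist)
#     # node_embed_same equals node_embed when positions are unchanged.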
def cif_to_embed(cif_file, ix=None, parse_skip=False):
"""
Parses a CIF file into a more convenient representation.
# Embedding format for nodes:
# 'one hot amino acid' amino type of molecule
# 'x, y, z' positional encoding
# 'one hot representation of atom type', either C, CA, N, O,
"""
st = gemmi.read_structure(cif_file)
# print(st)
# for model in st:
# print(model)
# for chain in model:
# print(chain)
# for residue in chain:
# print(residue)
results = []
skips = []
for model in st:
for i, chain in enumerate(model):
if (ix is not None) and (ix != i):
continue
atoms = []
node_embeddings = []
for j, residue in enumerate(chain):
translation = []
if residue.name not in residue_names:
# Skip over any structure that contains nucleotides
if residue.name in ["DA", "DC", "DG", "DT"]:
return None, None
else:
continue
residue_counter = 0
namino_elements = len(res_parents[residue.name])
amino_atoms = res_atoms[residue.name]
residue_atoms = []
residue_embed = []
                # residue object contains information about the residue, including identity
                # and spatial coordinates for atoms in the residue. We parse this into a
# dense encoding, for feeding into a neural network.
node_embed = parse_residue_embed(residue)
if len(node_embed) == 0:
skips.append(j)
node_embeddings.extend(node_embed)
node_embeddings = np.array(node_embeddings)
result = (node_embeddings,)
results.append(result)
if parse_skip:
return st, results, skips
else:
return st, results
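# Illustrative call (the path is hypothetical):
#
#     st, results = cif_to_embed("/data/mmcif/1abc.cif", ix=0)
#     node_embed = results[0][0]   # dense (n_atoms, 6) encoding of chain 0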
def vis_cif(cif_path, im_path):
import pymol
from pymol import cmd
cmd.load(cif_path, "mov")
cmd.zoom()
cmd.png(im_path, 300, 200)
def compute_chi_angle_st(st, ix):
angles = []
num = int(ix)
chain_counter = 0
for model in st:
for chain in model:
if num != chain_counter:
chain_counter += 1
continue
else:
for residue in chain:
if residue.name in residue_names:
chi_angles = compute_chi_angle_residue(residue)
if chi_angles is not None:
angles.append(chi_angles)
return angles
def compute_chi_angle_residue(residue):
# look up the atoms that are used for computing the chi angles.
chi_angles_atoms = kvs[residue.name]
angles = []
try:
for chi_angles_atom in chi_angles_atoms:
atoms = chi_angles_atom.split("-")
pos = []
for atom in atoms:
# In some cases, amino acid side chains are listed with CD1 instead of CD
if atom == "CD":
if "CD" not in residue:
atom = residue["CD1"]
else:
atom = residue[atom]
else:
atom = residue[atom]
pos.append((atom.pos.x, atom.pos.y, atom.pos.z))
pos = np.array(pos)
diff_vec = pos[2] - pos[1]
# Compute the axis in which we are computing the dihedral angle
diff_vec_normalize = diff_vec / np.linalg.norm(diff_vec)
diff_bot = pos[0] - pos[1]
diff_top = pos[3] - pos[2]
# Now project diff_bot and diff_top to be on the plane
diff_bot = diff_bot - diff_bot.dot(diff_vec_normalize) * diff_vec_normalize
diff_top = diff_top - diff_top.dot(diff_vec_normalize) * diff_vec_normalize
            diff_bot_normalize = diff_bot / np.linalg.norm(diff_bot)
"""
Make a movie out of a shotgun VAE projection and an audio file.
Reduce speed by 50%:
::
ffmpeg -i out.mp4 -filter_complex "[0:v]setpts=PTS/0.5[v];[0:a]atempo=0.5[a]" -map "[v]" -map "[a]" -strict -2 out.mp4
TO DO
-----
* Check whether ffmpeg is installed.
"""
__date__ = "November 2019 - November 2020"
import joblib
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import numpy as np
import os
from scipy.io import wavfile
from scipy.io.wavfile import WavFileWarning
from scipy.spatial.distance import euclidean, correlation
from sklearn.neighbors import NearestNeighbors
import subprocess
import torch
from torch.utils.data import Dataset, DataLoader
import warnings
from ava.models.vae import VAE
def shotgun_movie_DC(dc, audio_file, p, method='spectrogram_correlation', \
output_dir='temp', fps=30, shoulder=0.01, c='b', alpha=0.2, s=0.9, \
marker_c='r', marker_s=50.0, marker_marker='*', transform_fn=None,
load_transform=False, save_transform=False, mp4_fn='out.mp4'):
"""
Make a shotgun VAE projection movie with the given audio file.
Parameters
----------
dc : ava.data.data_container.DataContainer
See ava.data.data_container.
audio_file : str
Path to audio file.
p : dict
Preprocessing parameters. Must contain keys: ``'fs'``, ``'get_spec'``,
``'num_freq_bins'``, ``'num_time_bins'``, ``'nperseg'``, ``'noverlap'``,
``'window_length'``, ``'min_freq'``, ``'max_freq'``, ``'spec_min_val'``,
``'spec_max_val'``, ``'mel'``, ...
method : str, optional
How to map spectrograms to points in the UMAP embedding. `'latent_nn'`
assigns embeddings based on nearest neighbors in latent space.
`'re_umap'` uses a pretrained UMAP object to map the spectrogram's
latent features directly. `'spectrogram_correlation'` finds the
spectrogram with the highest correlation. Defaults to
`'spectrogram_correlation'`.
output_dir : str, optional
Directory where output images are written. Defaults to ``'temp'``.
fps : int, optional
        Frames per second. Defaults to ``30``.
shoulder : float, optional
The movie will start this far into the audio file and stop this far from
        the end. This removes weird edge effects when making spectrograms. Defaults
        to ``0.01``.
c : str, optional
Passed to ``matplotlib.pyplot.scatter`` for background points. Defaults
to ``'b'``.
alpha : float, optional
Passed to ``matplotlib.pyplot.scatter`` for background points. Defaults
to ``0.2``.
s : float, optional
Passed to ``matplotlib.pyplot.scatter`` for background points. Defaults
to ``0.9``.
marker_c : str, optional
Passed to ``matplotlib.pyplot.scatter`` for the marker. Defaults to
``'r'``.
marker_s : float, optional
Passed to ``matplotlib.pyplot.scatter`` for the marker. Defaults to
        ``50.0``.
marker_marker : str, optional
Passed to ``matplotlib.pyplot.scatter`` for the marker. Defaults to
        ``'*'``.
"""
assert dc.model_filename is not None
assert method in ['latent_nn', 're_umap', 'spectrogram_correlation']
if os.path.exists(output_dir):
for fn in os.listdir(output_dir):
if len(fn) > 4 and fn[-4:] == '.jpg':
os.remove(os.path.join(output_dir, fn))
# Read the audio file.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=WavFileWarning)
fs, audio = wavfile.read(audio_file)
assert fs == p['fs'], "found fs="+str(fs)+", expected "+str(p['fs'])
# Make spectrograms.
specs = []
dt = 1/fps
onset = shoulder
while onset + p['window_length'] < len(audio)/fs - shoulder:
offset = onset + p['window_length']
target_times = np.linspace(onset, offset, p['num_time_bins'])
# Then make a spectrogram.
spec, flag = p['get_spec'](onset-shoulder, offset+shoulder, audio, p, \
fs=fs, target_times=target_times)
assert flag
specs.append(spec)
onset += dt
assert len(specs) > 0
    specs = np.stack(specs)
import itertools
import os
import re
import numpy as np
from numpy.testing import assert_allclose, assert_almost_equal
from numpy.testing import assert_array_almost_equal, assert_array_equal
from scipy import sparse
import pytest
from sklearn.base import clone
from sklearn.datasets import load_iris, make_classification
from sklearn.metrics import log_loss
from sklearn.metrics import get_scorer
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.utils import compute_class_weight, _IS_32BIT
from sklearn.utils._testing import ignore_warnings
from sklearn.utils import shuffle
from sklearn.linear_model import SGDClassifier
from sklearn.preprocessing import scale
from sklearn.utils._testing import skip_if_no_parallel
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model._logistic import (
_log_reg_scoring_path,
_logistic_regression_path,
LogisticRegression,
LogisticRegressionCV,
)
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sparse.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert predicted.shape == (n_samples,)
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert probabilities.shape == (n_samples, n_classes)
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False, random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False, random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
msg = "Penalty term must be positive"
with pytest.raises(ValueError, match=msg):
LogisticRegression(C=-1).fit(X, Y1)
with pytest.raises(ValueError, match=msg):
LogisticRegression(C="test").fit(X, Y1)
msg = "is not a valid scoring value"
with pytest.raises(ValueError, match=msg):
LogisticRegressionCV(scoring="bad-scorer", cv=2).fit(X, Y1)
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = "Tolerance for stopping criteria must be positive"
with pytest.raises(ValueError, match=msg):
LR(tol=-1).fit(X, Y1)
with pytest.raises(ValueError, match=msg):
LR(tol="test").fit(X, Y1)
msg = "Maximum number of iteration must be positive"
with pytest.raises(ValueError, match=msg):
LR(max_iter=-1).fit(X, Y1)
with pytest.raises(ValueError, match=msg):
LR(max_iter="test").fit(X, Y1)
def test_logistic_cv_mock_scorer():
class MockScorer:
def __init__(self):
self.calls = 0
self.scores = [0.1, 0.4, 0.8, 0.5]
def __call__(self, model, X, y, sample_weight=None):
score = self.scores[self.calls % len(self.scores)]
self.calls += 1
return score
mock_scorer = MockScorer()
Cs = [1, 2, 3, 4]
cv = 2
lr = LogisticRegressionCV(Cs=Cs, scoring=mock_scorer, cv=cv)
lr.fit(X, Y1)
# Cs[2] has the highest score (0.8) from MockScorer
assert lr.C_[0] == Cs[2]
# scorer called 8 times (cv*len(Cs))
assert mock_scorer.calls == cv * len(Cs)
# reset mock_scorer
mock_scorer.calls = 0
custom_score = lr.score(X, lr.predict(X))
assert custom_score == mock_scorer.scores[0]
assert mock_scorer.calls == 1
@skip_if_no_parallel
def test_lr_liblinear_warning():
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
lr = LogisticRegression(solver="liblinear", n_jobs=2)
warning_message = (
"'n_jobs' > 1 does not have any effect when"
" 'solver' is set to 'liblinear'. Got 'n_jobs'"
" = 2."
)
with pytest.warns(UserWarning, match=warning_message):
lr.fit(iris.data, target)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [
LogisticRegression(C=len(iris.data), solver="liblinear", multi_class="ovr"),
LogisticRegression(C=len(iris.data), solver="lbfgs", multi_class="multinomial"),
LogisticRegression(
C=len(iris.data), solver="newton-cg", multi_class="multinomial"
),
LogisticRegression(
C=len(iris.data), solver="sag", tol=1e-2, multi_class="ovr", random_state=42
),
LogisticRegression(
C=len(iris.data),
solver="saga",
tol=1e-2,
multi_class="ovr",
random_state=42,
),
]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert np.mean(pred == target) > 0.95
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert np.mean(pred == target) > 0.95
@pytest.mark.parametrize("solver", ["lbfgs", "newton-cg", "sag", "saga"])
def test_multinomial_validation(solver):
lr = LogisticRegression(C=-1, solver=solver, multi_class="multinomial")
with pytest.raises(ValueError):
lr.fit([[0, 1], [1, 0]], [0, 1])
@pytest.mark.parametrize("LR", [LogisticRegression, LogisticRegressionCV])
def test_check_solver_option(LR):
X, y = iris.data, iris.target
msg = (
r"Logistic Regression supports only solvers in \['liblinear', "
r"'newton-cg', 'lbfgs', 'sag', 'saga'\], got wrong_name."
)
lr = LR(solver="wrong_name", multi_class="ovr")
with pytest.raises(ValueError, match=msg):
lr.fit(X, y)
msg = "multi_class should be 'multinomial', 'ovr' or 'auto'. Got wrong_name"
lr = LR(solver="newton-cg", multi_class="wrong_name")
with pytest.raises(ValueError, match=msg):
lr.fit(X, y)
# only 'liblinear' solver
msg = "Solver liblinear does not support a multinomial backend."
lr = LR(solver="liblinear", multi_class="multinomial")
with pytest.raises(ValueError, match=msg):
lr.fit(X, y)
# all solvers except 'liblinear' and 'saga'
for solver in ["newton-cg", "lbfgs", "sag"]:
msg = "Solver %s supports only 'l2' or 'none' penalties," % solver
lr = LR(solver=solver, penalty="l1", multi_class="ovr")
with pytest.raises(ValueError, match=msg):
lr.fit(X, y)
for solver in ["newton-cg", "lbfgs", "sag", "saga"]:
msg = "Solver %s supports only dual=False, got dual=True" % solver
lr = LR(solver=solver, dual=True, multi_class="ovr")
with pytest.raises(ValueError, match=msg):
lr.fit(X, y)
# only saga supports elasticnet. We only test for liblinear because the
# error is raised before for the other solvers (solver %s supports only l2
# penalties)
for solver in ["liblinear"]:
msg = "Only 'saga' solver supports elasticnet penalty, got solver={}.".format(
solver
)
lr = LR(solver=solver, penalty="elasticnet")
with pytest.raises(ValueError, match=msg):
lr.fit(X, y)
# liblinear does not support penalty='none'
msg = "penalty='none' is not supported for the liblinear solver"
lr = LR(penalty="none", solver="liblinear")
with pytest.raises(ValueError, match=msg):
lr.fit(X, y)
@pytest.mark.parametrize("solver", ["lbfgs", "newton-cg", "sag", "saga"])
def test_multinomial_binary(solver):
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
clf = LogisticRegression(
solver=solver, multi_class="multinomial", random_state=42, max_iter=2000
)
clf.fit(iris.data, target)
assert clf.coef_.shape == (1, iris.data.shape[1])
assert clf.intercept_.shape == (1,)
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(
solver=solver, multi_class="multinomial", random_state=42, fit_intercept=False
)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data), axis=1)]
assert np.mean(pred == target) > 0.9
def test_multinomial_binary_probabilities():
# Test multinomial LR gives expected probabilities based on the
# decision function, for a binary problem.
X, y = make_classification()
clf = LogisticRegression(multi_class="multinomial", solver="saga")
clf.fit(X, y)
decision = clf.decision_function(X)
proba = clf.predict_proba(X)
expected_proba_class_1 = np.exp(decision) / (np.exp(decision) + np.exp(-decision))
expected_proba = np.c_[1 - expected_proba_class_1, expected_proba_class_1]
assert_almost_equal(proba, expected_proba)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert sparse.issparse(clf.coef_)
pred_s_d = clf.decision_function(iris.data)
sp_data = sparse.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
with pytest.raises(ValueError):
clf.fit(X, y_wrong)
# Wrong dimensions for test data
with pytest.raises(ValueError):
clf.fit(X_, y_).predict(rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
logistic = LogisticRegression(random_state=0)
with pytest.raises(ValueError):
logistic.fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for solver in ["sag", "saga"]:
coefs, Cs, _ = f(_logistic_regression_path)(
X,
y,
Cs=Cs,
fit_intercept=False,
tol=1e-5,
solver=solver,
max_iter=1000,
multi_class="ovr",
random_state=0,
)
for i, C in enumerate(Cs):
lr = LogisticRegression(
C=C,
fit_intercept=False,
tol=1e-5,
solver=solver,
multi_class="ovr",
random_state=0,
max_iter=1000,
)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(
lr_coef, coefs[i], decimal=4, err_msg="with solver = %s" % solver
)
# test for fit_intercept=True
for solver in ("lbfgs", "newton-cg", "liblinear", "sag", "saga"):
Cs = [1e3]
coefs, Cs, _ = f(_logistic_regression_path)(
X,
y,
Cs=Cs,
tol=1e-6,
solver=solver,
intercept_scaling=10000.0,
random_state=0,
multi_class="ovr",
)
lr = LogisticRegression(
C=Cs[0],
tol=1e-4,
intercept_scaling=10000.0,
random_state=0,
multi_class="ovr",
solver=solver,
)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(
lr_coef, coefs[0], decimal=4, err_msg="with solver = %s" % solver
)
def test_logistic_regression_path_convergence_fail():
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = [1e3]
# Check that the convergence message points to both a model agnostic
# advice (scaling the data) and to the logistic regression specific
# documentation that includes hints on the solver configuration.
with pytest.warns(ConvergenceWarning) as record:
_logistic_regression_path(
X, y, Cs=Cs, tol=0.0, max_iter=1, random_state=0, verbose=0
)
assert len(record) == 1
warn_msg = record[0].message.args[0]
assert "lbfgs failed to converge" in warn_msg
assert "Increase the number of iterations" in warn_msg
assert "scale the data" in warn_msg
assert "linear_model.html#logistic-regression" in warn_msg
def test_liblinear_dual_random_state():
# random_state is relevant for liblinear solver only if dual=True
X, y = make_classification(n_samples=20, random_state=0)
lr1 = LogisticRegression(
random_state=0,
dual=True,
max_iter=1,
tol=1e-15,
solver="liblinear",
multi_class="ovr",
)
lr1.fit(X, y)
lr2 = LogisticRegression(
random_state=0,
dual=True,
max_iter=1,
tol=1e-15,
solver="liblinear",
multi_class="ovr",
)
lr2.fit(X, y)
lr3 = LogisticRegression(
random_state=8,
dual=True,
max_iter=1,
tol=1e-15,
solver="liblinear",
multi_class="ovr",
)
lr3.fit(X, y)
# same result for same random state
assert_array_almost_equal(lr1.coef_, lr2.coef_)
# different results for different random states
msg = "Arrays are not almost equal to 6 decimals"
with pytest.raises(AssertionError, match=msg):
assert_array_almost_equal(lr1.coef_, lr3.coef_)
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(
Cs=[1.0], fit_intercept=False, solver="liblinear", multi_class="ovr", cv=3
)
lr_cv.fit(X_ref, y)
lr = LogisticRegression(
C=1.0, fit_intercept=False, solver="liblinear", multi_class="ovr"
)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert len(lr_cv.classes_) == 2
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1,))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
@pytest.mark.parametrize(
"scoring, multiclass_agg_list",
[
("accuracy", [""]),
("precision", ["_macro", "_weighted"]),
        # no need to test for micro averaging because it is the same as
        # accuracy for f1, precision, and recall (see
        # https://github.com/scikit-learn/scikit-learn/pull/11578#discussion_r203250062)
("f1", ["_macro", "_weighted"]),
("neg_log_loss", [""]),
("recall", ["_macro", "_weighted"]),
],
)
def test_logistic_cv_multinomial_score(scoring, multiclass_agg_list):
# test that LogisticRegressionCV uses the right score to compute its
# cross-validation scores when using a multinomial scoring
# see https://github.com/scikit-learn/scikit-learn/issues/8720
X, y = make_classification(
n_samples=100, random_state=0, n_classes=3, n_informative=6
)
train, test = np.arange(80), np.arange(80, 100)
lr = LogisticRegression(C=1.0, multi_class="multinomial")
# we use lbfgs to support multinomial
params = lr.get_params()
# we store the params to set them further in _log_reg_scoring_path
for key in ["C", "n_jobs", "warm_start"]:
del params[key]
lr.fit(X[train], y[train])
for averaging in multiclass_agg_list:
scorer = get_scorer(scoring + averaging)
assert_array_almost_equal(
_log_reg_scoring_path(
X, y, train, test, Cs=[1.0], scoring=scorer, **params
)[2][0],
scorer(lr, X[test], y[test]),
)
def test_multinomial_logistic_regression_string_inputs():
# Test with string labels for LogisticRegression(CV)
n_samples, n_features, n_classes = 50, 5, 3
X_ref, y = make_classification(
n_samples=n_samples,
n_features=n_features,
n_classes=n_classes,
n_informative=3,
random_state=0,
)
y_str = LabelEncoder().fit(["bar", "baz", "foo"]).inverse_transform(y)
# For numerical labels, let y values be taken from set (-1, 0, 1)
y = np.array(y) - 1
# Test for string labels
lr = LogisticRegression(multi_class="multinomial")
lr_cv = LogisticRegressionCV(multi_class="multinomial", Cs=3)
lr_str = LogisticRegression(multi_class="multinomial")
lr_cv_str = LogisticRegressionCV(multi_class="multinomial", Cs=3)
lr.fit(X_ref, y)
lr_cv.fit(X_ref, y)
lr_str.fit(X_ref, y_str)
lr_cv_str.fit(X_ref, y_str)
assert_array_almost_equal(lr.coef_, lr_str.coef_)
assert sorted(lr_str.classes_) == ["bar", "baz", "foo"]
assert_array_almost_equal(lr_cv.coef_, lr_cv_str.coef_)
assert sorted(lr_str.classes_) == ["bar", "baz", "foo"]
assert sorted(lr_cv_str.classes_) == ["bar", "baz", "foo"]
# The predictions should be in original labels
assert sorted(np.unique(lr_str.predict(X_ref))) == ["bar", "baz", "foo"]
assert sorted(np.unique(lr_cv_str.predict(X_ref))) == ["bar", "baz", "foo"]
# Make sure class weights can be given with string labels
lr_cv_str = LogisticRegression(
class_weight={"bar": 1, "baz": 2, "foo": 0}, multi_class="multinomial"
).fit(X_ref, y_str)
assert sorted(np.unique(lr_cv_str.predict(X_ref))) == ["bar", "baz"]
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5, random_state=0)
X[X < 1.0] = 0.0
csr = sparse.csr_matrix(X)
clf = LogisticRegressionCV()
clf.fit(X, y)
clfs = LogisticRegressionCV()
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert clfs.C_ == clf.C_
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
# The cv indices from stratified kfold (where stratification is done based
# on the fine-grained iris classes, i.e, before the classes 0 and 1 are
# conflated) is used for both clf and clf1
n_cv = 2
cv = StratifiedKFold(n_cv)
precomputed_folds = list(cv.split(train, target))
# Train clf on the original dataset where classes 0 and 1 are separated
clf = LogisticRegressionCV(cv=precomputed_folds, multi_class="ovr")
clf.fit(train, target)
# Conflate classes 0 and 1 and train clf1 on this modified dataset
clf1 = LogisticRegressionCV(cv=precomputed_folds, multi_class="ovr")
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
# Ensure that what OvR learns for class2 is same regardless of whether
# classes 0 and 1 are separated or not
assert_allclose(clf.scores_[2], clf1.scores_[2])
assert_allclose(clf.intercept_[2:], clf1.intercept_)
assert_allclose(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert clf.coef_.shape == (3, n_features)
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert coefs_paths.shape == (3, n_cv, 10, n_features + 1)
assert clf.Cs_.shape == (10,)
scores = np.asarray(list(clf.scores_.values()))
assert scores.shape == (3, n_cv, 10)
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ["lbfgs", "newton-cg", "sag", "saga"]:
max_iter = 500 if solver in ["sag", "saga"] else 15
clf_multi = LogisticRegressionCV(
solver=solver,
multi_class="multinomial",
max_iter=max_iter,
random_state=42,
tol=1e-3 if solver in ["sag", "saga"] else 1e-2,
cv=2,
)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert multi_score > ovr_score
# Test attributes of LogisticRegressionCV
assert clf.coef_.shape == clf_multi.coef_.shape
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert coefs_paths.shape == (3, n_cv, 10, n_features + 1)
assert clf_multi.Cs_.shape == (10,)
scores = np.asarray(list(clf_multi.scores_.values()))
assert scores.shape == (3, n_cv, 10)
def test_logistic_regression_solvers():
"""Test solvers converge to the same result."""
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
params = dict(fit_intercept=False, random_state=42, multi_class="ovr")
solvers = ("newton-cg", "lbfgs", "liblinear", "sag", "saga")
regressors = {
solver: LogisticRegression(solver=solver, **params).fit(X, y)
for solver in solvers
}
for solver_1, solver_2 in itertools.combinations(regressors, r=2):
assert_array_almost_equal(
regressors[solver_1].coef_, regressors[solver_2].coef_, decimal=3
)
def test_logistic_regression_solvers_multiclass():
"""Test solvers converge to the same result for multiclass problems."""
X, y = make_classification(
n_samples=20, n_features=20, n_informative=10, n_classes=3, random_state=0
)
tol = 1e-7
params = dict(fit_intercept=False, tol=tol, random_state=42, multi_class="ovr")
solvers = ("newton-cg", "lbfgs", "liblinear", "sag", "saga")
# Override max iteration count for specific solvers to allow for
# proper convergence.
solver_max_iter = {"sag": 1000, "saga": 10000}
regressors = {
solver: LogisticRegression(
solver=solver, max_iter=solver_max_iter.get(solver, 100), **params
).fit(X, y)
for solver in solvers
}
for solver_1, solver_2 in itertools.combinations(regressors, r=2):
assert_array_almost_equal(
regressors[solver_1].coef_, regressors[solver_2].coef_, decimal=4
)
def test_logistic_regressioncv_class_weights():
for weight in [{0: 0.1, 1: 0.2}, {0: 0.1, 1: 0.2, 2: 0.5}]:
n_classes = len(weight)
for class_weight in (weight, "balanced"):
X, y = make_classification(
n_samples=30,
n_features=3,
n_repeated=0,
n_informative=3,
n_redundant=0,
n_classes=n_classes,
random_state=0,
)
clf_lbf = LogisticRegressionCV(
solver="lbfgs",
Cs=1,
fit_intercept=False,
multi_class="ovr",
class_weight=class_weight,
)
clf_ncg = LogisticRegressionCV(
solver="newton-cg",
Cs=1,
fit_intercept=False,
multi_class="ovr",
class_weight=class_weight,
)
clf_lib = LogisticRegressionCV(
solver="liblinear",
Cs=1,
fit_intercept=False,
multi_class="ovr",
class_weight=class_weight,
)
clf_sag = LogisticRegressionCV(
solver="sag",
Cs=1,
fit_intercept=False,
multi_class="ovr",
class_weight=class_weight,
tol=1e-5,
max_iter=10000,
random_state=0,
)
clf_saga = LogisticRegressionCV(
solver="saga",
Cs=1,
fit_intercept=False,
multi_class="ovr",
class_weight=class_weight,
tol=1e-5,
max_iter=10000,
random_state=0,
)
clf_lbf.fit(X, y)
clf_ncg.fit(X, y)
clf_lib.fit(X, y)
clf_sag.fit(X, y)
clf_saga.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_ncg.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_sag.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_saga.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_sample_weights():
X, y = make_classification(
n_samples=20, n_features=5, n_informative=3, n_classes=2, random_state=0
)
sample_weight = y + 1
for LR in [LogisticRegression, LogisticRegressionCV]:
kw = {"random_state": 42, "fit_intercept": False, "multi_class": "ovr"}
if LR is LogisticRegressionCV:
kw.update({"Cs": 3, "cv": 3})
# Test that passing sample_weight as ones is the same as
# not passing them at all (default None)
for solver in ["lbfgs", "liblinear"]:
clf_sw_none = LR(solver=solver, **kw)
clf_sw_ones = LR(solver=solver, **kw)
clf_sw_none.fit(X, y)
clf_sw_ones.fit(X, y, sample_weight=np.ones(y.shape[0]))
assert_array_almost_equal(clf_sw_none.coef_, clf_sw_ones.coef_, decimal=4)
# Test that sample weights work the same with the lbfgs,
# newton-cg, and 'sag' solvers
clf_sw_lbfgs = LR(**kw)
clf_sw_lbfgs.fit(X, y, sample_weight=sample_weight)
clf_sw_n = LR(solver="newton-cg", **kw)
clf_sw_n.fit(X, y, sample_weight=sample_weight)
clf_sw_sag = LR(solver="sag", tol=1e-10, **kw)
# ignore convergence warning due to small dataset
with ignore_warnings():
clf_sw_sag.fit(X, y, sample_weight=sample_weight)
clf_sw_liblinear = LR(solver="liblinear", **kw)
clf_sw_liblinear.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(clf_sw_lbfgs.coef_, clf_sw_n.coef_, decimal=4)
assert_array_almost_equal(clf_sw_lbfgs.coef_, clf_sw_sag.coef_, decimal=4)
assert_array_almost_equal(clf_sw_lbfgs.coef_, clf_sw_liblinear.coef_, decimal=4)
# Test that passing class_weight as [1,2] is the same as
# passing class weight = [1,1] but adjusting sample weights
# to be 2 for all instances of class 2
for solver in ["lbfgs", "liblinear"]:
clf_cw_12 = LR(solver=solver, class_weight={0: 1, 1: 2}, **kw)
clf_cw_12.fit(X, y)
clf_sw_12 = LR(solver=solver, **kw)
clf_sw_12.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(clf_cw_12.coef_, clf_sw_12.coef_, decimal=4)
# Test the above for l1 penalty and l2 penalty with dual=True.
# since the patched liblinear code is different.
clf_cw = LogisticRegression(
solver="liblinear",
fit_intercept=False,
class_weight={0: 1, 1: 2},
penalty="l1",
tol=1e-5,
random_state=42,
multi_class="ovr",
)
clf_cw.fit(X, y)
clf_sw = LogisticRegression(
solver="liblinear",
fit_intercept=False,
penalty="l1",
tol=1e-5,
random_state=42,
multi_class="ovr",
)
clf_sw.fit(X, y, sample_weight)
assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)
clf_cw = LogisticRegression(
solver="liblinear",
fit_intercept=False,
class_weight={0: 1, 1: 2},
penalty="l2",
dual=True,
random_state=42,
multi_class="ovr",
)
clf_cw.fit(X, y)
clf_sw = LogisticRegression(
solver="liblinear",
fit_intercept=False,
penalty="l2",
dual=True,
random_state=42,
multi_class="ovr",
)
clf_sw.fit(X, y, sample_weight)
assert_array_almost_equal(clf_cw.coef_, clf_sw.coef_, decimal=4)
def _compute_class_weight_dictionary(y):
# helper for returning a dictionary instead of an array
classes = np.unique(y)
class_weight = compute_class_weight("balanced", classes=classes, y=y)
class_weight_dict = dict(zip(classes, class_weight))
return class_weight_dict
def test_logistic_regression_class_weights():
# Multinomial case: remove 90% of class 0
X = iris.data[45:, :]
y = iris.target[45:]
solvers = ("lbfgs", "newton-cg")
class_weight_dict = _compute_class_weight_dictionary(y)
for solver in solvers:
clf1 = LogisticRegression(
solver=solver, multi_class="multinomial", class_weight="balanced"
)
clf2 = LogisticRegression(
solver=solver, multi_class="multinomial", class_weight=class_weight_dict
)
clf1.fit(X, y)
clf2.fit(X, y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=4)
# Binary case: remove 90% of class 0 and 100% of class 2
X = iris.data[45:100, :]
y = iris.target[45:100]
solvers = ("lbfgs", "newton-cg", "liblinear")
class_weight_dict = _compute_class_weight_dictionary(y)
for solver in solvers:
clf1 = LogisticRegression(
solver=solver, multi_class="ovr", class_weight="balanced"
)
clf2 = LogisticRegression(
solver=solver, multi_class="ovr", class_weight=class_weight_dict
)
clf1.fit(X, y)
clf2.fit(X, y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=6)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(
n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes,
random_state=0,
)
X = StandardScaler(with_mean=False).fit_transform(X)
    # 'lbfgs' is used as the reference solver
solver = "lbfgs"
ref_i = LogisticRegression(solver=solver, multi_class="multinomial")
ref_w = LogisticRegression(
solver=solver, multi_class="multinomial", fit_intercept=False
)
ref_i.fit(X, y)
ref_w.fit(X, y)
assert ref_i.coef_.shape == (n_classes, n_features)
assert ref_w.coef_.shape == (n_classes, n_features)
for solver in ["sag", "saga", "newton-cg"]:
clf_i = LogisticRegression(
solver=solver,
multi_class="multinomial",
random_state=42,
max_iter=2000,
tol=1e-7,
)
clf_w = LogisticRegression(
solver=solver,
multi_class="multinomial",
random_state=42,
max_iter=2000,
tol=1e-7,
fit_intercept=False,
)
clf_i.fit(X, y)
clf_w.fit(X, y)
assert clf_i.coef_.shape == (n_classes, n_features)
assert clf_w.coef_.shape == (n_classes, n_features)
# Compare solutions between lbfgs and the other solvers
assert_allclose(ref_i.coef_, clf_i.coef_, rtol=1e-2)
assert_allclose(ref_w.coef_, clf_w.coef_, rtol=1e-2)
assert_allclose(ref_i.intercept_, clf_i.intercept_, rtol=1e-2)
    # Test that the path gives almost the same results. However, since in
    # this case we take the average of the coefs after fitting across all
    # the folds, it need not be exactly the same.
for solver in ["lbfgs", "newton-cg", "sag", "saga"]:
clf_path = LogisticRegressionCV(
solver=solver, max_iter=2000, tol=1e-6, multi_class="multinomial", Cs=[1.0]
)
clf_path.fit(X, y)
assert_allclose(clf_path.coef_, ref_i.coef_, rtol=2e-2)
assert_allclose(clf_path.intercept_, ref_i.intercept_, rtol=2e-2)
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5, random_state=0)
clf = LogisticRegression(fit_intercept=False, solver="liblinear", multi_class="ovr")
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5, random_state=0)
clf = LogisticRegressionCV(solver="liblinear", multi_class="ovr")
clf.fit(sparse.csr_matrix(X), y)
def test_saga_sparse():
    # Test LogRegCV with solver='saga' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5, random_state=0)
clf = LogisticRegressionCV(solver="saga")
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(
intercept_scaling=i, solver="liblinear", multi_class="ovr"
)
msg = (
"Intercept scaling is %r but needs to be greater than 0."
" To disable fitting an intercept,"
" set fit_intercept=False."
% clf.intercept_scaling
)
with pytest.raises(ValueError, match=msg):
clf.fit(X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert clf.intercept_ == 0.0
def test_logreg_l1():
# Because liblinear penalizes the intercept and saga does not, we do not
# fit the intercept to make it possible to compare the coefficients of
# the two models at convergence.
rng = np.random.RandomState(42)
n_samples = 50
X, y = make_classification(n_samples=n_samples, n_features=20, random_state=0)
X_noise = rng.normal(size=(n_samples, 3))
X_constant = np.ones(shape=(n_samples, 2))
X = np.concatenate((X, X_noise, X_constant), axis=1)
lr_liblinear = LogisticRegression(
penalty="l1",
C=1.0,
solver="liblinear",
fit_intercept=False,
multi_class="ovr",
tol=1e-10,
)
lr_liblinear.fit(X, y)
lr_saga = LogisticRegression(
penalty="l1",
C=1.0,
solver="saga",
fit_intercept=False,
multi_class="ovr",
max_iter=1000,
tol=1e-10,
)
lr_saga.fit(X, y)
assert_array_almost_equal(lr_saga.coef_, lr_liblinear.coef_)
# Noise and constant features should be regularized to zero by the l1
# penalty
assert_array_almost_equal(lr_liblinear.coef_[0, -5:], np.zeros(5))
assert_array_almost_equal(lr_saga.coef_[0, -5:], np.zeros(5))
def test_logreg_l1_sparse_data():
# Because liblinear penalizes the intercept and saga does not, we do not
# fit the intercept to make it possible to compare the coefficients of
# the two models at convergence.
rng = np.random.RandomState(42)
n_samples = 50
X, y = make_classification(n_samples=n_samples, n_features=20, random_state=0)
X_noise = rng.normal(scale=0.1, size=(n_samples, 3))
X_constant = np.zeros(shape=(n_samples, 2))
X = np.concatenate((X, X_noise, X_constant), axis=1)
X[X < 1] = 0
X = sparse.csr_matrix(X)
lr_liblinear = LogisticRegression(
penalty="l1",
C=1.0,
solver="liblinear",
fit_intercept=False,
multi_class="ovr",
tol=1e-10,
)
lr_liblinear.fit(X, y)
lr_saga = LogisticRegression(
penalty="l1",
C=1.0,
solver="saga",
fit_intercept=False,
multi_class="ovr",
max_iter=1000,
tol=1e-10,
)
lr_saga.fit(X, y)
assert_array_almost_equal(lr_saga.coef_, lr_liblinear.coef_)
# Noise and constant features should be regularized to zero by the l1
# penalty
assert_array_almost_equal(lr_liblinear.coef_[0, -5:], np.zeros(5))
assert_array_almost_equal(lr_saga.coef_[0, -5:], np.zeros(5))
# Check that solving on the sparse and dense data yield the same results
lr_saga_dense = LogisticRegression(
penalty="l1",
C=1.0,
solver="saga",
fit_intercept=False,
multi_class="ovr",
max_iter=1000,
tol=1e-10,
)
lr_saga_dense.fit(X.toarray(), y)
assert_array_almost_equal(lr_saga.coef_, lr_saga_dense.coef_)
@pytest.mark.parametrize("random_seed", [42])
@pytest.mark.parametrize("penalty", ["l1", "l2"])
def test_logistic_regression_cv_refit(random_seed, penalty):
# Test that when refit=True, logistic regression cv with the saga solver
# converges to the same solution as logistic regression with a fixed
# regularization parameter.
# Internally the LogisticRegressionCV model uses a warm start to refit on
# the full data model with the optimal C found by CV. As the penalized
# logistic regression loss is convex, we should still recover exactly
# the same solution as long as the stopping criterion is strict enough (and
# that there are no exactly duplicated features when penalty='l1').
X, y = make_classification(n_samples=100, n_features=20, random_state=random_seed)
common_params = dict(
solver="saga",
penalty=penalty,
random_state=random_seed,
max_iter=1000,
tol=1e-12,
)
lr_cv = LogisticRegressionCV(Cs=[1.0], refit=True, **common_params)
lr_cv.fit(X, y)
lr = LogisticRegression(C=1.0, **common_params)
lr.fit(X, y)
assert_array_almost_equal(lr_cv.coef_, lr.coef_)
def test_logreg_predict_proba_multinomial():
X, y = make_classification(
n_samples=10, n_features=20, random_state=0, n_classes=3, n_informative=10
)
# Predicted probabilities using the true-entropy loss should give a
# smaller loss than those using the ovr method.
clf_multi = LogisticRegression(multi_class="multinomial", solver="lbfgs")
clf_multi.fit(X, y)
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_ovr = LogisticRegression(multi_class="ovr", solver="lbfgs")
clf_ovr.fit(X, y)
clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X))
assert clf_ovr_loss > clf_multi_loss
# Predicted probabilities using the soft-max function should give a
# smaller loss than those using the logistic function.
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X))
assert clf_wrong_loss > clf_multi_loss
@pytest.mark.parametrize("max_iter", np.arange(1, 5))
@pytest.mark.parametrize("multi_class", ["ovr", "multinomial"])
@pytest.mark.parametrize(
"solver, message",
[
(
"newton-cg",
"newton-cg failed to converge. Increase the number of iterations.",
),
(
"liblinear",
"Liblinear failed to converge, increase the number of iterations.",
),
("sag", "The max_iter was reached which means the coef_ did not converge"),
("saga", "The max_iter was reached which means the coef_ did not converge"),
("lbfgs", "lbfgs failed to converge"),
],
)
def test_max_iter(max_iter, multi_class, solver, message):
# Test that the maximum number of iteration is reached
X, y_bin = iris.data, iris.target.copy()
y_bin[y_bin == 2] = 0
if solver == "liblinear" and multi_class == "multinomial":
pytest.skip("'multinomial' is unavailable when solver='liblinear'")
lr = LogisticRegression(
max_iter=max_iter,
tol=1e-15,
multi_class=multi_class,
random_state=0,
solver=solver,
)
with pytest.warns(ConvergenceWarning, match=message):
lr.fit(X, y_bin)
assert lr.n_iter_[0] == max_iter
@pytest.mark.parametrize("solver", ["newton-cg", "liblinear", "sag", "saga", "lbfgs"])
def test_n_iter(solver):
# Test that self.n_iter_ has the correct format.
X, y = iris.data, iris.target
n_classes = np.unique(y).shape[0]
assert n_classes == 3
# Also generate a binary classification sub-problem.
y_bin = y.copy()
y_bin[y_bin == 2] = 0
n_Cs = 4
n_cv_fold = 2
# Binary classification case
clf = LogisticRegression(tol=1e-2, C=1.0, solver=solver, random_state=42)
clf.fit(X, y_bin)
assert clf.n_iter_.shape == (1,)
clf_cv = LogisticRegressionCV(
tol=1e-2, solver=solver, Cs=n_Cs, cv=n_cv_fold, random_state=42
)
clf_cv.fit(X, y_bin)
assert clf_cv.n_iter_.shape == (1, n_cv_fold, n_Cs)
# OvR case
clf.set_params(multi_class="ovr").fit(X, y)
assert clf.n_iter_.shape == (n_classes,)
clf_cv.set_params(multi_class="ovr").fit(X, y)
assert clf_cv.n_iter_.shape == (n_classes, n_cv_fold, n_Cs)
# multinomial case
if solver == "liblinear":
# This solver only supports one-vs-rest multiclass classification.
return
# When using the multinomial objective function, there is a single
# optimization problem to solve for all classes at once:
clf.set_params(multi_class="multinomial").fit(X, y)
assert clf.n_iter_.shape == (1,)
clf_cv.set_params(multi_class="multinomial").fit(X, y)
assert clf_cv.n_iter_.shape == (1, n_cv_fold, n_Cs)
@pytest.mark.parametrize("solver", ("newton-cg", "sag", "saga", "lbfgs"))
@pytest.mark.parametrize("warm_start", (True, False))
@pytest.mark.parametrize("fit_intercept", (True, False))
@pytest.mark.parametrize("multi_class", ["ovr", "multinomial"])
def test_warm_start(solver, warm_start, fit_intercept, multi_class):
# A 1-iteration second fit on same data should give almost same result
# with warm starting, and quite different result without warm starting.
# Warm starting does not work with liblinear solver.
X, y = iris.data, iris.target
clf = LogisticRegression(
tol=1e-4,
multi_class=multi_class,
warm_start=warm_start,
solver=solver,
random_state=42,
fit_intercept=fit_intercept,
)
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
coef_1 = clf.coef_
clf.max_iter = 1
clf.fit(X, y)
cum_diff = np.sum(np.abs(coef_1 - clf.coef_))
msg = (
"Warm starting issue with %s solver in %s mode "
"with fit_intercept=%s and warm_start=%s"
% (solver, multi_class, str(fit_intercept), str(warm_start))
)
if warm_start:
assert 2.0 > cum_diff, msg
else:
assert cum_diff > 2.0, msg
def test_saga_vs_liblinear():
iris = load_iris()
X, y = iris.data, iris.target
X = np.concatenate([X] * 3)
y = np.concatenate([y] * 3)
X_bin = X[y <= 1]
y_bin = y[y <= 1] * 2 - 1
X_sparse, y_sparse = make_classification(
n_samples=50, n_features=20, random_state=0
)
X_sparse = sparse.csr_matrix(X_sparse)
for X, y in ((X_bin, y_bin), (X_sparse, y_sparse)):
for penalty in ["l1", "l2"]:
n_samples = X.shape[0]
# alpha=1e-3 is time consuming
for alpha in np.logspace(-1, 1, 3):
saga = LogisticRegression(
C=1.0 / (n_samples * alpha),
solver="saga",
multi_class="ovr",
max_iter=200,
fit_intercept=False,
penalty=penalty,
random_state=0,
tol=1e-24,
)
liblinear = LogisticRegression(
C=1.0 / (n_samples * alpha),
solver="liblinear",
multi_class="ovr",
max_iter=200,
fit_intercept=False,
penalty=penalty,
random_state=0,
tol=1e-24,
)
saga.fit(X, y)
liblinear.fit(X, y)
# Convergence for alpha=1e-3 is very slow
assert_array_almost_equal(saga.coef_, liblinear.coef_, 3)
@pytest.mark.parametrize("multi_class", ["ovr", "multinomial"])
@pytest.mark.parametrize("solver", ["newton-cg", "liblinear", "saga"])
@pytest.mark.parametrize("fit_intercept", [False, True])
def test_dtype_match(solver, multi_class, fit_intercept):
# Test that np.float32 input data is not cast to np.float64 when possible
# and that the output is approximately the same no matter the input format.
if solver == "liblinear" and multi_class == "multinomial":
pytest.skip("liblinear does not support multinomial logistic")
out32_type = np.float64 if solver == "liblinear" else np.float32
X_32 = np.array(X).astype(np.float32)
y_32 = np.array(Y1).astype(np.float32)
X_64 = np.array(X).astype(np.float64)
y_64 = np.array(Y1).astype(np.float64)
X_sparse_32 = sparse.csr_matrix(X, dtype=np.float32)
X_sparse_64 = sparse.csr_matrix(X, dtype=np.float64)
solver_tol = 5e-4
lr_templ = LogisticRegression(
solver=solver,
multi_class=multi_class,
random_state=42,
tol=solver_tol,
fit_intercept=fit_intercept,
)
# Check 32-bit type consistency
lr_32 = clone(lr_templ)
lr_32.fit(X_32, y_32)
assert lr_32.coef_.dtype == out32_type
# Check 32-bit type consistency with sparsity
lr_32_sparse = clone(lr_templ)
lr_32_sparse.fit(X_sparse_32, y_32)
assert lr_32_sparse.coef_.dtype == out32_type
# Check 64-bit type consistency
lr_64 = clone(lr_templ)
lr_64.fit(X_64, y_64)
assert lr_64.coef_.dtype == np.float64
# Check 64-bit type consistency with sparsity
lr_64_sparse = clone(lr_templ)
lr_64_sparse.fit(X_sparse_64, y_64)
assert lr_64_sparse.coef_.dtype == np.float64
# solver_tol bounds the norm of the loss gradient
# dw ~= inv(H)*grad ==> |dw| ~= |inv(H)| * solver_tol, where H - hessian
#
# See https://github.com/scikit-learn/scikit-learn/pull/13645
#
# with Z = np.hstack((np.ones((3,1)), np.array(X)))
# In [8]: np.linalg.norm(np.diag([0,2,2]) + np.linalg.inv((Z.T @ Z)/4))
# Out[8]: 1.7193336918135917
# factor of 2 to get the ball diameter
atol = 2 * 1.72 * solver_tol
if os.name == "nt" and _IS_32BIT:
# FIXME
atol = 1e-2
# Check accuracy consistency
assert_allclose(lr_32.coef_, lr_64.coef_.astype(np.float32), atol=atol)
if solver == "saga" and fit_intercept:
# FIXME: SAGA on sparse data fits the intercept inaccurately with the
# default tol and max_iter parameters.
atol = 1e-1
assert_allclose(lr_32.coef_, lr_32_sparse.coef_, atol=atol)
assert_allclose(lr_64.coef_, lr_64_sparse.coef_, atol=atol)
def test_warm_start_converge_LR():
# Test to see that the logistic regression converges on warm start,
# with multi_class='multinomial'. Non-regressive test for #10836
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = np.array([1] * 100 + [-1] * 100)
lr_no_ws = LogisticRegression(
multi_class="multinomial", solver="sag", warm_start=False, random_state=0
)
lr_ws = LogisticRegression(
multi_class="multinomial", solver="sag", warm_start=True, random_state=0
)
lr_no_ws_loss = log_loss(y, lr_no_ws.fit(X, y).predict_proba(X))
for i in range(5):
lr_ws.fit(X, y)
lr_ws_loss = log_loss(y, lr_ws.predict_proba(X))
assert_allclose(lr_no_ws_loss, lr_ws_loss, rtol=1e-5)
def test_elastic_net_coeffs():
# make sure elasticnet penalty gives different coefficients from l1 and l2
# with saga solver (l1_ratio different from 0 or 1)
X, y = make_classification(random_state=0)
C = 2.0
l1_ratio = 0.5
coeffs = list()
for penalty in ("elasticnet", "l1", "l2"):
lr = LogisticRegression(
penalty=penalty, C=C, solver="saga", random_state=0, l1_ratio=l1_ratio
)
lr.fit(X, y)
coeffs.append(lr.coef_)
elastic_net_coeffs, l1_coeffs, l2_coeffs = coeffs
# make sure coeffs differ by at least .1
assert not np.allclose(elastic_net_coeffs, l1_coeffs, rtol=0, atol=0.1)
assert not np.allclose(elastic_net_coeffs, l2_coeffs, rtol=0, atol=0.1)
assert not np.allclose(l2_coeffs, l1_coeffs, rtol=0, atol=0.1)
@pytest.mark.parametrize("C", [0.001, 0.1, 1, 10, 100, 1000, 1e6])
@pytest.mark.parametrize("penalty, l1_ratio", [("l1", 1), ("l2", 0)])
def test_elastic_net_l1_l2_equivalence(C, penalty, l1_ratio):
# Make sure elasticnet is equivalent to l1 when l1_ratio=1 and to l2 when
# l1_ratio=0.
X, y = make_classification(random_state=0)
lr_enet = LogisticRegression(
penalty="elasticnet", C=C, l1_ratio=l1_ratio, solver="saga", random_state=0
)
lr_expected = LogisticRegression(
penalty=penalty, C=C, solver="saga", random_state=0
)
lr_enet.fit(X, y)
lr_expected.fit(X, y)
    assert_array_almost_equal(lr_enet.coef_, lr_expected.coef_)
# -*- coding: utf-8 -*-
"""General linear model
author: <NAME>
"""
import numpy as np
from numpy.linalg import eigvals, inv, solve, matrix_rank, pinv, svd
from scipy import stats
import pandas as pd
from patsy import DesignInfo
from statsmodels.compat.pandas import Substitution
from statsmodels.base.model import Model
from statsmodels.iolib import summary2
__docformat__ = 'restructuredtext en'
_hypotheses_doc = \
"""hypotheses : list[tuple]
Hypothesis `L*B*M = C` to be tested where B is the parameters in
regression Y = X*B. Each element is a tuple of length 2, 3, or 4:
* (name, contrast_L)
* (name, contrast_L, transform_M)
* (name, contrast_L, transform_M, constant_C)
containing a string `name`, the contrast matrix L, the transform
matrix M (for transforming dependent variables), and right-hand side
constant matrix constant_C, respectively.
contrast_L : 2D array or an array of strings
Left-hand side contrast matrix for hypotheses testing.
        If 2D array, each row is a hypothesis and each column is an
independent variable. At least 1 row
(1 by k_exog, the number of independent variables) is required.
If an array of strings, it will be passed to
patsy.DesignInfo().linear_constraint.
transform_M : 2D array or an array of strings or None, optional
Left hand side transform matrix.
If `None` or left out, it is set to a k_endog by k_endog
identity matrix (i.e. do not transform y matrix).
If an array of strings, it will be passed to
patsy.DesignInfo().linear_constraint.
constant_C : 2D array or None, optional
Right-hand side constant matrix.
        If `None` or left out, it is set to a matrix of zeros.
        Must have the same number of rows as contrast_L and the same
        number of columns as transform_M.
If `hypotheses` is None: 1) the effect of each independent variable
on the dependent variables will be tested. Or 2) if model is created
using a formula, `hypotheses` will be created according to
    `design_info`. 1) and 2) are equivalent if no additional variables
are created by the formula (e.g. dummy variables for categorical
variables and interaction terms)
"""
def _multivariate_ols_fit(endog, exog, method='svd', tolerance=1e-8):
"""
Solve multivariate linear model y = x * params
where y is dependent variables, x is independent variables
Parameters
----------
endog : array_like
each column is a dependent variable
exog : array_like
each column is a independent variable
method : str
'svd' - Singular value decomposition
'pinv' - Moore-Penrose pseudoinverse
    tolerance : float
        A small positive number. Eigenvalues smaller than `tolerance` are
        considered zero.
Returns
-------
a tuple of matrices or values necessary for hypotheses testing
.. [*] https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_introreg_sect012.htm
Notes
-----
Status: experimental and incomplete
"""
y = endog
x = exog
nobs, k_endog = y.shape
    nobs1, k_exog = x.shape
if nobs != nobs1:
raise ValueError('x(n=%d) and y(n=%d) should have the same number of '
'rows!' % (nobs1, nobs))
# Calculate the matrices necessary for hypotheses testing
df_resid = nobs - k_exog
if method == 'pinv':
# Regression coefficients matrix
pinv_x = pinv(x)
params = pinv_x.dot(y)
# inverse of x'x
inv_cov = pinv_x.dot(pinv_x.T)
        if matrix_rank(inv_cov, tol=tolerance) < k_exog:
raise ValueError('Covariance of x singular!')
# Sums of squares and cross-products of residuals
        # Y'Y - (X * params)'(X * params)
t = x.dot(params)
sscpr = np.subtract(y.T.dot(y), t.T.dot(t))
return (params, df_resid, inv_cov, sscpr)
elif method == 'svd':
u, s, v = svd(x, 0)
if (s > tolerance).sum() < len(s):
raise ValueError('Covariance of x singular!')
invs = 1. / s
params = v.T.dot(np.diag(invs)).dot(u.T).dot(y)
inv_cov = v.T.dot(np.diag(np.power(invs, 2))).dot(v)
t = np.diag(s).dot(v).dot(params)
sscpr = np.subtract(y.T.dot(y), t.T.dot(t))
return (params, df_resid, inv_cov, sscpr)
else:
raise ValueError('%s is not a supported method!' % method)
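def _demo_multivariate_ols_fit():
    # Hedged usage sketch (not part of the original module): fit Y = X*B on
    # toy data and confirm that the 'svd' and 'pinv' paths agree on params.
    rng = np.random.RandomState(0)
    x = np.hstack([np.ones((20, 1)), rng.randn(20, 2)])
    y = x.dot(rng.randn(3, 2)) + 0.01 * rng.randn(20, 2)
    params_svd = _multivariate_ols_fit(y, x, method='svd')[0]
    params_pinv = _multivariate_ols_fit(y, x, method='pinv')[0]
    assert np.allclose(params_svd, params_pinv, atol=1e-6)
    return params_svd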
def multivariate_stats(eigenvals,
r_err_sscp,
r_contrast, df_resid, tolerance=1e-8):
"""
For multivariate linear model Y = X * B
Testing hypotheses
L*B*M = 0
where L is contrast matrix, B is the parameters of the
multivariate linear model and M is dependent variable transform matrix.
T = L*inv(X'X)*L'
H = M'B'L'*inv(T)*LBM
E = M'(Y'Y - B'X'XB)M
Parameters
----------
eigenvals : array
The eigenvalues of inv(E + H)*H
r_err_sscp : int
Rank of E + H
r_contrast : int
Rank of T matrix
df_resid : int
Residual degree of freedom (n_samples minus n_variables of X)
tolerance : float
smaller than which eigenvalue is considered 0
Returns
-------
A DataFrame
References
----------
.. [*] https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_introreg_sect012.htm
"""
v = df_resid
p = r_err_sscp
q = r_contrast
s = np.min([p, q])
ind = eigenvals > tolerance
n_e = ind.sum()
eigv2 = eigenvals[ind]
eigv1 = np.array([i / (1 - i) for i in eigv2])
m = (np.abs(p - q) - 1) / 2
n = (v - p - 1) / 2
cols = ['Value', 'Num DF', 'Den DF', 'F Value', 'Pr > F']
index = ["Wilks' lambda", "Pillai's trace",
"Hotelling-Lawley trace", "Roy's greatest root"]
results = pd.DataFrame(columns=cols,
index=index)
def fn(x):
return np.real([x])[0]
results.loc["Wilks' lambda", 'Value'] = fn(np.prod(1 - eigv2))
results.loc["Pillai's trace", 'Value'] = fn(eigv2.sum())
results.loc["Hotelling-Lawley trace", 'Value'] = fn(eigv1.sum())
results.loc["Roy's greatest root", 'Value'] = fn(eigv1.max())
r = v - (p - q + 1)/2
u = (p*q - 2) / 4
df1 = p * q
if p*p + q*q - 5 > 0:
t = np.sqrt((p*p*q*q - 4) / (p*p + q*q - 5))
else:
t = 1
df2 = r*t - 2*u
lmd = results.loc["Wilks' lambda", 'Value']
lmd = np.power(lmd, 1 / t)
F = (1 - lmd) / lmd * df2 / df1
results.loc["Wilks' lambda", 'Num DF'] = df1
results.loc["Wilks' lambda", 'Den DF'] = df2
results.loc["Wilks' lambda", 'F Value'] = F
pval = stats.f.sf(F, df1, df2)
results.loc["Wilks' lambda", 'Pr > F'] = pval
V = results.loc["Pillai's trace", 'Value']
df1 = s * (2*m + s + 1)
df2 = s * (2*n + s + 1)
F = df2 / df1 * V / (s - V)
results.loc["Pillai's trace", 'Num DF'] = df1
results.loc["Pillai's trace", 'Den DF'] = df2
results.loc["Pillai's trace", 'F Value'] = F
pval = stats.f.sf(F, df1, df2)
results.loc["Pillai's trace", 'Pr > F'] = pval
U = results.loc["Hotelling-Lawley trace", 'Value']
if n > 0:
b = (p + 2*n) * (q + 2*n) / 2 / (2*n + 1) / (n - 1)
df1 = p * q
df2 = 4 + (p*q + 2) / (b - 1)
c = (df2 - 2) / 2 / n
F = df2 / df1 * U / c
else:
df1 = s * (2*m + s + 1)
df2 = s * (s*n + 1)
F = df2 / df1 / s * U
results.loc["Hotelling-Lawley trace", 'Num DF'] = df1
results.loc["Hotelling-Lawley trace", 'Den DF'] = df2
results.loc["Hotelling-Lawley trace", 'F Value'] = F
pval = stats.f.sf(F, df1, df2)
results.loc["Hotelling-Lawley trace", 'Pr > F'] = pval
sigma = results.loc["Roy's greatest root", 'Value']
r = np.max([p, q])
df1 = r
df2 = v - r + q
F = df2 / df1 * sigma
results.loc["Roy's greatest root", 'Num DF'] = df1
results.loc["Roy's greatest root", 'Den DF'] = df2
results.loc["Roy's greatest root", 'F Value'] = F
pval = stats.f.sf(F, df1, df2)
results.loc["Roy's greatest root", 'Pr > F'] = pval
return results
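def _demo_multivariate_stats():
    # Hedged usage sketch (illustrative values only): compute the four test
    # statistics from toy eigenvalues of inv(E + H)*H.
    eigenvals = np.array([0.6, 0.2])
    return multivariate_stats(eigenvals, r_err_sscp=2, r_contrast=2,
                              df_resid=20)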
def _multivariate_ols_test(hypotheses, fit_results, exog_names,
endog_names):
def fn(L, M, C):
# .. [1] https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_introreg_sect012.htm
params, df_resid, inv_cov, sscpr = fit_results
# t1 = (L * params)M
t1 = L.dot(params).dot(M) - C
# H = t1'L(X'X)^L't1
t2 = L.dot(inv_cov).dot(L.T)
q = matrix_rank(t2)
H = t1.T.dot(inv(t2)).dot(t1)
# E = M'(Y'Y - B'(X'X)B)M
E = M.T.dot(sscpr).dot(M)
return E, H, q, df_resid
return _multivariate_test(hypotheses, exog_names, endog_names, fn)
@Substitution(hypotheses_doc=_hypotheses_doc)
def _multivariate_test(hypotheses, exog_names, endog_names, fn):
"""
Multivariate linear model hypotheses testing
For y = x * params, where y are the dependent variables and x are the
independent variables, testing L * params * M = 0 where L is the contrast
matrix for hypotheses testing and M is the transformation matrix for
transforming the dependent variables in y.
Algorithm:
T = L*inv(X'X)*L'
H = M'B'L'*inv(T)*LBM
E = M'(Y'Y - B'X'XB)M
And then finding the eigenvalues of inv(H + E)*H
.. [*] https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_introreg_sect012.htm
Parameters
----------
%(hypotheses_doc)s
k_xvar : int
The number of independent variables
k_yvar : int
The number of dependent variables
fn : function
a function fn(contrast_L, transform_M) that returns E, H, q, df_resid
where q is the rank of T matrix
Returns
-------
results : MANOVAResults
"""
k_xvar = len(exog_names)
k_yvar = len(endog_names)
results = {}
for hypo in hypotheses:
        if len(hypo) == 2:
name, L = hypo
M = None
C = None
elif len(hypo) == 3:
name, L, M = hypo
C = None
elif len(hypo) == 4:
name, L, M, C = hypo
else:
raise ValueError('hypotheses must be a tuple of length 2, 3 or 4.'
' len(hypotheses)=%d' % len(hypo))
if any(isinstance(j, str) for j in L):
L = DesignInfo(exog_names).linear_constraint(L).coefs
else:
if not isinstance(L, np.ndarray) or len(L.shape) != 2:
raise ValueError('Contrast matrix L must be a 2-d array!')
if L.shape[1] != k_xvar:
raise ValueError('Contrast matrix L should have the same '
'number of columns as exog! %d != %d' %
(L.shape[1], k_xvar))
if M is None:
M = np.eye(k_yvar)
elif any(isinstance(j, str) for j in M):
M = DesignInfo(endog_names).linear_constraint(M).coefs.T
        else:
            if not isinstance(M, np.ndarray) or len(M.shape) != 2:
                raise ValueError('Transform matrix M must be a 2-d array!')
            if M.shape[0] != k_yvar:
                raise ValueError('Transform matrix M should have the same '
                                 'number of rows as the number of columns '
                                 'of endog! %d != %d' %
                                 (M.shape[0], k_yvar))
if C is None:
C = np.zeros([L.shape[0], M.shape[1]])
elif not isinstance(C, np.ndarray):
raise ValueError('Constant matrix C must be a 2-d array!')
if C.shape[0] != L.shape[0]:
raise ValueError('contrast L and constant C must have the same '
'number of rows! %d!=%d'
% (L.shape[0], C.shape[0]))
if C.shape[1] != M.shape[1]:
raise ValueError('transform M and constant C must have the same '
'number of columns! %d!=%d'
% (M.shape[1], C.shape[1]))
E, H, q, df_resid = fn(L, M, C)
EH = np.add(E, H)
p = matrix_rank(EH)
# eigenvalues of inv(E + H)H
eigv2 = np.sort(eigvals(solve(EH, H)))
stat_table = multivariate_stats(eigv2, p, q, df_resid)
results[name] = {'stat':stat_table, 'contrast_L':L,
'transform_M':M, 'constant_C':C}
return results
class _MultivariateOLS(Model):
"""
Multivariate linear model via least squares
Parameters
----------
endog : array_like
Dependent variables. A nobs x k_endog array where nobs is
the number of observations and k_endog is the number of dependent
variables
exog : array_like
Independent variables. A nobs x k_exog array where nobs is the
number of observations and k_exog is the number of independent
variables. An intercept is not included by default and should be added
by the user (models specified using a formula include an intercept by
default)
Attributes
----------
endog : array
See Parameters.
exog : array
See Parameters.
"""
_formula_max_endog = None
def __init__(self, endog, exog, missing='none', hasconst=None, **kwargs):
if len(endog.shape) == 1 or endog.shape[1] == 1:
raise ValueError('There must be more than one dependent variable'
' to fit multivariate OLS!')
super(_MultivariateOLS, self).__init__(endog, exog, missing=missing,
hasconst=hasconst, **kwargs)
def fit(self, method='svd'):
self._fittedmod = _multivariate_ols_fit(
self.endog, self.exog, method=method)
return _MultivariateOLSResults(self)
class _MultivariateOLSResults(object):
"""
_MultivariateOLS results class
"""
def __init__(self, fitted_mv_ols):
if (hasattr(fitted_mv_ols, 'data') and
hasattr(fitted_mv_ols.data, 'design_info')):
self.design_info = fitted_mv_ols.data.design_info
else:
self.design_info = None
self.exog_names = fitted_mv_ols.exog_names
self.endog_names = fitted_mv_ols.endog_names
self._fittedmod = fitted_mv_ols._fittedmod
def __str__(self):
return self.summary().__str__()
@Substitution(hypotheses_doc=_hypotheses_doc)
def mv_test(self, hypotheses=None):
"""
Linear hypotheses testing
Parameters
----------
%(hypotheses_doc)s
Returns
-------
results: _MultivariateOLSResults
Notes
-----
Tests hypotheses of the form
L * params * M = C
where `params` is the regression coefficient matrix for the
linear model y = x * params, `L` is the contrast matrix, `M` is the
dependent variable transform matrix and C is the constant matrix.
"""
k_xvar = len(self.exog_names)
if hypotheses is None:
if self.design_info is not None:
terms = self.design_info.term_name_slices
hypotheses = []
for key in terms:
L_contrast = np.eye(k_xvar)[terms[key], :]
hypotheses.append([key, L_contrast, None])
else:
hypotheses = []
for i in range(k_xvar):
name = 'x%d' % (i)
                L = np.zeros([1, k_xvar])
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from os.path import join
import sys
import paddle
import numpy as np
import pandas as pd
from paddle.io import Dataset, DataLoader
from scipy.sparse import csr_matrix
import scipy.sparse as sp
from time import time
class BasicDataset(Dataset):
def __init__(self):
print("init dataset")
@property
def n_users(self):
raise NotImplementedError
@property
def m_items(self):
raise NotImplementedError
@property
def trainDataSize(self):
raise NotImplementedError
@property
def testDict(self):
raise NotImplementedError
@property
def allPos(self):
raise NotImplementedError
def getUserItemFeedback(self, users, items):
raise NotImplementedError
def getUserPosItems(self, users):
raise NotImplementedError
def getUserNegItems(self, users):
"""
not necessary for large datasets;
returning all negative items is impractical for a very large item set
"""
raise NotImplementedError
def getSparseGraph(self):
"""
build a graph in torch.sparse.IntTensor.
Details in NGCF's matrix form
A =
|I, R|
|R^T, I|
"""
raise NotImplementedError
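# A minimal sketch (assumption, not part of the original class) of the block
# adjacency described in getSparseGraph, using scipy.sparse with a hypothetical
# user-item interaction matrix R of shape (n_users, m_items):
#   R = csr_matrix((np.ones(len(trainUser)), (trainUser, trainItem)),
#                  shape=(n_users, m_items))
#   A = sp.bmat([[sp.eye(n_users), R],
#                [R.T, sp.eye(m_items)]], format='csr')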
class Loader(BasicDataset):
"""
gowalla dataset
"""
def __init__(self, args, path="./gowalla"):
# train or test
print(f'loading [{path}]')
print(args)
self.n_user = 0
self.m_item = 0
train_file = path + '/train.txt'
test_file = path + '/test.txt'
self.path = path
trainUniqueUsers, trainItem, trainUser = [], [], []
testUniqueUsers, testItem, testUser = [], [], []
self.traindataSize = 0
self.testDataSize = 0
with open(train_file) as f:
for l in f.readlines():
if len(l) > 0:
l = l.strip('\n').split(' ')
items = [int(i) if i != '' else -1 for i in l[1:]]
uid = int(l[0])
trainUniqueUsers.append(uid)
trainUser.extend([uid] * len(items))
trainItem.extend(items)
self.m_item = max(self.m_item, max(items))
self.n_user = max(self.n_user, uid)
self.traindataSize += len(items)
self.trainUniqueUsers = np.array(trainUniqueUsers)
self.trainUser = np.array(trainUser)
self.trainItem = np.array(trainItem)
with open(test_file) as f:
for l in f.readlines():
if len(l) > 0:
l = l.strip('\n').split(' ')
items = [int(i) if i != '' else -1 for i in l[1:]]
uid = int(l[0])
testUniqueUsers.append(uid)
testUser.extend([uid] * len(items))
testItem.extend(items)
self.m_item = max(self.m_item, max(items))
self.n_user = max(self.n_user, uid)
self.testDataSize += len(items)
self.m_item += 1
self.n_user += 1
self.testUniqueUsers = np.array(testUniqueUsers)
self.testUser = np.array(testUser)
self.testItem = np.array(testItem)
import typing
from typing import Any, Callable, List, Tuple, Union
import numpy as np
import os, sys
from PIL import Image
from .abc_interpreter import Interpreter
from ..data_processor.readers import preprocess_image, read_image, restore_image, preprocess_inputs
from ..data_processor.visualizer import visualize_heatmap
class GradCAMInterpreter(Interpreter):
"""
Gradient CAM Interpreter.
More details regarding the GradCAM method can be found in the original paper:
https://arxiv.org/abs/1610.02391
"""
def __init__(self,
paddle_model,
trained_model_path,
use_cuda=True,
model_input_shape=[3, 224, 224]) -> None:
"""
Initialize the GradCAMInterpreter.
Args:
paddle_model (callable): A user-defined function that gives access to model predictions.
It takes the following arguments:
- data: Data inputs.
and outputs predictions. See the example at the end of ``interpret()``.
trained_model_path (str): The pretrained model directory.
use_cuda (bool, optional): Whether or not to use cuda. Default: True
model_input_shape (list, optional): The input shape of the model. Default: [3, 224, 224]
"""
Interpreter.__init__(self)
self.paddle_model = paddle_model
self.trained_model_path = trained_model_path
self.use_cuda = use_cuda
self.model_input_shape = model_input_shape
self.paddle_prepared = False
def interpret(self,
inputs,
target_layer_name,
labels=None,
visual=True,
save_path=None):
"""
Main function of the interpreter.
Args:
inputs (str or list of strs or numpy.ndarray): The input image filepath or a list of filepaths or numpy array of read images.
target_layer_name (str): The target layer to calculate gradients.
labels (list or tuple or numpy.ndarray, optional): The target labels to analyze. The number of labels should be equal to the number of images. If None, the most likely label for each image will be used. Default: None
visual (bool, optional): Whether or not to visualize the processed image. Default: True
save_path (str or list of strs or None, optional): The filepath(s) to save the processed image(s). If None, the image will not be saved. Default: None
:return: interpretations/heatmap for each image
:rtype: numpy.ndarray
Example::
import interpretdl as it
def paddle_model(data):
import paddle.fluid as fluid
class_num = 1000
model = ResNet50()
logits = model.net(input=image_input, class_dim=class_num)
probs = fluid.layers.softmax(logits, axis=-1)
return probs
gradcam = it.GradCAMInterpreter(paddle_model, "assets/ResNet50_pretrained",True)
gradcam.interpret(
'assets/catdog.png',
'res5c.add.output.5.tmp_0',
label=None,
visual=True,
save_path='assets/gradcam_test.jpg')
"""
imgs, data, save_path = preprocess_inputs(inputs, save_path,
self.model_input_shape)
self.target_layer_name = target_layer_name
if not self.paddle_prepared:
self._paddle_prepare()
bsz = len(data)
if labels is None:
_, _, out = self.predict_fn(
data, np.zeros(
(bsz, 1), dtype='int64'))
labels = np.argmax(out, axis=1)
labels = np.array(labels).reshape((bsz, 1))
feature_map, gradients, _ = self.predict_fn(data, labels)
f = np.array(feature_map)
g = np.array(gradients)
mean_g = np.mean(g, (2, 3))
heatmap = f.transpose([0, 2, 3, 1])
dim_array = np.ones((1, heatmap.ndim), int).ravel()
dim_array[heatmap.ndim - 1] = -1
dim_array[0] = bsz
heatmap = heatmap * mean_g.reshape(dim_array)
heatmap = np.mean(heatmap, axis=-1)
heatmap = np.maximum(heatmap, 0)
heatmap_max = np.max(heatmap, axis=tuple(np.arange(1, heatmap.ndim)))
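# Plausible continuation (assumption, not from the original source): divide
# each heatmap by its per-image maximum to normalize into [0, 1], e.g.
#   heatmap = heatmap / heatmap_max.reshape((bsz,) + (1,) * (heatmap.ndim - 1))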
import numpy as np
import os
class Perceptron(object):
"""docstring for Perceptron. Creates a single perceptron with multiple inputs and a bias.
Attributes:
inputs: The number of inputs given to the perceptron. Does not include the bias.
bias: The bias for each perceptron. Defaults to 1.0. """
def __init__(self, inputs, bias=1.0):
"""Create a perceptron with a given number of inputs and a bias."""
self.weights = (np.random.rand(inputs + 1) * 2) - 1
self.bias = bias
# Are we really adding a bias to the weights?
def activate(self, x):
"""Take the inputs and bias to produce the output of the Perceptron."""
sum = np.dot(np.append(x,self.bias),self.weights)
return self.sigmoid(sum)
def create_weights(self, init_weights):
""""Use this function to assign known weights to the perceptron."""
self.weights = np.array(init_weights)
def sigmoid(self, x):
"""Evaluate the perceptron function for an input, x."""
return 1 / (1 + np.exp(-x))
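# Example usage (hypothetical weights implementing an AND gate):
#   p = Perceptron(inputs=2)
#   p.create_weights([10.0, 10.0, -15.0])  # [w1, w2, w_bias]; bias input is 1.0
#   p.activate([1, 1])   # ~0.99
#   p.activate([0, 1])   # ~0.01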
class Multilayer_Perceptron(object):
"""docstring for Multilayer_Perceptron. Creates a single perceptron with multiple inputs and a bias.
Attributes:
layers: A python list detailing the number of elements in each layer.
bias: The bias term. The same bias used for all
eta: The learning rate of the system. """
def __init__(self, layers, bias=1.0, eta=0.5):
self.layers = np.array(layers, dtype=object)
self.bias = bias
self.eta = eta
import numpy as np
from matplotlib import pyplot as plt
import tensorflow as tf
import keras
from google.colab.patches import cv2_imshow
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Layer, Activation, Dense, Flatten, Dropout, Lambda, Conv2D, MaxPooling2D, UpSampling2D, Conv2DTranspose, SpatialDropout2D
from tensorflow.keras import losses
from tensorflow.keras import backend as K
from tensorflow.keras.utils import to_categorical
import numpy as np
from matplotlib import pyplot as plt
import tensorflow as tf
import keras
import cv2
import matplotlib.gridspec as gridspec
from keras.datasets import cifar10
def get_indices_from_image_batch(image_batch,encoder,vq_model):
encoder_output = encoder.predict(image_batch)
vq_output = vq_model.predict(encoder_output)
return vq_output[-1]
def reconstruct_image_from_indices(indices_batch,vq_model,decoder):
output_vq = vq_model.layers[2].quantize(indices_batch)[0]
return decoder.predict(output_vq)
def plot_for_testing(x_train,encoder,vq_model,decoder):
for i in range(50):
indices = get_indices_from_image_batch(x_train[i:i+1],encoder,vq_model)
out = reconstruct_image_from_indices(indices,vq_model,decoder)
plt.imshow(x_train[i]*0.5+0.5)
plt.show()
plt.imshow(out[0]*0.5+0.5)
plt.show()
def showReconstructedRealBatch(x_train,encoder,vq_model,decoder,epoch):
"""
Show bathches of real and reconstructed images
"""
def show_batch_image(img):
img = np.reshape(img,(len(img),32,32,3))
if np.min(img) < 0:  # assumed: rescale images from [-1, 1] back to [0, 1]
    img = img * 0.5 + 0.5
# -*- coding: utf-8 -*-
"""Top-level package for gnpy."""
__author__ = """<TBD>"""
__email__ = '<<EMAIL>'
__version__ = '0.1.0'
import numpy as np
import multiprocessing as mp
import scipy.interpolate as interp
"""
GNPy: a Python 3 implementation of the Gaussian Noise (GN) Model of nonlinear
propagation, developed by the OptCom group, Department of Electronics and
Telecommunications, Politecnico di Torino, Italy
"""
__credits__ = ["<NAME>", "<NAME>", "<NAME>"]
def raised_cosine_comb(f, rs, roll_off, center_freq, power):
""" Returns an array storing the PSD of a WDM comb of raised cosine shaped
channels at the input frequencies defined in array f
:param f: Array of frequencies in THz
:param rs: Array of Symbol Rates in TBaud. One Symbol rate for each channel
:param roll_off: Array of roll-off factors [0,1). One per channel
:param center_freq: Array of channels central frequencies in THz. One per channel
:param power: Array of channel powers in W. One per channel
:return: PSD of the WDM comb evaluated over f
"""
ts_arr = 1.0 / rs
passband_arr = (1.0 - roll_off) / (2.0 * ts_arr)
stopband_arr = (1.0 + roll_off) / (2.0 * ts_arr)
g = power / rs
psd = np.zeros(np.shape(f))
for ind in range(np.size(center_freq)):
f_nch = center_freq[ind]
g_ch = g[ind]
ts = ts_arr[ind]
passband = passband_arr[ind]
stopband = stopband_arr[ind]
ff = np.abs(f - f_nch)
tf = ff - passband
if roll_off[ind] == 0:
psd = np.where(tf <= 0, g_ch, 0.) + psd
else:
psd = g_ch * (np.where(tf <= 0, 1., 0.) + 1.0 / 2.0 * (1 + np.cos(np.pi * ts / roll_off[ind] *
tf)) * np.where(tf > 0, 1., 0.) *
np.where(np.abs(ff) <= stopband, 1., 0.)) + psd
return psd
def fwm_eff(a, Lspan, b2, ff):
""" Computes the four-wave mixing efficiency given the fiber characteristics
over a given frequency set ff
:param a: Fiber loss coefficient in 1/km
:param Lspan: Fiber length in km
:param b2: Fiber Dispersion coefficient in ps/THz/km
:param ff: Array of Frequency points in THz
:return: FWM efficiency rho
"""
rho = np.power(np.abs((1.0 - np.exp(-2.0 * a * Lspan + 1j * 4.0 * np.pi * np.pi * b2 * Lspan * ff)) / (
2.0 * a - 1j * 4.0 * np.pi * np.pi * b2 * ff)), 2)
return rho
def get_freqarray(f, Bopt, fmax, max_step, f_dense_low, f_dense_up, df_dense):
""" Returns a non-uniformly spaced frequency array useful for fast GN-model.
integration. The frequency array is made of a denser area, sided by two
log-spaced arrays
:param f: Central frequency at which NLI is evaluated in THz
:param Bopt: Total optical bandwidth of the system in THz
:param fmax: Upper limit of the integration domain in THz
:param max_step: Maximum step size for frequency array definition in THz
:param f_dense_low: Lower limit of denser frequency region in THz
:param f_dense_up: Upper limit of denser frequency region in THz
:param df_dense: Step size to be used in the denser frequency region in THz
:return: Non uniformly defined frequency array
"""
f_dense = np.arange(f_dense_low, f_dense_up, df_dense)
k = Bopt / 2.0 / (Bopt / 2.0 - max_step) # Compute Step ratio for log-spaced array definition
if f < 0:
Nlog_short = np.ceil(np.log(fmax / np.abs(f_dense_low)) / np.log(k) + 1.0)
f1_short = -(np.abs(f_dense_low) * np.power(k, np.arange(Nlog_short, 0.0, -1.0) - 1.0))
k = (Bopt / 2 + (np.abs(f_dense_up) - f_dense_low)) / (Bopt / 2.0 - max_step + (np.abs(f_dense_up) - f_dense_up))
Nlog_long = np.ceil(np.log((fmax + (np.abs(f_dense_up) - f_dense_up)) / abs(f_dense_up)) * 1.0 / np.log(k) + 1.0)
f1_long = np.abs(f_dense_up) * np.power(k, (np.arange(1, Nlog_long + 1) - 1.0)) - (
np.abs(f_dense_up) - f_dense_up)
f1_array = np.concatenate([f1_short, f_dense[1:], f1_long])
else:
Nlog_short = np.ceil(np.log(fmax / np.abs(f_dense_up)) / np.log(k) + 1.0)
f1_short = f_dense_up * np.power(k, np.arange(1, Nlog_short + 1.0) - 1.0)
k = (Bopt / 2.0 + (abs(f_dense_low) + f_dense_low)) / (Bopt / 2.0 - max_step + (abs(f_dense_low) + f_dense_low))
Nlog_long = np.ceil(np.log((fmax + (np.abs(f_dense_low) + f_dense_low)) / np.abs(f_dense_low)) / np.log(k) + 1)
f1_long = -(np.abs(f_dense_low) * np.power(k, np.arange(Nlog_long, 0, -1) - 1.0)) + (
abs(f_dense_low) + f_dense_low)
f1_array = np.concatenate([f1_long, f_dense[1:], f1_short])
return f1_array
def GN_integral(b2, Lspan, a_db, gam, f_ch, b_ch, roll_off, power, Nch, model_param):
""" GN_integral computes the GN reference formula via smart brute force integration. The Gaussian Noise model is
applied in its incoherent form (phased-array factor =1). The function computes the integral by columns: for each f1,
a non-uniformly spaced f2 array is generated, and the integrand function is computed there. At the end of the loop
on f1, the overall GNLI is computed. Accuracy can be tuned by operating on model_param argument.
:param b2: Fiber dispersion coefficient in ps/THz/km. Scalar
:param Lspan: Fiber Span length in km. Scalar
:param a_db: Fiber loss coeffiecient in dB/km. Scalar
:param gam: Fiber nonlinear coefficient in 1/W/km. Scalar
:param f_ch: Baseband channels center frequencies in THz. Array of size 1xNch
:param b_ch: Channels' -3 dB bandwidth. Array of size 1xNch
:param roll_off: Channels' Roll-off factors [0,1). Array of size 1xNch
:param power: Channels' power values in W. Array of size 1xNch
:param Nch: Number of channels. Scalar
:param model_param: Dictionary with model parameters for accuracy tuning
model_param['min_FWM_inv']: Minimum FWM efficiency value to be considered for high density
integration in dB
model_param['n_grid']: Maximum Number of integration points to be used in each frequency slot of
the spectrum
model_param['n_grid_min']: Minimum Number of integration points to be used in each frequency
slot of the spectrum
model_param['f_array']: Frequencies at which evaluate GNLI, expressed in THz
:return: GNLI: power spectral density in W/THz of the nonlinear interference at frequencies model_param['f_array']
"""
alpha_lin = a_db / 20.0 / np.log10(np.e) # Conversion in linear units 1/km
min_FWM_inv = np.power(10, model_param['min_FWM_inv'] / 10) # Conversion in linear units
n_grid = model_param['n_grid']
n_grid_min = model_param['n_grid_min']
f_array = model_param['f_array']
fmax = (f_ch[-1] - (b_ch[-1] / 2.0)) - (f_ch[0] - (b_ch[0] / 2.0)) # Get frequency limit
f2eval = np.max(np.diff(f_ch))
Bopt = f2eval * Nch # Overall optical bandwidth [THz]
min_step = f2eval / n_grid # Minimum integration step
max_step = f2eval / n_grid_min # Maximum integration step
f_dense_start = np.abs(
np.sqrt(np.power(alpha_lin, 2) / (4.0 * np.power(np.pi, 4) * b2 * b2) * (min_FWM_inv - 1.0)) / f2eval)
f_ind_eval = 0
GNLI = np.full(f_array.size, np.nan) # Pre-allocate results
for f in f_array: # Loop over f
f_dense_low = f - f_dense_start
f_dense_up = f + f_dense_start
if f_dense_low < -fmax:
f_dense_low = -fmax
if f_dense_low == 0.0:
f_dense_low = -min_step
if f_dense_up == 0.0:
f_dense_up = min_step
if f_dense_up > fmax:
f_dense_up = fmax
f_dense_width = np.abs(f_dense_up - f_dense_low)
n_grid_dense = np.ceil(f_dense_width / min_step)
df = f_dense_width / n_grid_dense
# Get non-uniformly spaced f1 array
f1_array = get_freqarray(f, Bopt, fmax, max_step, f_dense_low, f_dense_up, df)
G1 = raised_cosine_comb(f1_array, b_ch, roll_off, f_ch, power) # Get corresponding spectrum
Gpart = np.zeros(f1_array.size) # Pre-allocate partial result for inner integral
f_ind = 0
for f1 in f1_array: # Loop over f1
if f1 != f:
f_lim = np.sqrt(np.power(alpha_lin, 2) / (4.0 * np.power(np.pi, 4) * b2 * b2) * (min_FWM_inv - 1.0)) / (
f1 - f) + f
f2_dense_up = np.maximum(f_lim, -f_lim)
f2_dense_low = np.minimum(f_lim, -f_lim)
if f2_dense_low == 0:
f2_dense_low = -min_step
if f2_dense_up == 0:
f2_dense_up = min_step
if f2_dense_low < -fmax:
f2_dense_low = -fmax
if f2_dense_up > fmax:
f2_dense_up = fmax
else:
f2_dense_up = fmax
f2_dense_low = -fmax
f2_dense_width = np.abs(f2_dense_up - f2_dense_low)
n2_grid_dense = np.ceil(f2_dense_width / min_step)
df2 = f2_dense_width / n2_grid_dense
# Get non-uniformly spaced f2 array
f2_array = get_freqarray(f, Bopt, fmax, max_step, f2_dense_low, f2_dense_up, df2)
f2_array = f2_array[f2_array >= f1] # Do not consider points below the bisector of quadrants I and III
if f2_array.size > 0:
G2 = raised_cosine_comb(f2_array, b_ch, roll_off, f_ch, power) # Get spectrum there
f3_array = f1 + f2_array - f # Compute f3
G3 = raised_cosine_comb(f3_array, b_ch, roll_off, f_ch, power) # Get spectrum over f3
G = G2 * G3 * G1[f_ind]
if np.count_nonzero(G):
FWM_eff = fwm_eff(alpha_lin, Lspan, b2, (f1 - f) * (f2_array - f)) # Compute FWM efficiency
Gpart[f_ind] = 2.0 * np.trapz(FWM_eff * G, f2_array) # Compute inner integral
f_ind += 1
# Compute outer integral. Nominal span loss already compensated
GNLI[f_ind_eval] = 16.0 / 27.0 * gam * gam * np.trapz(Gpart, f1_array)
f_ind_eval += 1 # Next frequency
return GNLI # Return GNLI array in W/THz and the array of the corresponding frequencies
def compute_psi(b2, l_eff_a, f_ch, channel_index, interfering_index, b_ch):
""" compute_psi computes the psi coefficient of the analytical formula.
:param b2: Fiber dispersion coefficient in ps/THz/km. Scalar
:param l_eff_a: Asymptotic effective length in km. Scalar
:param f_ch: Baseband channels center frequencies in THz. Array of size 1xNch
:param channel_index: Index of the channel. Scalar
:param interfering_index: Index of the interfering signal. Scalar
:param b_ch: Channels' -3 dB bandwidth [THz]. Array of size 1xNch
:return: psi: the coefficient
"""
b2 = np.abs(b2)
if channel_index == interfering_index: # The signal interferes with itself
b_ch_sig = b_ch[channel_index]
psi = np.arcsinh(0.5 * np.pi ** 2.0 * l_eff_a * b2 * b_ch_sig ** 2.0)
else:
f_sig = f_ch[channel_index]
b_ch_sig = b_ch[channel_index]
f_int = f_ch[interfering_index]
b_ch_int = b_ch[interfering_index]
del_f = f_sig - f_int
psi = np.arcsinh(np.pi ** 2.0 * l_eff_a * b2 * b_ch_sig * (del_f + 0.5 * b_ch_int))
psi -= np.arcsinh(np.pi ** 2.0 * l_eff_a * b2 * b_ch_sig * (del_f - 0.5 * b_ch_int))
return psi
def analytic_formula(ind, b2, l_eff, l_eff_a, gam, f_ch, g_ch, b_ch, n_ch):
""" analytic_formula computes the analytical formula.
:param ind: index of the channel at which g_nli is computed. Scalar
:param b2: Fiber dispersion coefficient in ps/THz/km. Scalar
:param l_eff: Effective length in km. Scalar
:param l_eff_a: Asymptotic effective length in km. Scalar
:param gam: Fiber nonlinear coefficient in 1/W/km. Scalar
:param f_ch: Baseband channels center frequencies in THz. Array of size 1xNch
:param g_ch: Power spectral density W/THz. Array of size 1xNch
:param b_ch: Channels' -3 dB bandwidth [THz]. Array of size 1xNch
:param n_ch: Number of channels. Scalar
:return: g_nli: power spectral density in W/THz of the nonlinear interference
"""
ch_psd = g_ch[ind]
b2 = abs(b2)
g_nli = 0.0
for n in np.arange(0, n_ch):
psi = compute_psi(b2, l_eff_a, f_ch, ind, n, b_ch)
g_nli += g_ch[n] * ch_psd ** 2.0 * psi
g_nli *= (16.0 / 27.0) * (gam * l_eff) ** 2.0 / (2.0 * np.pi * b2 * l_eff_a)
return g_nli
def gn_analytic(b2, l_span, a_db, gam, f_ch, b_ch, power, n_ch):
""" gn_analytic computes the GN reference formula via analytical solution.
:param b2: Fiber dispersion coefficient in ps/THz/km. Scalar
:param l_span: Fiber Span length in km. Scalar
:param a_db: Fiber loss coefficient in dB/km. Scalar
:param gam: Fiber nonlinear coefficient in 1/W/km. Scalar
:param f_ch: Baseband channels center frequencies in THz. Array of size 1xNch
:param b_ch: Channels' -3 dB bandwidth [THz]. Array of size 1xNch
:param power: Channels' power values in W. Array of size 1xNch
:param n_ch: Number of channels. Scalar
:return: g_nli: power spectral density in W/THz of the nonlinear interference at frequencies model_param['f_array']
"""
g_ch = power / b_ch
alpha_lin = a_db / 20.0 / np.log10(np.e) # Conversion in linear units 1/km
l_eff = (1.0 - np.exp(-2.0 * alpha_lin * l_span)) / (2.0 * alpha_lin) # Effective length
l_eff_a = 1.0 / (2.0 * alpha_lin) # Asymptotic effective length
g_nli = np.zeros(f_ch.size)
for ind in np.arange(0, f_ch.size):
g_nli[ind] = analytic_formula(ind, b2, l_eff, l_eff_a, gam, f_ch, g_ch, b_ch, n_ch)
return g_nli
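# Hypothetical usage sketch (illustrative parameter values only): five
# 32 GBaud channels on a 50 GHz grid over a single 100 km span.
#   f_ch = np.arange(-2, 3) * 0.05                       # THz
#   g_nli = gn_analytic(b2=21.3, l_span=100.0, a_db=0.2, gam=1.3,
#                       f_ch=f_ch, b_ch=np.full(5, 0.032),
#                       power=np.full(5, 1e-3), n_ch=5)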
def get_f_computed_interp(f_ch, n_not_interp):
""" get_f_computed_array returns the arrays containing the frequencies at which g_nli is computed and interpolated.
:param f_ch: the overall frequency array. Array of size 1xnum_ch
:param n_not_interp: the number of points at which g_nli has to be computed
:return: f_nli_comp: the array containing the frequencies at which g_nli is computed
:return: f_nli_interp: the array containing the frequencies at which g_nli is interpolated
"""
num_ch = len(f_ch)
if num_ch < n_not_interp: # It's useless to compute g_nli in a number of points larger than num_ch
n_not_interp = num_ch
# Compute f_nli_comp
n_not_interp_left = int(np.ceil((n_not_interp - 1.0) / 2.0))
n_not_interp_right = int(np.floor((n_not_interp - 1.0) / 2.0))
central_index = len(f_ch) // 2
print(central_index)
f_nli_central = np.array([f_ch[central_index]], copy=True)
if n_not_interp_left > 0:
index = np.linspace(0, central_index - 1, n_not_interp_left, dtype='int')
f_nli_left = np.array(f_ch[index], copy=True)
else:
f_nli_left = np.array([])
if n_not_interp_right > 0:
index = np.linspace(-1, -central_index, n_not_interp_right, dtype='int')
f_nli_right = np.array(f_ch[index], copy=True)
f_nli_right = f_nli_right[::-1] # Reverse the order of the array
else:
f_nli_right = np.array([])
f_nli_comp = np.concatenate([f_nli_left, f_nli_central, f_nli_right])
# Compute f_nli_interp
f_ch_sorted = np.sort(f_ch)
index = np.searchsorted(f_ch_sorted, f_nli_comp)
f_nli_interp = np.array(f_ch, copy=True)
f_nli_interp = np.delete(f_nli_interp, index)
return f_nli_comp, f_nli_interp
def interpolate_in_range(x, y, x_new, kind_interp):
""" Given some samples y of the function y(x), interpolate_in_range returns the interpolation of values y(x_new)
:param x: The points at which y(x) is evaluated. Array
:param y: The values of y(x). Array
:param x_new: The values at which y(x) has to be interpolated. Array
:param kind_interp: The interpolation method of the function scipy.interpolate.interp1d. String
:return: y_new: the new interpolates samples
"""
if x.size == 1:
y_new = y * np.ones(x_new.size)
elif x.size == 2:
x = np.append(x, x_new[-1])
y = np.append(y, y[-1])
func = interp.interp1d(x, y, kind=kind_interp, bounds_error=False)
y_new = func(x_new)
else:
func = interp.interp1d(x, y, kind=kind_interp, bounds_error=False)
y_new = func(x_new)
return y_new
def gn_model(spectrum_param, fiber_param, accuracy_param, n_cores):
""" gn_model can compute the gn model both analytically or through the smart brute force
integral.
:param spectrum_param: Dictionary with spectrum parameters
spectrum_param['num_ch']: Number of channels. Scalar
spectrum_param['f_ch']: Baseband channels center frequencies in THz. Array of size 1xnum_ch
spectrum_param['b_ch']: Channels' -3 dB band [THz]. Array of size 1xnum_ch
spectrum_param['roll_off']: Channels' Roll-off factors [0,1). Array of size 1xnum_ch
spectrum_param['power']: Channels' power values in W. Array of size 1xnum_ch
:param fiber_param: Dictionary with the parameters of the fiber
fiber_param['alpha']: Fiber loss coefficient in dB/km. Scalar
fiber_param['span_length']: Fiber Span length in km. Scalar
fiber_param['beta_2']: Fiber dispersion coefficient in ps/THz/km. Scalar
fiber_param['gamma']: Fiber nonlinear coefficient in 1/W/km. Scalar
:param accuracy_param: Dictionary with model parameters for accuracy tuning
accuracy_param['is_analytic']: A boolean indicating if you want to compute the NLI through
the analytic formula (is_analytic = True) of the smart brute force integration (is_analytic =
False). Boolean
accuracy_param['points_not_interp']: The number of NLI which will be calculated. Others are
interpolated
accuracy_param['kind_interp']: The kind of interpolation using the function
scipy.interpolate.interp1d
accuracy_param['th_fwm']: Minimum FWM efficiency value to be considered for high density
integration in dB
accuracy_param['n_points']: Maximum Number of integration points to be used in each frequency
slot of the spectrum
accuracy_param['n_points_min']: Minimum Number of integration points to be used in each
frequency
slot of the spectrum
:return: g_nli_comp: the NLI power spectral density in W/THz computed through GN model
:return: f_nli_comp: the frequencies at which g_nli_comp is evaluated
:return: g_nli_interp: the NLI power spectral density in W/THz computed through interpolation of g_nli_comp
:return: f_nli_interp: the frequencies at which g_nli_interp is estimated
"""
# Take signal parameters
num_ch = spectrum_param['num_ch']
f_ch = spectrum_param['f_ch']
b_ch = spectrum_param['b_ch']
roll_off = spectrum_param['roll_off']
power = spectrum_param['power']
# Take fiber parameters
a_db = fiber_param['alpha']
l_span = fiber_param['span_length']
beta2 = fiber_param['beta_2']
gam = fiber_param['gamma']
# Take accuracy parameters
is_analytic = accuracy_param['is_analytic']
n_not_interp = accuracy_param['points_not_interp']
kind_interp = accuracy_param['kind_interp']
th_fwm = accuracy_param['th_fwm']
n_points = accuracy_param['n_points']
n_points_min = accuracy_param['n_points_min']
# Computing NLI
if is_analytic: # Analytic solution
g_nli_comp = gn_analytic(beta2, l_span, a_db, gam, f_ch, b_ch, power, num_ch)
f_nli_comp = np.copy(f_ch)
g_nli_interp = []
f_nli_interp = []
else: # Smart brute force integration
f_nli_comp, f_nli_interp = get_f_computed_interp(f_ch, n_not_interp)
model_param = {'min_FWM_inv': th_fwm, 'n_grid': n_points, 'n_grid_min': n_points_min,
'f_array': np.array(f_nli_comp, copy=True)}
import numpy as np
import matplotlib.pyplot as plt
import sys
import csv
filenames = sys.argv
#print(filenames)
timestamp = filenames[1][13:-4]
data = []
for i in range(1,len(filenames)):
with open(filenames[i], 'r') as csvfile:
raw = csv.reader(csvfile)
raw = list(raw)
data.append(raw)
Original = np.array(data[0])
Stramp = np.array(data[1])
Trigger = np.array(data[2])
Origtime = np.array(data[3])[0].astype(float)
Stramptime = np.array(data[4])[0].astype(float)
fastres = Origtime[1] - Origtime[0]
slowres = Stramptime[1] - Stramptime[0]
datanum = np.shape(Original)[0] - 2
print('There are', datanum, 'events')
timerngmin = -350 #in ns
timerngmax = 350 #in ns
origbaseval = 82 #in mV
origscale = 1000. #convert mV to V
strampbaseval = -3 #in V
strampscale = 10. #due to x10 gain
fastdata = -1.0*(Original[:,2:].astype(float) - origbaseval)/origscale
slowdata = (Stramp[:,2:].astype(float) - strampbaseval)/strampscale
minindex = np.where(Stramptime > timerngmin)[0][0]
maxindex = np.where(Stramptime < timerngmax)[0][-1]
slowdata = slowdata[:,minindex:maxindex]
#classify into single signal and multiple signal first
noiserms = np.sqrt(np.mean(fastdata[0]**2))
print('rms is', noiserms)
noisethres = noiserms*3
monosgnl = [] #signals with a single peak
multisgnl = [] #signals with multiple peaks
for i in range(datanum):
peaks = np.where(fastdata[i] > noisethres)
if np.any(np.ediff1d(peaks) > 1):  # assumed: non-contiguous peak indices imply multiple peaks
    multisgnl.append(fastdata[i])
else:
    monosgnl.append(fastdata[i])
from sys import argv
import numpy as np
import scipy as sp
from scipy.linalg import eig,svd,eigh
from scipy.sparse.linalg import eigs
from sklearn.neighbors import kneighbors_graph
from copy import deepcopy
from .utils import *
from pymanopt.manifolds import Grassmann
import nudged
from sklearn.metrics.pairwise import pairwise_distances
def findSingleLP(X,d,k,sigma,embMethod='lpp'):
D,N = X.shape
W = np.zeros((N,N))
B = np.zeros((N,N))
if embMethod == 'pca':
for i in range(N-1):
for j in range(i+1,N):
W[i,j] = 1.0/N
W = 0.5*(W + W.T)
B = np.eye(N)
L = B - W
M1 = X.dot(L).dot(X.T)
Mc = np.eye(M1.shape[0])
elif embMethod == 'lpp':
G = kneighbors_graph(X.T,k,mode='distance',include_self=False).toarray()
W = 0.5*(G + G.T)
W[W!=0] = np.exp(-W[W!=0] / (2*sigma*sigma))
B = np.diag(np.sum(W,axis=0))
L = B - W
M1 = X.dot(L).dot(X.T)
Mc = X.dot(B).dot(X.T)
elif embMethod == 'rand':
Gnk = Grassmann(D,2)
proj = Gnk.rand()
return [proj]
elif embMethod == 'syn':
proj = np.zeros((D,2))
card = 2
#ids = np.arange(D)
ids = np.array([1,0,4,3]) # For ecoli 2
#ids = np.array([2,7,3,0]) # For yeast 2
#ids = np.array([12, 39, 5, 0, 45, 43]) # For seaWater 3
#ids = np.array([0, 46, 5, 14, 11, 40, 49, 43]) # For seaWater 4
np.random.shuffle(ids)
#print ids
proj[ids[:card],0] = 1/np.sqrt(card)
proj[ids[card:2*card],1] = 1/np.sqrt(card)
#proj[ids[card-1:2*card-1],1] = 1/np.sqrt(card) # For cities
return [proj]
u,s = eig(M1)
if np.min(u) < 0:
M1 = M1 - np.min(u)*np.eye(M1.shape[0])
u,s = eig(Mc)
if np.min(u) < 0:
Mc = Mc - np.min(u)*np.eye(Mc.shape[0])
eigvals,eigvecs = eig(M1,Mc)
eigvecs = np.dot(sp.linalg.sqrtm(Mc),eigvecs)
if embMethod == 'pca':
ind = np.argsort(-eigvals)
proj = eigvecs[:,ind[0:d]]
elif embMethod == 'lpp':
ind = np.argsort(eigvals)
proj = eigvecs[:,ind[0:d]]
return [proj]
def findMultipleLP(X,d,k,sigma,maxIter,embMethod='lpp',labs = None):
N = X.shape[1]
W = np.zeros((N,N))
B = np.zeros((N,N))
if embMethod == 'pca':
for i in range(N-1):
for j in range(i+1,N):
W[i,j] = 1.0/N
W = np.maximum(W, W.T)
B = np.eye(N)
L = B - W
M1 = X.dot(L).dot(X.T)
Mc = np.eye(M1.shape[0])
elif embMethod == 'lpp':
G = kneighbors_graph(X.T,k,mode='distance',include_self=False).toarray()
W = 0.5*(G + G.T)
W[W!=0] = np.exp(-W[W!=0] / (2*sigma*sigma))
B = np.diag(np.sum(W,axis=0))
L = B - W
M1 = X.dot(L).dot(X.T)
Mc = X.dot(B).dot(X.T)
elif embMethod == 'lde':
Gw = np.zeros((N,N))
Gb = np.zeros((N,N))
dists = pairwise_distances(X.T)
for ii in range(N):
inds = np.where(labs == labs[ii])[0]
sinds = np.argsort(dists[ii,inds])
Gw[ii,inds[sinds[:k]]] = 1
inds = np.where(labs != labs[ii])[0]
sinds = np.argsort(dists[ii,inds])
Gb[ii,inds[sinds[:k]]] = 1
Gw = np.maximum(Gw, Gw.T)
Bw = np.diag(np.sum(Gw,axis=0))
Lw = Bw - Gw
M1 = X.dot(Lw).dot(X.T)
Gb = np.maximum(Gb, Gb.T)
Bb = np.diag(np.sum(Gb,axis=0))
Lb = Bb - Gb
Mc = X.dot(Lb).dot(X.T)
u,s = eig(M1)
u = np.real(u)
if np.min(u) < 0:
M1 = M1 - np.min(u)*np.eye(M1.shape[0])
u,s = eig(Mc)
u = np.real(u)
if np.min(u) < 0:
    Mc = Mc - np.min(u)*np.eye(Mc.shape[0])
"""
Testing two-bkpt segreg by comparing with a brute-force method.
"""
# Author: <NAME>
# License: BSD 3 clause
import unittest
from matplotlib import pyplot as plt
import numpy as np
import numpy.testing
from segreg.analysis import stats_plotting
from segreg.model import segreg_util
from segreg.model.alt import brute_force_segreg
from segreg.model.alt import two_bkpt_segreg_alt
class TwoBkptSegregHelper(unittest.TestCase):
def setUp(self):
self._seed = None
def tearDown(self):
if self._seed is not None:
print()
print("seed: ", self._seed)
print()
def compare(self,
lhs_module,
rhs_module,
indep,
dep,
num_end_to_skip,
num_between_to_skip,
tol,
verbose=False,
expected_params=None,
expected_value=None,
plot=False,
seed=None):
(lhs_min_params,
lhs_min_value) = lhs_module.estimate_two_bkpt_segreg(indep,
dep,
num_end_to_skip=num_end_to_skip,
num_between_to_skip=num_between_to_skip)
(rhs_min_params,
rhs_min_value) = rhs_module.estimate_two_bkpt_segreg(indep,
dep,
num_end_to_skip=num_end_to_skip,
num_between_to_skip=num_between_to_skip)
if verbose:
print()
print()
print(lhs_module.__name__)
print(lhs_min_params)
print(lhs_min_value)
print()
print(rhs_module.__name__)
print(rhs_min_params)
print(rhs_min_value)
print()
if plot:
func = segreg_util.two_bkpt_segmented_func(*lhs_min_params)
stats_plotting.plot_models(func_arr=[func],
indep=indep,
dep=dep,
extra_pts_arr=[[lhs_min_params[0],
lhs_min_params[2]]],
full_size_scatter=True)
plt.show()
self.assertAlmostEqual(lhs_min_value,
rhs_min_value,
delta=1000.0 * tol)
try:
# sometimes different bkpts found due to precision diffs only
numpy.testing.assert_allclose(lhs_min_params,
rhs_min_params,
rtol=0.0,
atol=tol)
except:
print()
print("-" * 50)
print("EXCEPTION: OK")
print(lhs_module.__name__)
print(lhs_min_params)
print(lhs_min_value)
print(rhs_module.__name__)
print(rhs_min_params)
print(rhs_min_value)
print()
if seed is not None:
print("seed: ", seed)
print("-" * 50)
print()
if expected_params is not None:
self.check_known_value(expected_params=expected_params,
computed_params=lhs_min_params,
expected_value=expected_value,
computed_value=lhs_min_value)
self.check_known_value(expected_params=expected_params,
computed_params=rhs_min_params,
expected_value=expected_value,
computed_value=rhs_min_value)
def compare_to_brute_force(self,
lhs_module,
indep,
dep,
num_end_to_skip,
num_between_to_skip,
dx=0.01,
verbose=False,
seed=None):
(lhs_min_params,
lhs_min_value) = lhs_module.estimate_two_bkpt_segreg(indep,
dep,
num_end_to_skip=num_end_to_skip,
num_between_to_skip=num_between_to_skip)
(rhs_min_params,
rhs_min_value) = brute_force_segreg.estimate_two_bkpt_segreg(indep,
dep,
num_end_to_skip=num_end_to_skip,
num_between_to_skip=num_between_to_skip,
dx=dx)
if verbose:
print()
print()
print(lhs_module.__name__)
print(lhs_min_params)
print(lhs_min_value)
print()
print("BRUTE FORCE")
print(brute_force_segreg.__name__)
print(rhs_min_params)
print(rhs_min_value)
print()
tol = dx
# numpy.testing.assert_allclose(lhs_min_params,
# rhs_min_params,
# rtol=0.0,
# atol=tol)
# if bkpt diff not super tiny, make sure that brute force RSS is
# greater than the method we are checking against
if abs(rhs_min_value - lhs_min_value) > 1.0e-10:
self.assertTrue(lhs_min_value <= rhs_min_value)
self.assertAlmostEqual(lhs_min_value, rhs_min_value, delta=tol)
try:
# check bkpts close
self.assertAlmostEqual(lhs_min_params[0],
rhs_min_params[0],
delta=tol)
self.assertAlmostEqual(lhs_min_params[2],
rhs_min_params[2],
delta=tol)
except:
# if bkpts not close, brute may have gotten stuck somewhere else;
# let's make sure it is not better than lhs solution
[v1, v2, m1, m2, rss] = two_bkpt_segreg_alt.fixed_bkpt_ls_from_data(indep,
dep,
u1=rhs_min_params[0],
u2=rhs_min_params[2])
if abs(rhs_min_value - lhs_min_value) > 1.0e-10:
self.assertTrue(lhs_min_value < rss)
print()
print("-" * 50)
print("EXCEPTION: OK")
print(lhs_module.__name__)
print(lhs_min_params)
print(lhs_min_value)
print(brute_force_segreg.__name__)
print(rhs_min_params)
print(rhs_min_value)
print()
if seed is not None:
print("seed: ", seed)
print("-" * 50)
print()
def brute_force_test_suite(self,
lhs_module,
num_data=10,
num_tests=100,
num_end_to_skip=0,
num_between_to_skip=2,
seeds=None,
investigate=False):
"""
hypothesis?
"""
indep = np.arange(num_data, dtype=float)
if seeds is None:
np.random.seed(3483407)
seeds = np.random.randint(10, 1000000, num_tests)
for i, seed in enumerate(seeds):
if i % 100 == 0:
print("iter: ", i)
self._seed = seed
np.random.seed(seed)
dep = np.random.randint(low=-10, high=10, size=num_data)
dep = np.array(dep, dtype=float)
printdata = investigate
if printdata:
import pprint
print()
pprint.pprint(indep)
print()
pprint.pprint(dep)
print()
plt.scatter(indep, dep)
plt.show()
self.compare_to_brute_force(lhs_module,
indep,
dep,
num_end_to_skip,
num_between_to_skip,
verbose=investigate,
seed=seed)
dep = np.random.rand(num_data)
printdata = investigate
if printdata:
print()
pprint.pprint(indep)
print()
pprint.pprint(dep)
print()
plt.scatter(indep, dep)
plt.show()
self.compare_to_brute_force(lhs_module,
indep,
dep,
num_end_to_skip,
num_between_to_skip,
verbose=investigate,
seed=seed)
dep = np.random.randn(num_data)
printdata = investigate
if printdata:
print()
pprint.pprint(indep)
print()
pprint.pprint(dep)
print()
plt.scatter(indep, dep)
plt.show()
self.compare_to_brute_force(lhs_module,
indep,
dep,
num_end_to_skip,
num_between_to_skip,
verbose=investigate,
seed=seed)
def compare_test_suite(self,
lhs_module,
rhs_module,
num_data=10,
num_tests=100,
num_end_to_skip=0,
num_between_to_skip=2,
tol=1.0e-12,
seeds=None,
investigate=False):
"""
hypothesis?
"""
indep = np.arange(num_data, dtype=float)
if seeds is None:
np.random.seed(3483407)
seeds = np.random.randint(10, 1000000, num_tests)
for i, seed in enumerate(seeds):
if i % 1000 == 0:
print("iter: ", i)
self._seed = seed
np.random.seed(seed)
dep = np.random.randint(low=-10, high=10, size=num_data)
dep = np.array(dep, dtype=float)
import numpy as np
from abc import ABCMeta, abstractmethod
from tqdm import tqdm
from keras.preprocessing import image
from keras.applications.xception import Xception
from keras.applications.xception import preprocess_input, decode_predictions
class baseExtractor(metaclass=ABCMeta):
def __init__(self):
pass
@abstractmethod
def feature_extraction(self, images):
pass
class XceptionExtractor(baseExtractor):
def __init__(self):
self.model = Xception(weights='imagenet', include_top=False)
self.batch_size = 32
def feature_extraction(self, images):
x = preprocess_input(images)
out = []
num_data = len(x)
for start in tqdm(range(0, num_data, self.batch_size)):
if start +self.batch_size < num_data:
end = start + self.batch_size
else:
end = num_data
preds = self.model.predict(x[np.arange(start, end, 1), :, :, :])
preds = preds.reshape(preds.shape[0],
preds.shape[1]*preds.shape[2]*preds.shape[3]
)
out.extend(preds)
return out
class WHashExtractor(baseExtractor):
def __init__(self):
pass
def feature_extraction(self, images):
import imagehash
from PIL import Image
out = []
for image in images:
im = Image.fromarray(np.uint8(image))
out.append(imagehash.whash(im).hash.astype(float).flatten())
return out
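# Example (hypothetical): WHashExtractor().feature_extraction(images) returns
# one flattened 64-dim binary vector per image (imagehash's default 8x8
# wavelet hash).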
def main():
img_path = 'elephant.jpg'
img = image.load_img(img_path, target_size=(224, 224))
img_one_array = image.img_to_array(img)
images = np.expand_dims(img_one_array, axis=0)
"""char_detect_segment
This module contains the necessary functions to detect characters,
merge bounding boxes of the characters, and crop them out.
"""
import os
import collections
import re
import pathlib
from operator import itemgetter
import cv2
import numpy as np
NUMBERS = re.compile(r'(\d+)')
IMAGEBOXES = collections.namedtuple('Image_Boxes', ['Name',
'Image',
'Boxes'])
def numerical_sort(value):
"""
DESCRIPTION: Sort file names numerically instead of alphabetically.
INPUT: Filename value
OUTPUT: Mapping of filename
"""
parts = NUMBERS.split(value)
parts[1::2] = map(int, parts[1::2])
return parts
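# Example: sorted(['frame10.png', 'frame2.png'], key=numerical_sort)
# returns ['frame2.png', 'frame10.png'] (numeric rather than lexicographic order).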
def sort_contours(contours, setting):
"""
DESCRIPTION: Sort the contours by x and y coordinates,
calculate the central coordinates, and sort left-to-right,
top-to-bottom, just like in reading English text.
INPUT: List of OpenCV contours, string indicating a setting
OUTPUT: A sorted list of bounding boxes with center coordinates
"""
bound_box = []
for contour in contours:
[x_coord, y_coord, width, height] = cv2.boundingRect(contour)
center_x = x_coord + width / 2
center_y = y_coord + height / 2
single_box = [center_x, center_y, x_coord, y_coord, width, height]
bound_box.append(single_box)
if setting == "word":
# Need to sort the bounding bound_box by line first
# Then sort by characters in the line
# First, sort the contours by the y-coordinate
bound_box.sort(key=itemgetter(3))
# Set the initial line threshold to the bottom of the first character
line_theshold = bound_box[0][3] + bound_box[0][5] - 1
# If any top y coordinate is less than the line threshold,
# it is a new line
l_start = 0
for i, char_bound_box in enumerate(bound_box):
if char_bound_box[3] > line_theshold:
# Sort the line by the x-coordinate
bound_box[l_start:i] = sorted(bound_box[l_start:i],
key=itemgetter(2))
l_start = i
line_theshold = max(char_bound_box[3]+char_bound_box[5]-1,
line_theshold)
# Sort the last line
bound_box[l_start:] = sorted(bound_box[l_start:], key=itemgetter(2))
else:
# Just sort by the x-coordinate
bound_box.sort(key=itemgetter(2))
return bound_box
def dis(start, end):
"""
DESCRIPTION: Calculate the Euclidean distance between two coordinates
INPUT: Start and End points
OUTPUT: Distance between the two points
"""
return abs(start - end)
def create_word_mask(boxes, height, width):
"""
DESCRIPTION: Create a word mask to guide word segmentation.
It does this by horizontally dilating the drawn and filled
bounding boxes, and then creating boudning boxes of the new
shapes.
INPUT: List of sorted and merged bounding boxes, height and
width of image.
OUTPUT: Sorted list of word bounding boxes.
"""
# Create a blank image of zero values.
mask = np.zeros((height, width), dtype="uint8")
# Using the bounding boxes, we will draw white boxes on the mask
for mask_box in boxes:
cv2.rectangle(mask, (mask_box[0], mask_box[1]),
(mask_box[0]+mask_box[2], mask_box[1]+mask_box[3]),
255, -1)
# With this mask, we will have to unidirectionally dilate so that
# characters within the word are connected.
# Kernel is used to dilate in the horizontal direction
#
# First, we need to determine the size of the kernel
# We need two row matrices (one is zeros and other is ones).
# The size of the zero kernel is relative to the width of the image
# while the one kernel is just +1 of the zero kernel.
k_1_width = int(.02 * width)
k_2_width = k_1_width + 1
kernel_1 = np.zeros((1, k_1_width), dtype="uint8")
kernel_2 = np.ones((1, k_2_width), dtype="uint8")
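# Plausible continuation (assumption, not from the original source): dilate the
# mask horizontally so characters of one word merge, then box the merged blobs:
#   mask = cv2.dilate(mask, kernel_2, iterations=k_1_width)
#   contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)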
"""
This module contains tests of code from `../dsawl/active_learning`
directory. Code that is tested here provides functionality for
extension of learning sample in an efficient way.
@author: <NAME>
"""
import unittest
from typing import Tuple
import numpy as np
from sklearn.neighbors import (
KNeighborsClassifier, KNeighborsRegressor, KernelDensity
)
from dsawl.active_learning.scoring_functions import (
compute_confidences, compute_margins, compute_entropy,
compute_committee_divergences, compute_committee_variances,
compute_estimations_of_variance
)
from dsawl.active_learning import (
UncertaintyScorerForClassification, CommitteeScorer,
VarianceScorerForRegression, RandomScorer, DensityScorer,
CombinedSamplerFromPool
)
def get_predictions() -> np.ndarray:
"""
Get data that can be treated either as predicted probabilities of
class labels or as predictions of multiple continuous values.
:return:
predictions
"""
predictions = np.array(
[[0.2, 0.5, 0.3],
[0.9, 0.1, 0],
[0.33, 0.34, 0.33],
[0.5, 0.5, 0]]
)
return predictions
class TestScoringFunctions(unittest.TestCase):
"""
Tests of scoring functions.
"""
def test_compute_confidences(self) -> type(None):
"""
Test that `compute_confidences` function works correctly.
:return:
None
"""
predicted_probabilities = get_predictions()
execution_result = compute_confidences(predicted_probabilities)
true_answer = np.array([0.5, 0.9, 0.34, 0.5])
np.testing.assert_allclose(execution_result, true_answer)
def test_compute_margins(self) -> type(None):
"""
Test that `compute_margins` function works correctly.
:return:
None
"""
predicted_probabilities = get_predictions()
execution_result = compute_margins(predicted_probabilities)
true_answer = np.array([0.2, 0.8, 0.01, 0.])
np.testing.assert_allclose(execution_result, true_answer)
def test_compute_entropy(self) -> type(None):
"""
Test that `compute_entropy` function works correctly.
:return:
None
"""
predicted_probabilities = get_predictions()
execution_result = compute_entropy(predicted_probabilities)
true_answer = np.array(
[1.02965301, 0.32508297, 1.09851262, 0.69314718]
)
np.testing.assert_allclose(execution_result, true_answer)
def test_compute_committee_divergences(self) -> type(None):
"""
Test that `compute_committee_divergences` function
works correctly.
:return:
None
"""
stub = get_predictions()
list_of_predicted_probabilities = [stub[:2, :], stub[2:, :]]
execution_result = compute_committee_divergences(
list_of_predicted_probabilities
)
true_answer = np.array([0.0321534, 0.20349845])
np.testing.assert_allclose(execution_result, true_answer)
def test_compute_committee_variances(self) -> type(None):
"""
Test that `compute_committee_variances` function
works correctly.
:return:
None
"""
stub = get_predictions()
list_of_predictions = [stub[:2, 0], stub[2:, 0]]
execution_result = compute_committee_variances(list_of_predictions)
true_answer = np.array([0.004225, 0.04])
np.testing.assert_allclose(execution_result, true_answer)
def test_compute_estimations_of_variance(self) -> type(None):
"""
Test that `compute_estimations_of_variance` function
works correctly.
:return:
None
"""
predictions = np.array([0.1, 0.2, 0.3])
predictions_of_square = np.array([0.2, 0.3, 0.4])
execution_result = compute_estimations_of_variance(
predictions, predictions_of_square
)
true_answer = np.array([0.19, 0.26, 0.31])
np.testing.assert_allclose(execution_result, true_answer)
def get_dataset_and_pool() -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Get dataset with target variable that can be considered either
categorical or numerical and two numerical features, also
get feature representation of new unlabelled objects.
:return:
train features, train target, new features
"""
dataset = np.array(
[[0, 0, 1],
[1, 0, 1],
[0, 1, 1],
[3, 0, 2],
[2, 1, 2],
[3, 1, 2],
[0, 3, 3],
[1, 2, 3],
[1, 3, 3]],
dtype=float)
X_train = dataset[:, :-1]
y_train = dataset[:, -1]
X_new = np.array(
[[2, 0],
[0, 2],
[2, 2],
[1, 1],
[1, -4]]
)
return X_train, y_train, X_new
def get_predictions_of_knn_classifier() -> np.ndarray:
"""
Get predictions of class probabilities made by
`KNeighborsClassifier()` trained on data that are returned
by `get_dataset_and_pool` function and applied to `X_new` that
also comes from this function's output.
:return:
predicted probabilities for new objects
"""
clf = KNeighborsClassifier()
X_train, y_train, X_new = get_dataset_and_pool()
clf.fit(X_train, y_train)
predicted_probabilities = clf.predict_proba(X_new)
return predicted_probabilities
class TestUncertaintyScorerForClassification(unittest.TestCase):
"""
Tests of `UncertaintyScorerForClassification` class.
"""
def test_score_with_confidences(self) -> type(None):
"""
Test that `score` method works correctly if
scoring is based on confidences.
:return:
None
"""
X_train, y_train, X_new = get_dataset_and_pool()
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
scorer = UncertaintyScorerForClassification(
compute_confidences,
revert_sign=True,
clf=clf
)
execution_result = scorer.score(X_new)
true_answer = np.array([-0.6, -0.6, -0.4, -0.6, -0.6])
np.testing.assert_equal(execution_result, true_answer)
def test_score_with_margins(self) -> type(None):
"""
Test that `score` method works correctly if
scoring is based on margins.
:return:
None
"""
X_train, y_train, X_new = get_dataset_and_pool()
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
scorer = UncertaintyScorerForClassification(
compute_margins,
revert_sign=True,
clf=clf
)
execution_result = scorer.score(X_new)
true_answer = np.array([-0.2, -0.2, 0, -0.4, -0.2])
np.testing.assert_allclose(execution_result, true_answer)
def test_score_with_entropy(self) -> type(None):
"""
Test that `score` method works correctly if
scoring is based on entropy.
:return:
None
"""
X_train, y_train, X_new = get_dataset_and_pool()
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
scorer = UncertaintyScorerForClassification(
compute_entropy,
clf=clf
)
execution_result = scorer.score(X_new)
true_answer = np.array(
[0.67301167, 0.67301167, 1.05492017, 0.95027054, 0.67301167]
)
np.testing.assert_allclose(execution_result, true_answer)
def test_get_tools_and_set_tools(self) -> type(None):
"""
Test that `get_tools` and `set_tools` methods work
correctly.
:return:
None
"""
X_train, y_train, X_new = get_dataset_and_pool()
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
scorer = UncertaintyScorerForClassification(
compute_entropy,
clf=clf
)
another_clf = KNeighborsClassifier()
another_clf.fit(X_train[:-1, :], y_train[:-1])
scorer.set_tools(another_clf)
predictions = scorer.get_tools().predict(X_new)
another_predictions = another_clf.predict(X_new)
np.testing.assert_equal(predictions, another_predictions)
def test_update_tools(self) -> type(None):
"""
Test that `update_tools` method works correctly.
:return:
None
"""
X_train, y_train, X_new = get_dataset_and_pool()
clf = KNeighborsClassifier()
scorer = UncertaintyScorerForClassification(
compute_entropy,
clf=clf
)
scorer.update_tools(X_train, y_train)
execution_result = scorer.get_tools().predict(X_new)
true_answer = np.array([2, 3, 2, 1, 1])
np.testing.assert_equal(execution_result, true_answer)
scorer.update_tools(X_train[:-1, :], y_train[:-1], clf)
execution_result = scorer.get_tools().predict(X_new)
true_answer = np.array([2, 1, 2, 1, 1])
np.testing.assert_equal(execution_result, true_answer)
class TestCommitteeScorer(unittest.TestCase):
"""
Tests of `CommitteeScorer` class.
"""
def test_score_with_divergences(self) -> type(None):
"""
Test that `score` method works correctly if it is
a classification problem and scoring is based on
Kullback-Leibler divergence.
:return:
None
"""
X_train, y_train, X_new = get_dataset_and_pool()
scorer = CommitteeScorer(
compute_committee_divergences
)
scorer.update_tools(X_train, y_train, KNeighborsClassifier())
execution_result = scorer.score(X_new)
true_answer = np.array([0, 0, 0, 0.09080533, 0])
np.testing.assert_allclose(execution_result, true_answer, atol=1e-15)
def test_score_with_variances(self) -> type(None):
"""
Test that `score` method works correctly if it is
a regression problem and scoring is based on variance of
predictions.
:return:
None
"""
X_train, y_train, X_new = get_dataset_and_pool()
scorer = CommitteeScorer(
compute_committee_variances,
is_classification=False
)
scorer.update_tools(X_train, y_train, KNeighborsRegressor())
execution_result = scorer.score(X_new)
true_answer = np.array([0, 0, 0, 0.008888889, 0])
np.testing.assert_allclose(execution_result, true_answer)
def test_get_tools_and_set_tools(self) -> type(None):
"""
Test that `get_tools` and `set_tools` methods work
correctly.
:return:
None
"""
X_train, y_train, X_new = get_dataset_and_pool()
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
scorer = CommitteeScorer(
compute_entropy,
committee=[clf]
)
another_clf = KNeighborsClassifier()
another_clf.fit(X_train[:-1, :], y_train[:-1])
scorer.set_tools([another_clf])
predictions = scorer.get_tools()[0].predict(X_new)
another_predictions = another_clf.predict(X_new)
np.testing.assert_equal(predictions, another_predictions)
def test_update_tools(self) -> type(None):
"""
Test that `update_tools` method works correctly.
:return:
None
"""
X_train, y_train, X_new = get_dataset_and_pool()
clf = KNeighborsClassifier()
scorer = CommitteeScorer(
compute_entropy,
committee=[clf]
)
scorer.update_tools(X_train, y_train)
execution_result = [clf.predict(X_new) for clf in scorer.get_tools()]
true_answer = [np.array([1, 1, 2, 1, 1]) for _ in range(3)]
for result, answer in zip(execution_result, true_answer):
np.testing.assert_equal(result, answer)
scorer.update_tools(
np.vstack((X_train, X_train[1, :])),
np.hstack((y_train, y_train[1])),
clf
)
execution_result = [clf.predict(X_new) for clf in scorer.get_tools()]
true_answer = [np.array([1, 1, 2, 1, 1]) for _ in range(3)]
for result, answer in zip(execution_result, true_answer):
np.testing.assert_equal(result, answer)
class TestVarianceScorerForRegression(unittest.TestCase):
"""
Tests of `VarianceScorerForRegression` class.
"""
def test_score_with_estimation_of_target_variance(self) -> type(None):
"""
Test that `score` method works correctly if scoring
is based on estimation of target variable's variance.
:return:
None
"""
X_train, y_train, X_new = get_dataset_and_pool()
scorer = VarianceScorerForRegression(
compute_estimations_of_variance
)
scorer.update_tools(X_train, y_train, KNeighborsRegressor())
execution_result = scorer.score(X_new)
true_answer = np.array([0.24, 0.96, 0.56, 0.64, 0.24])
np.testing.assert_allclose(execution_result, true_answer)
def test_get_tools_and_set_tools(self) -> type(None):
"""
Test that `get_tools` and `set_tools` methods work
correctly.
:return:
None
"""
X_train, y_train, X_new = get_dataset_and_pool()
rgrs = {
'target': KNeighborsRegressor().fit(X_train, y_train),
'target^2': KNeighborsRegressor().fit(X_train, y_train ** 2)
}
scorer = VarianceScorerForRegression(
compute_entropy,
rgrs=rgrs
)
another_rgrs = {
'target': KNeighborsRegressor().fit(X_train[:-1, :], y_train[:-1]),
'target^2': KNeighborsRegressor().fit(
X_train[:-1, :], y_train[:-1] ** 2
)
}
scorer.set_tools(another_rgrs)
predictions = scorer.get_tools()['target'].predict(X_new)
another_predictions = another_rgrs['target'].predict(X_new)
np.testing.assert_equal(predictions, another_predictions)
def test_update_tools(self) -> type(None):
"""
Test that `update_tools` method works correctly.
:return:
None
"""
X_train, y_train, X_new = get_dataset_and_pool()
rgrs = {
'target': KNeighborsRegressor(),
'target^2': KNeighborsRegressor()
}
scorer = VarianceScorerForRegression(
compute_entropy,
rgrs=rgrs
)
scorer.update_tools(X_train, y_train)
execution_result = scorer.get_tools()['target'].predict(X_new)
true_answer = np.array([1.6, 2.2, 2.2, 1.6, 1.4])
np.testing.assert_equal(execution_result, true_answer)
scorer.update_tools(
X_train[:-1, :], y_train[:-1], KNeighborsRegressor()
)
execution_result = scorer.get_tools()['target'].predict(X_new)
true_answer = np.array([1.6, 1.8, 2.4, 1.6, 1.4])
np.testing.assert_equal(execution_result, true_answer)
import copy
import os
import warnings
from copy import deepcopy
from functools import partial
from typing import List, Optional, Tuple
from scipy.optimize import minimize
from src.postprocessing.extract_poses import load_robot_poses, load_computed_poses
import matplotlib.pyplot as plt
import numpy as np
import open3d as o3d
def rotation_matrix_x(theta):
return np.array(
[[1, 0.0, 0.0],
[0.0, np.cos(theta), -np.sin(theta)],
[0.0, np.sin(theta), np.cos(theta)]])
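# Sanity check (hypothetical usage): rotating the y axis by 90 degrees about x
# should yield the z axis:
#   np.allclose(rotation_matrix_x(np.pi / 2) @ np.array([0.0, 1.0, 0.0]),
#               np.array([0.0, 0.0, 1.0]))   # -> True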
import cv2
import logging
import numpy as np
from tqdm import tqdm
from brainscore.model_interface import BrainModel
from brainscore.utils import fullname
class VisualSearchObjArray(BrainModel):
def __init__(self, identifier, target_model_param, stimuli_model_param):
self.current_task = None
self.identifier = identifier
self.target_model = target_model_param['target_model']
self.stimuli_model = stimuli_model_param['stimuli_model']
self.target_layer = target_model_param['target_layer']
self.stimuli_layer = stimuli_model_param['stimuli_layer']
self.search_image_size = stimuli_model_param['search_image_size']
self._logger = logging.getLogger(fullname(self))
def start_task(self, task: BrainModel.Task, **kwargs):
self.fix = kwargs['fix'] # fixation map
self.max_fix = kwargs['max_fix'] # maximum allowed fixation excluding the very first fixation
self.data_len = kwargs['data_len'] # Number of stimuli
self.current_task = task
def look_at(self, stimuli_set):
self.gt_array = []
gt = stimuli_set[stimuli_set['image_label'] == 'mask']
gt_paths = list(gt.image_paths.values())[int(gt.index.values[0]):int(gt.index.values[-1] + 1)]
for i in range(6):
imagename_gt = gt_paths[i]
gt = cv2.imread(imagename_gt, 0)
gt = cv2.resize(gt, (self.search_image_size, self.search_image_size), interpolation=cv2.INTER_AREA)
retval, gt = cv2.threshold(gt, 125, 255, cv2.THRESH_BINARY)
temp_stim = np.uint8(np.zeros((3 * self.search_image_size, 3 * self.search_image_size)))
temp_stim[self.search_image_size:2 * self.search_image_size,
self.search_image_size:2 * self.search_image_size] = np.copy(gt)
gt = np.copy(temp_stim)
gt = gt / 255
self.gt_array.append(gt)
self.gt_total = np.copy(self.gt_array[0])
for i in range(1, 6):
self.gt_total += self.gt_array[i]
self.score = np.zeros((self.data_len, self.max_fix + 1))
self.data = np.zeros((self.data_len, self.max_fix + 2, 2), dtype=int)
S_data = np.zeros((300, 7, 2), dtype=int)
I_data = np.zeros((300, 1), dtype=int)
data_cnt = 0
target = stimuli_set[stimuli_set['image_label'] == 'target']
target_features = self.target_model(target, layers=[self.target_layer], stimuli_identifier=False)
if target_features.shape[0] == target_features['neuroid_num'].shape[0]:
target_features = target_features.T
stimuli = stimuli_set[stimuli_set['image_label'] == 'stimuli']
stimuli_features = self.stimuli_model(stimuli, layers=[self.stimuli_layer], stimuli_identifier=False)
if stimuli_features.shape[0] == stimuli_features['neuroid_num'].shape[0]:
stimuli_features = stimuli_features.T
import torch
for i in tqdm(range(self.data_len), desc="visual search stimuli: "):
op_target = self.unflat(target_features[i:i + 1])
MMconv = torch.nn.Conv2d(op_target.shape[1], 1, kernel_size=(op_target.shape[2], op_target.shape[3]),
stride=1, bias=False)
MMconv.weight = torch.nn.Parameter(torch.Tensor(op_target))
gt_idx = target_features.tar_obj_pos.values[i]
gt = self.gt_array[gt_idx]
op_stimuli = self.unflat(stimuli_features[i:i + 1])
out = MMconv(torch.Tensor(op_stimuli)).detach().numpy()
out = out.reshape(out.shape[2:])
out = out - np.min(out)
import numpy as np
import matplotlib.pyplot as plt
import math
import os
import os.path as op
from datetime import datetime
from isstools.conversions import xray
from subprocess import call
import re
import collections
import pandas as pd
import h5py
from pathlib import Path
class XASdata:
def __init__(self, db = None, **kwargs):
self.energy = np.array([])
        self.data = np.array([])
import numpy as np
from phonopy.structure.cells import get_primitive
from phonopy.units import VaspToTHz
from upho.phonon.star_creator import StarCreator
from upho.phonon.translational_projector import TranslationalProjector
from upho.phonon.rotational_projector import RotationalProjector
from upho.phonon.vectors_adjuster import VectorsAdjuster
from upho.phonon.element_weights_calculator import (
ElementWeightsCalculator)
from upho.analysis.time_measurer import TimeMeasurer
class Eigenstates:
def __init__(self,
dynamical_matrix,
unitcell_ideal,
primitive_matrix_ideal,
star="none",
mode="eigenvector",
factor=VaspToTHz,
verbose=False):
self._verbose = verbose
self._mode = mode
self._factor = factor
self._cell = dynamical_matrix.get_primitive() # Disordered
self._dynamical_matrix = dynamical_matrix
self._star = star
self._unitcell_ideal = unitcell_ideal
# In this module, primitive is w.r.t. the unit cell (may be disordered).
self._primitive = get_primitive(
self._unitcell_ideal, primitive_matrix_ideal)
self._build_star_creator()
self._generate_translational_projector()
self._generate_vectors_adjuster()
self._create_rotational_projector()
self._build_element_weights_calculator()
def _build_element_weights_calculator(self):
unitcell_orig = self._cell
primitive_ideal = self._primitive
self._element_weights_calculator = ElementWeightsCalculator(
unitcell_orig, primitive_ideal)
def _build_star_creator(self):
if self._star == "all":
is_overlapping = True
else: # "none" or "sym"
is_overlapping = False
primitive_ideal_wrt_unitcell = self._primitive
self._star_creator = StarCreator(
is_overlapping=is_overlapping,
atoms=primitive_ideal_wrt_unitcell)
if self._star == "none":
self._nopr = 1
else: # "sym" or "all"
self._nopr = len(self._star_creator.get_rotations())
print("nopr:", self._nopr)
def _generate_translational_projector(self):
self._translational_projector = TranslationalProjector(
self._primitive, self._unitcell_ideal)
def _create_rotational_projector(self):
self._rotational_projector = RotationalProjector(self._primitive)
def _generate_vectors_adjuster(self):
# Get the (disordered) unitcell.
primitive = self._dynamical_matrix.get_primitive()
self._vectors_adjuster = VectorsAdjuster(primitive)
def create_q_star(self, q):
"""
Args:
            q: Reciprocal space point in fractional coordinates for "PC".

        How the star is built depends on the "star" option:
            "sym" : Duplication is not allowed.
            "all" : Duplication is allowed.
            "none": The star of k is not considered.
"""
if self._star == "none":
q_star, transformation_matrices = (
                np.array(q)
##############################################################################
# pymbar: A Python Library for MBAR
#
# Copyright 2010-2017 University of Virginia, Memorial Sloan-Kettering Cancer Center
#
# Authors: <NAME>, <NAME>
# Contributors: <NAME>, <NAME>
#
# pymbar is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with pymbar. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
##############################################################################
# imports
##############################################################################
from six.moves import zip_longest
import warnings
import numpy as np
try: # numexpr used in logsumexp when available.
import numexpr
HAVE_NUMEXPR = True
except ImportError:
HAVE_NUMEXPR = False
##############################################################################
# functions / classes
##############################################################################
class TypeCastPerformanceWarning(RuntimeWarning):
pass
def kln_to_kn(kln, N_k = None, cleanup = False):
""" Convert KxKxN_max array to KxN max array
if self.N is not initialized, it will be here.
Parameters
----------
u_kln : np.ndarray, float, shape=(KxLxN_max)
N_k (optional) : np.array
the N_k matrix from the previous formatting form
cleanup (optional) : bool
optional command to clean up, since u_kln can get very large
Outputs
-------
u_kn: np.ndarray, float, shape=(LxN)
"""
#print "warning: KxLxN_max arrays deprecated; convering into new preferred KxN shape"
# rewrite into kn shape
[K, L, N_max] = np.shape(kln)
if N_k is None:
# We assume that all N_k are N_max.
# Not really an easier way to do this without being given the answer.
N_k = N_max * np.ones([L], dtype=np.int64)
N = np.sum(N_k)
kn = np.zeros([L, N], dtype=np.float64)
i = 0
for k in range(K): # loop through the old K; some might be zero
for ik in range(N_k[k]):
kn[:, i] = kln[k, :, ik]
i += 1
if cleanup:
del(kln) # very big, let's explicitly delete
return kn
def kn_to_n(kn, N_k = None, cleanup = False):
""" Convert KxN_max array to N array
Parameters
----------
u_kn: np.ndarray, float, shape=(KxN_max)
N_k (optional) : np.array
the N_k matrix from the previous formatting form
cleanup (optional) : bool
optional command to clean up, since u_kln can get very large
Outputs
-------
u_n: np.ndarray, float, shape=(N)
"""
#print "warning: KxN arrays deprecated; convering into new preferred N shape"
# rewrite into kn shape
# rewrite into kn shape
[K, N_max] = np.shape(kn)
if N_k is None:
# We assume that all N_k are N_max.
# Not really an easier way to do this without being given the answer.
N_k = N_max*np.ones([K], dtype=np.int64)
N = np.sum(N_k)
n = np.zeros([N], dtype=np.float64)
i = 0
for k in range(K): # loop through the old K; some might be zero
for ik in range(N_k[k]):
n[i] = kn[k, ik]
i += 1
if cleanup:
del(kn) # very big, let's explicitly delete
return n
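# Hedged usage sketch (shapes below are illustrative, not from pymbar's docs):
# >>> u_kln = np.random.rand(3, 3, 5)    # K = L = 3 states, N_max = 5 samples
# >>> kln_to_kn(u_kln).shape             # flattened to (L, N) with N = L * N_max
# (3, 15)
# >>> kn_to_n(kln_to_kn(u_kln)).shape    # flattened again to (N,)
# (45,)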
def ensure_type(val, dtype, ndim, name, length=None, can_be_none=False, shape=None,
warn_on_cast=True, add_newaxis_on_deficient_ndim=False):
"""Typecheck the size, shape and dtype of a numpy array, with optional
casting.
Parameters
----------
    val : {np.ndarray, None}
        The array to check
    dtype : {np.dtype, str}
        The dtype you'd like the array to have
ndim : int
The number of dimensions you'd like the array to have
name : str
name of the array. This is used when throwing exceptions, so that
we can describe to the user which array is messed up.
length : int, optional
How long should the array be?
can_be_none : bool
Is ``val == None`` acceptable?
shape : tuple, optional
What should be shape of the array be? If the provided tuple has
Nones in it, those will be semantically interpreted as matching
any length in that dimension. So, for example, using the shape
spec ``(None, None, 3)`` will ensure that the last dimension is of
length three without constraining the first two dimensions
warn_on_cast : bool, default=True
Raise a warning when the dtypes don't match and a cast is done.
    add_newaxis_on_deficient_ndim : bool, default=False
        Add a new axis to the beginning of the array if the number of
dimensions is deficient by one compared to your specification. For
instance, if you're trying to get out an array of ``ndim == 3``,
but the user provides an array of ``shape == (10, 10)``, a new axis will
be created with length 1 in front, so that the return value is of
shape ``(1, 10, 10)``.
Notes
-----
The returned value will always be C-contiguous.
Returns
-------
typechecked_val : np.ndarray, None
If `val=None` and `can_be_none=True`, then this will return None.
Otherwise, it will return val (or a copy of val). If the dtype wasn't right,
it'll be casted to the right shape. If the array was not C-contiguous, it'll
be copied as well.
"""
if can_be_none and val is None:
return None
if not isinstance(val, np.ndarray):
# special case: if the user is looking for a 1d array, and
# they request newaxis upconversion, and provided a scalar
# then we should reshape the scalar to be a 1d length-1 array
if add_newaxis_on_deficient_ndim and ndim == 1 and np.isscalar(val):
val = np.array([val])
else:
raise TypeError(("%s must be numpy array. "
" You supplied type %s" % (name, type(val))))
if warn_on_cast and val.dtype != dtype:
warnings.warn("Casting %s dtype=%s to %s " % (name, val.dtype, dtype),
TypeCastPerformanceWarning)
if not val.ndim == ndim:
if add_newaxis_on_deficient_ndim and val.ndim + 1 == ndim:
val = val[np.newaxis, ...]
else:
raise ValueError(("%s must be ndim %s. "
"You supplied %s" % (name, ndim, val.ndim)))
val = np.ascontiguousarray(val, dtype=dtype)
if length is not None and len(val) != length:
raise ValueError(("%s must be length %s. "
"You supplied %s" % (name, length, len(val))))
if shape is not None:
        # the shape spec given by the user can look like (None, None, 3)
# which indicates that ANY length is accepted in dimension 0 or
# dimension 1
        sentinel = object()
error = ValueError(("%s must be shape %s. You supplied "
"%s" % (name, str(shape).replace('None', 'Any'), val.shape)))
        for a, b in zip_longest(val.shape, shape, fillvalue=sentinel):
            if a is sentinel or b is sentinel:
                # if the sentinel was reached, it means that the ndim didn't
                # match or something. this really shouldn't happen
raise error
if b is None:
# if the user's shape spec has a None in it, it matches anything
continue
if a != b:
# check for equality
raise error
return val
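# Hedged usage sketch (illustrative, not from pymbar's test suite):
# >>> ensure_type(np.zeros((10, 10)), np.float64, ndim=3, name='x',
# ...             add_newaxis_on_deficient_ndim=True).shape
# (1, 10, 10)
# A scalar is likewise promoted to a length-1 1d array when ndim == 1:
# >>> ensure_type(3.0, np.float64, ndim=1, name='y',
# ...             add_newaxis_on_deficient_ndim=True)
# array([3.])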
def _logsum(a_n):
"""Compute the log of a sum of exponentiated terms exp(a_n) in a numerically-stable manner.
NOTE: this function has been deprecated in favor of logsumexp.
Parameters
----------
a_n : np.ndarray, shape=(n_samples)
a_n[n] is the nth exponential argument
Returns
-------
    log_sum : float
        The log of the sum of the exponentiated terms.
Notes
-----
_logsum a_n = max_arg + \log \sum_{n=1}^N \exp[a_n - max_arg]
where max_arg = max_n a_n. This is mathematically (but not numerically) equivalent to
_logsum a_n = \log \sum_{n=1}^N \exp[a_n]
Example
-------
>>> a_n = np.array([0.0, 1.0, 1.2], np.float64)
>>> print('%.3e' % _logsum(a_n))
1.951e+00
"""
# Compute the maximum argument.
max_log_term = np.max(a_n)
# Compute the reduced terms.
terms = np.exp(a_n - max_log_term)
# Compute the log sum.
log_sum = np.log(np.sum(terms)) + max_log_term
return log_sum
def logsumexp(a, axis=None, b=None, use_numexpr=True):
"""Compute the log of the sum of exponentials of input elements.
Parameters
----------
a : array_like
Input array.
axis : None or int, optional, default=None
Axis or axes over which the sum is taken. By default `axis` is None,
and all elements are summed.
b : array-like, optional
Scaling factor for exp(`a`) must be of the same shape as `a` or
broadcastable to `a`.
use_numexpr : bool, optional, default=True
If True, use the numexpr library to speed up the calculation, which
can give a 2-4X speedup when working with large arrays.
Returns
-------
res : ndarray
The result, ``log(sum(exp(a)))`` calculated in a numerically
more stable way. If `b` is given then ``log(sum(b*exp(a)))``
is returned.
See Also
--------
numpy.logaddexp, numpy.logaddexp2, scipy.misc.logsumexp
Notes
-----
This is based on scipy.misc.logsumexp but with optional numexpr
support for improved performance.
"""
a = np.asarray(a)
a_max = np.amax(a, axis=axis, keepdims=True)
if a_max.ndim > 0:
        a_max[~np.isfinite(a_max)] = 0
"""
Tests for :mod:`numpy.core.numeric`.
Does not include tests which fall under ``array_constructors``.
"""
from typing import List
import numpy as np
class SubClass(np.ndarray):
...
i8: np.int64
A: np.ndarray
B: List[int]
C: SubClass
reveal_type(np.count_nonzero(i8)) # E: int
reveal_type(np.count_nonzero(A)) # E: int
reveal_type(np.count_nonzero(B)) # E: int
reveal_type(np.count_nonzero(A, keepdims=True)) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, Any]]
reveal_type(np.count_nonzero(A, axis=0)) # E: Union[numpy.signedinteger[Any], numpy.ndarray[Any, Any]]
reveal_type(np.isfortran(i8)) # E: bool
reveal_type(np.isfortran(A)) # E: bool
reveal_type(np.argwhere(i8)) # E: numpy.ndarray[Any, Any]
reveal_type(np.argwhere(A)) # E: numpy.ndarray[Any, Any]
reveal_type(np.flatnonzero(i8)) # E: numpy.ndarray[Any, Any]
reveal_type(np.flatnonzero(A)) # E: numpy.ndarray[Any, Any]
reveal_type(np.correlate(B, A, mode="valid")) # E: numpy.ndarray[Any, Any]
reveal_type(np.correlate(A, A, mode="same")) # E: numpy.ndarray[Any, Any]
reveal_type(np.convolve(B, A, mode="valid")) # E: numpy.ndarray[Any, Any]
reveal_type(np.convolve(A, A, mode="same")) # E: numpy.ndarray[Any, Any]
reveal_type(np.outer(i8, A)) # E: numpy.ndarray[Any, Any]
reveal_type(np.outer(B, A)) # E: numpy.ndarray[Any, Any]
reveal_type(np.outer(A, A)) # E: numpy.ndarray[Any, Any]
reveal_type(np.outer(A, A, out=C)) # E: SubClass
reveal_type(np.tensordot(B, A)) # E: numpy.ndarray[Any, Any]
reveal_type(np.tensordot(A, A)) # E: numpy.ndarray[Any, Any]
reveal_type(np.tensordot(A, A, axes=0)) # E: numpy.ndarray[Any, Any]
reveal_type(np.tensordot(A, A, axes=(0, 1))) # E: numpy.ndarray[Any, Any]
reveal_type(np.isscalar(i8)) # E: bool
reveal_type(np.isscalar(A)) # E: bool
reveal_type(np.isscalar(B)) # E: bool
reveal_type(np.roll(A, 1)) # E: numpy.ndarray[Any, Any]
reveal_type(np.roll(A, (1, 2))) # E: numpy.ndarray[Any, Any]
reveal_type(np.roll(B, 1)) # E: numpy.ndarray[Any, Any]
reveal_type(np.rollaxis(A, 0, 1)) # E: numpy.ndarray[Any, Any]
reveal_type(np.moveaxis(A, 0, 1)) # E: numpy.ndarray[Any, Any]
reveal_type(np.moveaxis(A, (0, 1), (1, 2))) # E: numpy.ndarray[Any, Any]
reveal_type(np.cross(B, A)) # E: numpy.ndarray[Any, Any]
reveal_type(np.cross(A, A)) # E: numpy.ndarray[Any, Any]
reveal_type(np.indices([0, 1, 2])) # E: numpy.ndarray[Any, Any]
reveal_type(np.indices([0, 1, 2], sparse=False)) # E: numpy.ndarray[Any, Any]
reveal_type(np.indices([0, 1, 2], sparse=True)) # E: tuple[numpy.ndarray[Any, Any]]
reveal_type(np.binary_repr(1)) # E: str
reveal_type(np.base_repr(1)) # E: str
reveal_type(np.allclose(i8, A)) # E: bool
reveal_type(np.allclose(B, A)) # E: bool
reveal_type(np.allclose(A, A)) # E: bool
reveal_type(np.isclose(i8, A)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]]
reveal_type(np.isclose(B, A)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]]
reveal_type(np.isclose(A, A)) # E: Union[numpy.bool_, numpy.ndarray[Any, Any]]
reveal_type(np.array_equal(i8, A)) # E: bool
reveal_type(np.array_equal(B, A))  # E: bool
from openmdao.api import ImplicitComponent
import numpy as np
from scipy.interpolate import Akima1DInterpolator as Akima
""" Create tables for table lookup functions """
# Small-engine polytropic efficiency values
Wc_SE = np.array([0, 0.205, 0.63, 1.0, 1.5, 2., 2.5, 3., 4., 5., 30., 200])
# TGL 0 - current technology level
EtaPoly_SE0 =np.array([0, 0.82, 0.86, 0.871, 0.881, 0.885, 0.8875, 0.889, 0.892, 0.894, 0.895, 0.895])
# TGL 1 - next generation technology level ~2% better
EtaPoly_SE1 = np.array([0, 0.84, 0.88, 0.891, 0.901, 0.905, 0.9075, 0.909, 0.912, 0.914, 0.915, 0.915])
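# Hedged sketch (the interpolator name below is illustrative, not from the
# original module): the tables are meant to be wrapped in Akima splines so the
# polytropic efficiency can be looked up at any corrected flow Wc.
eta_poly_se0_interp = Akima(Wc_SE, EtaPoly_SE0)  # e.g. eta_poly_se0_interp(1.2)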
#!/usr/bin/env python
# Copyright 2020 Biomedical Imaging Group Rotterdam, Departments of
# Medical Informatics and Radiology, Erasmus MC, Rotterdam, The Netherlands
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import scipy.io as sio
import WORC.IOparser.file_io as wio
import WORC.IOparser.config_io_combat as cio
import numpy as np
import random
import pandas as pd
from WORC.addexceptions import WORCValueError, WORCKeyError
import tempfile
from sys import platform
from WORC.featureprocessing.VarianceThreshold import selfeat_variance
from sklearn.preprocessing import StandardScaler
from neuroCombat import neuroCombat
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from WORC.featureprocessing.Imputer import Imputer
def ComBat(features_train_in, labels_train, config, features_train_out,
features_test_in=None, labels_test=None, features_test_out=None,
VarianceThreshold=True, scaler=False, logarithmic=False):
"""
Apply ComBat feature harmonization.
Based on: https://github.com/Jfortin1/ComBatHarmonization
"""
# Load the config
print('############################################################')
print('# Initializing ComBat. #')
print('############################################################\n')
config = cio.load_config(config)
excluded_features = config['ComBat']['excluded_features']
    # If mod is given, then also load the moderating labels
if config['ComBat']['mod'][0] == '[]':
label_names = config['ComBat']['batch']
else:
label_names = config['ComBat']['batch'] + config['ComBat']['mod']
# Load the features for both training and testing, match with batch and mod parameters
label_data_train, image_features_train =\
wio.load_features(features_train_in, patientinfo=labels_train,
label_type=label_names)
feature_labels = image_features_train[0][1]
image_features_train = [i[0] for i in image_features_train]
label_data_train['patient_IDs'] = list(label_data_train['patient_IDs'])
# Exclude features
if excluded_features:
print(f'\t Excluding features containing: {excluded_features}')
# Determine indices of excluded features
included_feature_indices = []
excluded_feature_indices = []
for fnum, i in enumerate(feature_labels):
if not any(e in i for e in excluded_features):
included_feature_indices.append(fnum)
else:
excluded_feature_indices.append(fnum)
# Actually exclude the features
image_features_train_combat = [np.asarray(i)[included_feature_indices].tolist() for i in image_features_train]
feature_labels_combat = np.asarray(feature_labels)[included_feature_indices].tolist()
image_features_train_noncombat = [np.asarray(i)[excluded_feature_indices].tolist() for i in image_features_train]
feature_labels_noncombat = np.asarray(feature_labels)[excluded_feature_indices].tolist()
else:
image_features_train_combat = image_features_train
feature_labels_combat = feature_labels.tolist()
image_features_train_noncombat = []
feature_labels_noncombat = []
# Detect NaNs, otherwise first feature imputation is required
    if any(np.isnan(a) for a in np.asarray(image_features_train_combat).flatten()):
import openmdao.api as om
import numpy as np
from openconcept.utilities.math.integrals import Integrator
import warnings
class BandolierCoolingSystem(om.ExplicitComponent):
"""
    Computes battery heat transfer for a parametric battery
based on Tesla's Model 3 design.
Assumptions:
Heat generated uniformly in the cell
Weight per cell and thermal resistance stay constant
even as specific energy varies parametrically
(this means that cell count is constant with pack WEIGHT,
not pack ENERGY as technology improves)
Cylindrical cells attached to Tesla-style thermal ribbon
Liquid cooling
Heat transfer through axial direction only (not baseplate)
2170 cells (21 mm diameter, 70mm tall)
Battery thermal model assumes unsteady cell temperature,
quasi-steady temperature gradients
Inputs
------
q_in : float
Heat generation rate in the battery (vector, W)
T_in : float
Coolant inlet temperature (vector, K)
T_battery : float
Volume averaged battery temperature (vector, K)
mdot_coolant : float
Mass flow rate of coolant through the bandolier (vector, kg/s)
battery_weight : float
Weight of the battery (overall). Default 100kg (scalar)
n_cpb : float
        Number of cells long per "bandolier"; actual count is 2x (scalar, default 82, Tesla)
t_channel : float
Thickness (width) of the cooling channel in the bandolier
(scalar, default 1mm)
Outputs
-------
dTdt : float
Time derivative dT/dt (Tbar in the paper) (vector, K/s)
T_surface : float
Surface temp of the battery (vector, K)
T_core : float
Center temp of the battery (vector, K)
q : float
Heat transfer rate from the motor to the fluid (vector, W)
T_out : float
Outlet fluid temperature (vector, K)
Options
-------
num_nodes : float
The number of analysis points to run
coolant_specific_heat : float
Specific heat of the coolant (J/kg/K) (default 3801, glycol/water)
fluid_k : float
Thermal conductivity of the coolant (W/m/K)
nusselt : float
Nusselt number of the coolant channel (default 7.54 for uniform surf temp)
cell_kr : float
Thermal conductivity of the cell in the radial direction (W/m/k)
cell_diameter : float
Battery diameter (default 21mm for 2170 cell)
cell_height : float
Battery height (default 70mm for 2170 cell)
cell_mass : float
Battery weight (default 70g for 2170 cell)
cell_specific_heat : float
        Mass average specific heat of the battery (default 875, LiIon cylindrical cell)
battery_weight_fraction : float
        Fraction of battery by weight that is cells (default 0.65; Tesla's ~0.72 knocked down a bit)
"""
def initialize(self):
self.options.declare('num_nodes', default=1, desc='Number of analysis points')
self.options.declare('coolant_specific_heat', default=3801, desc='Coolant specific heat in J/kg/K')
self.options.declare('fluid_k', default=0.405, desc='Thermal conductivity of the fluid in W / mK')
self.options.declare('nusselt', default=7.54, desc='Hydraulic diameter Nusselt number')
self.options.declare('cell_kr', default=0.3) # 0.455 for an 18650 cell, knocked down a bit
self.options.declare('cell_diameter', default=0.021)
self.options.declare('cell_height', default=0.070)
self.options.declare('cell_mass', default=0.070)
self.options.declare('cell_specific_heat', default=875.)
self.options.declare('battery_weight_fraction', default=0.65)
def setup(self):
nn = self.options['num_nodes']
self.add_input('q_in', shape=(nn,), units='W', val=0.0)
self.add_input('T_in', shape=(nn,), units='K', val=300.)
self.add_input('T_battery', shape=(nn,), units='K', val=300.)
self.add_input('mdot_coolant', shape=(nn,), units='kg/s', val=0.20)
self.add_input('battery_weight', units='kg', val=478.)
self.add_input('n_cpb', units=None, val=82.)
self.add_input('t_channel', units='m', val=0.0005)
self.add_output('dTdt', shape=(nn,), units='K/s', tags=['integrate', 'state_name:T_battery', 'state_units:K', 'state_val:300.0', 'state_promotes:True'])
self.add_output('T_surface', shape=(nn,), units='K', lower=1e-10)
self.add_output('T_core', shape=(nn,), units='K', lower=1e-10)
self.add_output('q', shape=(nn,), units='W')
self.add_output('T_out', shape=(nn,), units='K', val=300, lower=1e-10)
self.declare_partials(['*'], ['*'], method='cs')
def compute(self, inputs, outputs):
nn = self.options['num_nodes']
n_cells = inputs['battery_weight'] * self.options['battery_weight_fraction'] / self.options['cell_mass']
n_bandoliers = n_cells / inputs['n_cpb'] / 2
mdot_b = inputs['mdot_coolant'] / n_bandoliers
q_cell = inputs['q_in'] / n_cells
hconv = self.options['nusselt'] * self.options['fluid_k'] / 2 / inputs['t_channel']
Hc = self.options['cell_height']
Dc = self.options['cell_diameter']
mc = self.options['cell_mass']
krc = self.options['cell_kr']
cpc = self.options['cell_specific_heat']
L_bandolier = inputs['n_cpb'] * Dc
cpf = self.options['coolant_specific_heat'] # of the coolant
A_heat_trans = Hc * L_bandolier * 2 # two sides of the tape
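        # epsilon-NTU model for a single stream against a (nearly) isothermal
        # surface: NTU = h * A / (mdot * cp) and effectiveness = 1 - exp(-NTU).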
NTU = hconv * A_heat_trans / mdot_b / cpf
Kcell = mdot_b * cpf * (1 - np.exp(-NTU)) / 2 / inputs['n_cpb'] # divide out the total bandolier convection by 2 * n_cpb cells
# the convective heat transfer is (Ts - Tin) * Kcell
PI = np.pi
Tbar = inputs['T_battery']
Rc = Dc / 2
K_cyl = 8*np.pi*Hc*krc
Ts = (K_cyl * Tbar + Kcell * inputs['T_in']) / (K_cyl + Kcell)
outputs['T_surface'] = Ts
q_conv = (Ts - inputs['T_in']) * Kcell * n_cells
outputs['dTdt'] = (q_cell - (Ts - inputs['T_in']) * Kcell) / mc / cpc # todo check that this quantity matches convection
outputs['q'] = q_conv
qcheck = (Tbar - Ts) * K_cyl
# UAcomb = 1/(1/hconv/A_heat_trans+1/K_cyl/2/inputs['n_cpb'])
# qcheck2 = (Tbar - inputs['T_in']) * mdot_b * cpf * (1 - np.exp(-UAcomb/mdot_b/cpf)) / 2 / inputs['n_cpb']
# if np.sum(np.abs(qcheck - outputs['q']/n_cells)) > 1e-5:
# # the heat flux across the cell is not equal to the heat flux due to convection
# raise ValueError('The surface temperature solution appears to be wrong')
outputs['T_out'] = inputs['T_in'] + outputs['q'] / inputs['mdot_coolant'] / cpf
outputs['T_core'] = (Tbar - Ts) + Tbar
class LiquidCooledBattery(om.Group):
"""A battery with liquid cooling
Inputs
------
q_in : float
Heat produced by the operating component (vector, W)
mdot_coolant : float
Coolant mass flow rate (vector, kg/s)
T_in : float
Instantaneous coolant inflow temperature (vector, K)
battery_weight : float
Battery weight (scalar, kg)
n_cpb : float
        Number of cells long per "bandolier"; actual count is 2x (scalar, default 82, Tesla)
t_channel : float
Thickness (width) of the cooling channel in the bandolier
(scalar, default 1mm)
T_initial : float
Initial temperature of the battery (only required in thermal mass mode) (scalar, K)
duration : float
Duration of mission segment, only required in unsteady mode
Outputs
-------
T_out : float
Instantaneous coolant outlet temperature (vector, K)
T: float
Battery volume averaged temperature (vector, K)
T_core : float
Battery core temperature (vector, K)
T_surface : float
Battery surface temperature (vector, K)
Options
-------
num_nodes : int
Number of analysis points to run
quasi_steady : bool
Whether or not to treat the component as having thermal mass
num_nodes : float
The number of analysis points to run
coolant_specific_heat : float
Specific heat of the coolant (J/kg/K) (default 3801, glycol/water)
fluid_k : float
Thermal conductivity of the coolant (W/m/K)
nusselt : float
Nusselt number of the coolant channel (default 7.54 for uniform surf temp)
cell_kr : float
Thermal conductivity of the cell in the radial direction (W/m/k)
cell_diameter : float
Battery diameter (default 21mm for 2170 cell)
cell_height : float
Battery height (default 70mm for 2170 cell)
cell_mass : float
Battery weight (default 70g for 2170 cell)
cell_specific_heat : float
        Mass average specific heat of the battery (default 875, LiIon cylindrical cell)
battery_weight_fraction : float
        Fraction of battery by weight that is cells (default 0.65; Tesla's ~0.72 knocked down a bit)
"""
def initialize(self):
self.options.declare('quasi_steady', default=False, desc='Treat the component as quasi-steady or with thermal mass')
self.options.declare('num_nodes', default=1, desc='Number of quasi-steady points to runs')
self.options.declare('coolant_specific_heat', default=3801, desc='Coolant specific heat in J/kg/K')
self.options.declare('fluid_k', default=0.405, desc='Thermal conductivity of the fluid in W / mK')
self.options.declare('nusselt', default=7.54, desc='Hydraulic diameter Nusselt number')
self.options.declare('cell_kr', default=0.3) # 0.455 for an 18650 cell, knocked down a bit
self.options.declare('cell_diameter', default=0.021)
self.options.declare('cell_height', default=0.070)
self.options.declare('cell_mass', default=0.070)
self.options.declare('cell_specific_heat', default=875.)
self.options.declare('battery_weight_fraction', default=0.65)
def setup(self):
nn = self.options['num_nodes']
quasi_steady = self.options['quasi_steady']
self.add_subsystem('hex', BandolierCoolingSystem(num_nodes=nn,
coolant_specific_heat=self.options['coolant_specific_heat'],
fluid_k=self.options['fluid_k'],
nusselt=self.options['nusselt'],
cell_kr=self.options['cell_kr'],
cell_diameter=self.options['cell_diameter'],
cell_height=self.options['cell_height'],
cell_mass=self.options['cell_mass'],
cell_specific_heat=self.options['cell_specific_heat'],
battery_weight_fraction=self.options['battery_weight_fraction']),
promotes_inputs=['q_in', 'mdot_coolant', 'T_in', ('T_battery', 'T'), 'battery_weight', 'n_cpb', 't_channel'],
promotes_outputs=['T_core', 'T_surface', 'T_out', 'dTdt'])
if not quasi_steady:
ode_integ = self.add_subsystem('ode_integ', Integrator(num_nodes=nn, diff_units='s', method='simpson', time_setup='duration'),
promotes_outputs=['*'], promotes_inputs=['*'])
ode_integ.add_integrand('T', rate_name='dTdt', units='K', lower=1e-10)
else:
self.add_subsystem('thermal_bal',
om.BalanceComp('T', eq_units='K/s', lhs_name='dTdt', rhs_val=0.0, units='K', lower=1.0, val=299.*np.ones((nn,))),
promotes_inputs=['dTdt'],
promotes_outputs=['T'])
class MotorCoolingJacket(om.ExplicitComponent):
"""
Computes motor winding temperature assuming
well-designed, high-power-density aerospace motor.
This component is based on the following assumptions:
- 2020 technology level
- 200kW-1MW class inrunner PM motor
- Liquid cooling of the stators
- "Reasonable" coolant flow rates (component will validate this)
    - Thermal performance similar to the Siemens SP200D motor
The component assumes a constant heat transfer coefficient based
on the surface area of the motor casing (not counting front and rear faces)
The MagniX Magni 250/500 and Siemens SP200D motors were measured
using rough photogrammetry.
Magni250: 280kW rated power, ~0.559m OD, 0.2m case "depth" (along thrust axis)
Magni500: 560kW rated power, ~0.652m OD, 0.4m case "depth"
Siemens SP200D: 200kW rated power, ~0.63m OD, ~0.16 case "depth"
Based on these dimensions I assume 650kW per square meter
of casing surface area. This includes only the cylindrical portion,
not the front and rear motor faces.
Using a thermal FEM image of the SP200D, I estimate
a temperature rise of 23K from coolant inlet temperature (~85C)
to winding max temp (~108C) at the steady state operating point.
With 95% efficiency at 200kW, this is about 1373 W / m^2 casing area / K.
We'll reduce that somewhat since this is a direct oil cooling system,
and assume 1100 W/m^2/K instead.
Dividing 1.1 kW/m^2/K by 650kWrated/m^2 gives: 1.69e-3 kW / kWrated / K
At full rated power and 95% efficiency, this is 29.5C steady state temp rise
    which is the right order of magnitude.
Inputs
------
q_in : float
Heat production rate in the motor (vector, W)
T_in : float
Coolant inlet temperature (vector, K)
T : float
Temperature of the motor windings (vector, K)
mdot_coolant : float
Mass flow rate of the coolant (vector, kg/s)
power_rating : float
Rated steady state power of the motor (scalar, W)
motor_weight : float
Weight of electric motor (scalar, kg)
Outputs
-------
dTdt : float
Time derivative dT/dt (vector, K/s)
q : float
Heat transfer rate from the motor to the fluid (vector, W)
T_out : float
Outlet fluid temperature (vector, K)
Options
-------
num_nodes : float
The number of analysis points to run
coolant_specific_heat : float
Specific heat of the coolant (J/kg/K) (default 3801, glycol/water)
case_cooling_coefficient : float
Watts of heat transfer per square meter of case surface area per K
temperature differential (default 1100 W/m^2/K)
case_area_coefficient : float
rated motor power per square meter of case surface area
(default 650,000 W / m^2)
motor_specific_heat : float
Specific heat of the motor casing (J/kg/K) (default 921, alu)
"""
def initialize(self):
self.options.declare('num_nodes', default=1, desc='Number of analysis points')
self.options.declare('coolant_specific_heat', default=3801, desc='Specific heat in J/kg/K')
self.options.declare('case_cooling_coefficient', default=1100.)
self.options.declare('case_area_coefficient', default=650000.)
self.options.declare('motor_specific_heat', default=921, desc='Specific heat in J/kg/K - default 921 for aluminum')
def setup(self):
nn = self.options['num_nodes']
arange = np.arange(nn)
self.add_input('q_in', shape=(nn,), units='W', val=0.0)
self.add_input('T_in', shape=(nn,), units='K', val=330)
self.add_input('T', shape=(nn,), units='K', val=359.546)
self.add_input('mdot_coolant', shape=(nn,), units='kg/s', val=1.0)
self.add_input('power_rating', units='W', val=2e5)
self.add_input('motor_weight', units='kg', val=100)
self.add_output('q', shape=(nn,), units='W')
self.add_output('T_out', shape=(nn,), units='K', val=300, lower=1e-10)
self.add_output('dTdt', shape=(nn,), units='K/s', tags=['integrate', 'state_name:T_motor', 'state_units:K', 'state_val:300.0', 'state_promotes:True'])
self.declare_partials(['T_out','q','dTdt'], ['power_rating'], rows=arange, cols=np.zeros((nn,)))
self.declare_partials(['dTdt'], ['motor_weight'], rows=arange, cols=np.zeros((nn,)))
self.declare_partials(['T_out','q','dTdt'], ['T_in', 'T','mdot_coolant'], rows=arange, cols=arange)
self.declare_partials(['dTdt'], ['q_in'], rows=arange, cols=arange)
def compute(self, inputs, outputs):
const = self.options['case_cooling_coefficient'] / self.options['case_area_coefficient']
NTU = const * inputs['power_rating'] / inputs['mdot_coolant'] / self.options['coolant_specific_heat']
effectiveness = 1 - np.exp(-NTU)
heat_transfer = (inputs['T'] - inputs['T_in']) * effectiveness * inputs['mdot_coolant'] * self.options['coolant_specific_heat']
outputs['q'] = heat_transfer
outputs['T_out'] = inputs['T_in'] + heat_transfer / inputs['mdot_coolant'] / self.options['coolant_specific_heat']
outputs['dTdt'] = (inputs['q_in'] - outputs['q']) / inputs['motor_weight'] / self.options['motor_specific_heat']
def compute_partials(self, inputs, J):
nn = self.options['num_nodes']
cp = self.options['coolant_specific_heat']
mdot = inputs['mdot_coolant']
const = self.options['case_cooling_coefficient'] / self.options['case_area_coefficient']
NTU = const * inputs['power_rating'] / mdot / cp
dNTU_dP = const / mdot / cp
dNTU_dmdot = -const * inputs['power_rating'] / mdot **2 / cp
effectiveness = 1 - np.exp(-NTU)
deff_dP = np.exp(-NTU) * dNTU_dP
deff_dmdot = np.exp(-NTU) * dNTU_dmdot
heat_transfer = (inputs['T'] - inputs['T_in']) * effectiveness * inputs['mdot_coolant'] * self.options['coolant_specific_heat']
J['q', 'T'] = effectiveness * mdot * cp
J['q', 'T_in'] = - effectiveness * mdot * cp
J['q', 'power_rating'] = (inputs['T'] - inputs['T_in']) * deff_dP * mdot * cp
J['q', 'mdot_coolant'] = (inputs['T'] - inputs['T_in']) * cp * (effectiveness + deff_dmdot * mdot)
J['T_out', 'T'] = J['q','T'] / mdot / cp
J['T_out', 'T_in'] = np.ones(nn) + J['q','T_in'] / mdot / cp
J['T_out', 'power_rating'] = J['q', 'power_rating'] / mdot / cp
J['T_out', 'mdot_coolant'] = (J['q', 'mdot_coolant'] * mdot - heat_transfer) / cp / mdot ** 2
J['dTdt', 'q_in'] = 1 / inputs['motor_weight'] / self.options['motor_specific_heat']
J['dTdt', 'T'] = -J['q', 'T'] / inputs['motor_weight'] / self.options['motor_specific_heat']
J['dTdt', 'T_in'] = -J['q', 'T_in'] / inputs['motor_weight'] / self.options['motor_specific_heat']
J['dTdt', 'power_rating'] = -J['q', 'power_rating'] / inputs['motor_weight'] / self.options['motor_specific_heat']
J['dTdt', 'mdot_coolant'] = -J['q', 'mdot_coolant'] / inputs['motor_weight'] / self.options['motor_specific_heat']
J['dTdt', 'motor_weight'] = -(inputs['q_in'] - heat_transfer) / inputs['motor_weight']**2 / self.options['motor_specific_heat']
class LiquidCooledMotor(om.Group):
"""A component (heat producing) with thermal mass
cooled by a cold plate.
Inputs
------
q_in : float
Heat produced by the operating component (vector, W)
mdot_coolant : float
Coolant mass flow rate (vector, kg/s)
T_in : float
Instantaneous coolant inflow temperature (vector, K)
motor_weight : float
Object mass (only required in thermal mass mode) (scalar, kg)
T_initial : float
Initial temperature of the cold plate (only required in thermal mass mode) / object (scalar, K)
duration : float
Duration of mission segment, only required in unsteady mode
power_rating : float
Rated power of the motor (scalar, kW)
Outputs
-------
T_out : float
Instantaneous coolant outlet temperature (vector, K)
T: float
Windings temperature (vector, K)
Options
-------
motor_specific_heat : float
Specific heat capacity of the object in J / kg / K (default 921 = aluminum)
coolant_specific_heat : float
Specific heat capacity of the coolant in J / kg / K (default 3801, glycol/water)
num_nodes : int
Number of analysis points to run
quasi_steady : bool
Whether or not to treat the component as having thermal mass
case_cooling_coefficient : float
Watts of heat transfer per square meter of case surface area per K
temperature differential (default 1100 W/m^2/K)
"""
def initialize(self):
self.options.declare('motor_specific_heat', default=921.0, desc='Specific heat in J/kg/K')
self.options.declare('coolant_specific_heat', default=3801, desc='Specific heat in J/kg/K')
self.options.declare('quasi_steady', default=False, desc='Treat the component as quasi-steady or with thermal mass')
self.options.declare('num_nodes', default=1, desc='Number of quasi-steady points to runs')
self.options.declare('case_cooling_coefficient', default=1100.)
def setup(self):
nn = self.options['num_nodes']
quasi_steady = self.options['quasi_steady']
self.add_subsystem('hex',
MotorCoolingJacket(num_nodes=nn, coolant_specific_heat=self.options['coolant_specific_heat'],
motor_specific_heat=self.options['motor_specific_heat'],
case_cooling_coefficient=self.options['case_cooling_coefficient']),
promotes_inputs=['q_in','T_in', 'T','power_rating','mdot_coolant','motor_weight'],
promotes_outputs=['T_out', 'dTdt'])
if not quasi_steady:
ode_integ = self.add_subsystem('ode_integ', Integrator(num_nodes=nn, diff_units='s', method='simpson', time_setup='duration'),
promotes_outputs=['*'], promotes_inputs=['*'])
ode_integ.add_integrand('T', rate_name='dTdt', units='K', lower=1e-10)
else:
self.add_subsystem('thermal_bal',
om.BalanceComp('T', eq_units='K/s', lhs_name='dTdt', rhs_val=0.0, units='K', lower=1.0, val=299.*np.ones((nn,))),
promotes_inputs=['dTdt'],
promotes_outputs=['T'])
class SimplePump(om.ExplicitComponent):
"""
A pump that circulates coolant against pressure.
The default parameters are based on a survey of commercial
airplane fuel pumps of a variety of makes and models.
Inputs
------
power_rating : float
Maximum rated electrical power (scalar, W)
mdot_coolant : float
Coolant mass flow rate (vector, kg/s)
rho_coolant : float
Coolant density (vector, kg/m3)
delta_p : float
Pressure rise provided by the pump (vector, kg/s)
Outputs
-------
elec_load : float
Electricity used by the pump (vector, W)
component_weight : float
Pump weight (scalar, kg)
component_sizing_margin : float
Fraction of total power rating used via elec_load (vector, dimensionless)
Options
-------
num_nodes : int
Number of analysis points to run (sets vec length; default 1)
efficiency : float
Pump electrical + mech efficiency. Sensible range 0.0 to 1.0 (default 0.35)
weight_base : float
Base weight of pump, doesn't change with power rating (default 0)
weight_inc : float
Incremental weight of pump, scales linearly with power rating (default 1/450 kg/W)
"""
def initialize(self):
self.options.declare('num_nodes', default=1, desc='Number of flight/control conditions')
self.options.declare('efficiency', default=0.35, desc='Efficiency (dimensionless)')
self.options.declare('weight_base', default=0.0, desc='Pump base weight')
self.options.declare('weight_inc', default=1/450, desc='Incremental pump weight (kg/W)')
def setup(self):
nn = self.options['num_nodes']
eta = self.options['efficiency']
weight_inc = self.options['weight_inc']
self.add_input('power_rating', units='W', desc='Pump electrical power rating')
self.add_input('mdot_coolant', units='kg/s', desc='Coolant mass flow rate', val=np.ones((nn,)))
self.add_input('delta_p', units='Pa', desc='Pump pressure rise', val=np.ones((nn,)))
        self.add_input('rho_coolant', units='kg/m**3', desc='Coolant density', val=np.ones((nn,)))
import time
import scipy
import matplotlib.pyplot as plt
import numpy as np
import warnings
import matplotlib
import strax
import straxen
import pickle
from numba import njit
from sklearn.cluster import KMeans
from mpl_toolkits.mplot3d import Axes3D
from scipy.stats import sem
from IPython.display import clear_output
font = {'size':17}
matplotlib.rc('font',**font)
warnings.filterwarnings("ignore")
plt.rcParams['figure.figsize'] = 10, 10
inch = 2.54 # cm
pmt_surface=(3*inch)**2*np.pi/4.
pmt_to_lxe = 7.0
npmt_top_array = 253  # 494
global pmt_pos
pmt_pos = straxen.pmt_positions()
pmt_pos = list(zip(pmt_pos['x'].values,pmt_pos['y'].values,np.repeat(pmt_to_lxe, npmt_top_array)))
pmt_pos = np.asarray(pmt_pos)
#
# <NAME>, Spring and Summer of 2021
#
def clustering_for_k(k, list_pmt, list_hits):
"""
Cluster all the data in k clusters given the positions and hits on each pmt
:param k: amount of clusters in the dataset
:param pmt_pos: position of the photo-multiplier tubes
:param hits: list of number of hits on each pmt
:return kmeans: the clustered data
"""
    #clustering according to sklearn KMeans; each PMT's hit count enters as its sample weight
kmeans = KMeans(n_clusters=k, random_state=0).fit(list_pmt, sample_weight = list_hits)
return kmeans
@njit
def loop_clusters(k, list_pmt, list_hits, labels, cluster_points):
"""
Calculate the within cluster distance metric
:param k: amount of clusters in the dataset
:param list_pmt: position of the photo-multiplier tubes
:param list_hits: list of number of hits on each pmt
:param labels: the labels on which pmt belongs to which cluster
:cluster_points: list to loop over all points in one cluster per loop
:return W: the within-dispersion measures for specific k
"""
W = 0
D = np.zeros(k)
points_in_cluster = np.zeros(k)
#loop through all the k clusters to determine the weighted "D"istance of each cluster
for i in range(0,k):
p_counter = 0
for j in cluster_points[i]:
for m in cluster_points[i]:
#the squared euclidian distance times the weights of each pmt
if list_hits[j] != 0 and list_hits[m] != 0:
D[i] += (list_hits[j]*list_hits[m])*((list_pmt[j][0]-list_pmt[m][0])**2 +(list_pmt[j][1]-list_pmt[m][1])**2)
p_counter += list_hits[j]
points_in_cluster[i] = p_counter
#loop through all the clusters to determine the "W"ithin cluster distance
for i in range(len(points_in_cluster)):
if points_in_cluster[i] != 0:
W += (1/(2 * points_in_cluster[i]))*D[i]
return W
def within_cluster_distance(k, hits):
"""
Initialise and calculates the within-cluster distancs for specific k
:param k: amount of clusters in the dataset
:param pmt_pos: position of the photo-multiplier tubes
:param hits: list of number of hits on each pmt
:return W: the within-cluster-distance metric for specific k
"""
#remove from the pmt- and the hit-list all events where hits = 0
list_pmt = pmt_pos[hits != 0]
list_hits = hits[hits != 0]
#𝅘𝅥𝅮 cluster time 𝅘𝅥𝅮
kmeans = (clustering_for_k(k, list_pmt, list_hits))
labels = kmeans.labels_
cluster_points = []
for i in range(0,k):
#for each cluster appends an array of point-indices that belong to that specific cluster
cluster_points.append(np.asarray(np.where(np.asarray(labels) == i))[0])
W = loop_clusters(k, list_pmt, list_hits, labels, cluster_points)
return W
def mini_cluster(hits):
"""
Calculate the optimal number of clusters for very small amount of pmt's with hits
:param pmt_pos: position of the photo-multiplier tubes
:param hits: number of hits on each pmt
:return k: the optimal number of clusters for a small dataset
"""
#remove from the pmt- and the hit-list all events where hits = 0
list_pmt = pmt_pos[hits != 0]
list_hits = hits[hits != 0]
#sort both lists on highest hit count
list_hits, list_pmt = zip(*sorted(zip(-np.asarray(list_hits), -np.asarray(list_pmt))))
list_hits = -np.asarray(list_hits)
list_pmt = -np.asarray(list_pmt)
#create a list of each nonzero hit
cluster_list = np.asarray(list(range(0,len(list_hits))))
i = 0
while i < len(cluster_list):
j = cluster_list[i]
#delete from the list each hit that lies within 27.5mm euclidian distance
for k in (cluster_list):
distance = ((list_pmt[j][0] - list_pmt[k][0])**2 + (list_pmt[j][1] - list_pmt[k][1])**2)**0.5
if distance < 27.5 and k != j:
cluster_list = np.delete(cluster_list, np.where(cluster_list == k)[0][0])
i += 1
return len(cluster_list)
def new_cluster_execute(hits, switch_param = 3, linehit = 0.2680932125849651):
"""
Calculate the most optimal amount of clusters in the data
:param pmt_pos: position of the photo-multiplier tubes
:param hits: number of hits on each pmt
:param switch_param: the number of nonzero hits at which the method changes to the mini-cluster method
:return optimum: most optimal amount of clusters
"""
    #for events with three or fewer nonzero hits, the distance between the points becomes a required quantity
if len(np.where(np.asarray(hits))[0]) <= switch_param:
optimum = mini_cluster(hits)
return optimum
else:
wk1 = within_cluster_distance(1, hits)
wk2 = within_cluster_distance(2, hits)
ratio = (wk1-wk2)/(wk1+wk2)
        #0.2680932125849651 is the 97.5th percentile of single scatters at the 2% background threshold
if ratio > linehit:
return 2
return 1
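# Hedged usage sketch (requires the module-level straxen `pmt_pos`; the hits
# array below is synthetic):
# >>> hits = np.zeros(npmt_top_array)
# >>> hits[10], hits[200] = 500., 450.   # two well-separated bright PMTs
# >>> new_cluster_execute(hits)          # 2 nonzero hits -> mini-cluster branch
# 2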
def cluster_plot(optimum, hits):
"""
Plot the data into calculated clusters
:param optimum: the amount of clusters in the data
:param pmt_pos: position of the photo-multiplier tubes
:param hits: number of hits on each pmt
"""
try:
colors = {0: "red", 1: "green"}
#remove from the pmt- and the hit-list all events where hits = 0
list_pmt = pmt_pos[hits != 0]
list_hits = hits[hits != 0]
#𝅘𝅥𝅮 cluster time 𝅘𝅥𝅮
kmeans = (clustering_for_k(optimum, list_pmt, list_hits))
fig = plt.gcf()
ax = fig.gca()
#for each point in each cluster, plot a circle. the radius of the circle is related to the weight of the hit
for i in range(optimum):
cluster_points = np.asarray(np.where(np.asarray(kmeans.labels_) == i))[0]
for j in cluster_points:
circle = plt.Circle((list_pmt[j][0], list_pmt[j][1]), np.sqrt(list_hits[j])/10, color=colors[i], fill=False)
ax.add_patch(circle)
plt.scatter(pmt_pos[:, 0], pmt_pos[:, 1], s=1)
plt.xlim(-100,100)
plt.xlabel("x(mm)")
plt.ylabel("y(mm)")
plt.ylim(-100,100)
except TypeError:
print("No optimum number of clusters was found, so no plot can be made")
return
#
#as per request, my thesis figures into functions that are easy callable
#
def show_true_dist(area_per_channel, true_pos, iterations, percentage = 0.02, switch_param = 2):
"""
Plot the distribution of the true distance
:param area_per_channel: array of datasets
:param true_pos: true positions of array of datasets
:param iterations: number of hits in distribution
:param percentage: background threshold percentage
:param switch_param: maximum number of pmts with hits at which the algorithm switches to a simple cluster finding algorithm
"""
hist_list_one = []
hist_list_two = []
for ip in range(iterations):
hits1 = np.array(area_per_channel[ip][:253])
hits2 = np.array(area_per_channel[ip+1000][:253])
hits = hits1 + hits2
hits_below_thresh = hits < (max(hits) * percentage)
hits[hits_below_thresh] = 0
        #cutoff-point is at 97.5% of single scatters with background threshold at 0.025%
optimum = new_cluster_execute(hits, switch_param, 0.27359306097095776)
true_dist = ((true_pos[ip][0]-true_pos[1000+ip][0])**2 + (true_pos[ip][1]-true_pos[1000+ip][1])**2)**0.5
if optimum == 1:
hist_list_one.append(true_dist)
if optimum == 2:
hist_list_two.append(true_dist)
font = {'size':33}
matplotlib.rc('font',**font)
plt.rcParams['figure.figsize'] = 12, 12
plt.hist(hist_list_two, bins=75, range=(0,150), label='Good identification', fill=True, color = 'green', histtype='step', alpha=0.4)
plt.hist(hist_list_two, bins=75, range=(0,150), fill=False, histtype='step', color='black', linewidth = 1)
plt.hist(hist_list_one, bins=75, range=(0,150), label='Misidentification', fill=True, color = 'red', histtype='step', alpha = 0.4, edgecolor = 'black', linewidth = 1)
# plt.hist(hist_list_one, bins=75, range=(0,150), fill=False, histtype='step', color='black', linestyle = (0,(1,4)), linewidth = 3)
plt.hist(hist_list_one, bins=75, range=(0,150), fill=False, histtype='step', color='black', linewidth = 1)
plt.xlabel("$\mathcal{D}$(mm)")
plt.ylabel("#Entries/2mm")
plt.legend(fontsize = 25)
plt.show()
def data_cluster_execute(area_per_channel, single_double, iterations, display, percentage = 0.02, switch_param = 3):
"""
Calculate the optimal number of clusters in the data
:param area_per_channel: array of datasets
:param single_double: input is 'single' or 'double' hit
:param iterations: number of hits in distribution
:param percentage: background threshold percentage
:param switch_param: maximum number of pmts with hits at which the algorithm switches to a simple cluster finding algorithm
"""
for ip in range(iterations):
if single_double == 'single':
hits = np.array(area_per_channel[ip][:253])
hits_below_thresh = hits < (max(hits) * percentage)
hits[hits_below_thresh] = 0
elif single_double == 'double':
hits1 = np.array(area_per_channel[ip][:253])
hits2 = np.array(area_per_channel[ip+1][:253])
hits = hits1 + hits2
hits_below_thresh = hits < (max(hits) * percentage)
hits[hits_below_thresh] = 0
optimum = new_cluster_execute(hits, switch_param)
if display:
cluster_plot(optimum, hits)
plt.show()
try:
istat = int(input("Type: 0 to continue, 1 to quit...."))
except ValueError:
                print('The possible inputs are 0 and 1.')
break
if istat == 1:
break
clear_output()
return
def eta_hist(area_per_channel, iterations, percentage = 0.02, switch_param = 2):
"""
Plot the distribution of eta
:param area_per_channel: array of datasets
:param iterations: number of hits in distribution
:param percentage: background threshold percentage
:param switch_param: maximum number of pmts with hits at which the algorithm switches to a simple cluster finding algorithm
"""
hist_list = []
hist_list2 = []
for ip in range(iterations):
hits1 = np.array(area_per_channel[ip][:253])
hits2 = np.array(area_per_channel[ip+1000][:253])
hits = hits1 + hits2
hits_below_thresh = hits < (max(hits) * percentage)
hits[hits_below_thresh] = 0
v1=within_cluster_distance(1, hits)
v2=within_cluster_distance(2, hits)
ratio = (v1-v2)/(v1+v2)
hist_list.append(ratio)
for ip in range(iterations):
hits1 = np.array(area_per_channel[ip][:253])
hits = hits1
hits_below_thresh = hits < (max(hits) * percentage)
hits[hits_below_thresh] = 0
v1=within_cluster_distance(1, hits)
v2=within_cluster_distance(2, hits)
ratio = (v1-v2)/(v1+v2)
hist_list2.append(ratio)
line_hit = np.percentile(hist_list2, 97.5)
ax = plt.gca()
xticks = ax.xaxis.get_major_ticks()
xticks[0].label1.set_visible(False)
plt.axvline(x=line_hit, color = 'k', linewidth=1.5, linestyle='dashed')
plt.hist(hist_list2, bins=100, range=(0,1), label='Single Scatter', fill=True, color = 'yellow', histtype='step', alpha = 0.45, ec = 'black')
# plt.hist(hist_list2, bins=100, range=(0,1), fill=False, histtype='step', color='black', linestyle = (0,(1,4)), linewidth = 3)
plt.hist(hist_list2, bins=100, range=(0,1), fill=False, histtype='step', color='black', linewidth = 1)
plt.hist(hist_list, bins=100, range=(0,1), label='Double Scatter', fill=True, color = 'gray', histtype='step', alpha=0.35)
plt.hist(hist_list, bins=100, range=(0,1), fill=False, histtype='step', color='black')
plt.xlabel("$\\eta$" , size = 35)
plt.ylabel("#Entries/0.01", size = 35)
plt.legend(fontsize = 25, edgecolor = 'black', frameon=False)
maxy = 3000
plt.arrow(line_hit + 0.01, maxy, 0, -260, width = 0.0001)
plt.annotate("", xy=(line_hit+0.08, maxy - 262), xytext=(line_hit+0.0075, maxy - 262), arrowprops=dict(arrowstyle="->", linewidth=1))
plt.annotate("Do-S", xy=(line_hit+0.08, maxy - 260), xytext=(line_hit+0.015, maxy - 220), size=25)
plt.arrow(line_hit - 0.01, maxy, 0, -179, width = 0.0001)
plt.annotate("", xy=(line_hit-0.007, maxy-180), xytext=(line_hit-0.08, maxy - 180), arrowprops=dict(arrowstyle="<-", linewidth=1))
plt.annotate("Si-S", xy=(line_hit-0.02, maxy-150), xytext=(line_hit-0.10, maxy-140), size=25)
plt.ylim(0,maxy)
plt.xlim(0,1)
plt.show()
def wk1_wk2_plot(area_per_channel, percentage = 0.02, switch_param = 3):
"""
Plot wk1 and wk2 into a single plot
:param area_per_channel: array of datasets
:param percentage: background threshold percentage
:param switch_param: maximum number of pmts with hits at which the algorithm switches to a simple cluster finding algorithm
"""
ip = 133
hits1 = np.array(area_per_channel[ip][:253])
hits2 = np.array(area_per_channel[ip+6][:253])
hits = hits1 + hits2
hits_below_thresh = hits < (max(hits) * percentage)
hits[hits_below_thresh] = 0
l2 = []
x2 = []
for i in range(1,6):
l2.append(np.log(within_cluster_distance(i, hits)))
x2.append(i)
hits = np.array(area_per_channel[ip][:253])
hits_below_thresh = hits < (max(hits) * percentage)
hits[hits_below_thresh] = 0
l = []
x = []
for i in range(1,6):
l.append(np.log(within_cluster_distance(i, hits))+2.99)
x.append(i)
plt.grid()
plt.plot(x2, l2, linewidth = 4, label='Double Cluster')
plt.scatter(x2, l2, s = 120)
plt.plot(x, l, linewidth = 4, label='Single Cluster')
plt.scatter(x, l, s = 120)
plt.xlabel('Amount of clusters $k$')
plt.ylabel("Log($W_k$)")
plt.legend()
plt.show()
def justification_classification(area_per_channel, iterations, percentage = 0.02, switch_param = 2):
"""
Plot the justification of the classification line
:param area_per_channel: array of datasets
:param iterations: number of hits in distribution
:param percentage: background threshold percentage
:param switch_param: maximum number of pmts with hits at which the algorithm switches to a simple cluster finding algorithm
"""
hist_list = []
hist_list2 = []
cuteff = []
cutmiss = []
label_list=[]
line_hit = [0.0, 0.06666666666666667, 0.13333333333333333, 0.2, 0.2222222, 0.2444444, 0.26666666666666666, 0.30, 0.3333333333333333, 0.4, 0.4666666666666667, 0.5333333333333333, 0.6, 0.6666666666666666, 0.7333333333333333, 0.8, 0.8666666666666667, 0.9333333333333333, 1.0]
for ip in range(iterations):
hits1 = np.array(area_per_channel[ip][:253])
hits2 = np.array(area_per_channel[ip+1000][:253])
hits = hits1 + hits2
hits_below_thresh = hits < (max(hits) * percentage)
hits[hits_below_thresh] = 0
v1=within_cluster_distance(1, hits)
v2=within_cluster_distance(2, hits)
ratio = (v1-v2)/(v1+v2)
hist_list.append(ratio)
for ip in range(iterations):
hits1 = np.array(area_per_channel[ip][:253])
hits = hits1
hits_below_thresh = hits < (max(hits) * percentage)
hits[hits_below_thresh] = 0
v1=within_cluster_distance(1, hits)
v2=within_cluster_distance(2, hits)
ratio = (v1-v2)/(v1+v2)
hist_list2.append(ratio)
for i in range(len(line_hit)):
cuteff.append(len(np.nonzero(np.asarray(hist_list2) < line_hit[i])[0]) / len(hist_list2))
cutmiss.append(len(np.nonzero(np.asarray(hist_list) < line_hit[i])[0]) / len(hist_list))
label_list.append(line_hit[i])
fig, ax = plt.subplots()
for i, txt in enumerate(label_list):
if i > 8 and i < len(cuteff)-2:
ax.annotate(np.round(txt,2), (cuteff[i]+0.01, cutmiss[i]), fontsize = 20)
if i > len(cuteff)-2:
ax.annotate(np.round(txt,2), (cuteff[i]+0.01, cutmiss[i]+0.01), fontsize = 20)
if i < 4 and i > 2:
ax.annotate(np.round(txt,2), (cuteff[i]-0.01, cutmiss[i]-0.037), fontsize = 20)
if i < 1:
ax.annotate(np.round(txt,2), (cuteff[i]-0.009, cutmiss[i]-0.035), fontsize = 20)
if i == 8:
ax.annotate(np.round(txt,2), (cuteff[i]+0.014, cutmiss[i]-0.003), fontsize = 20)
if i == 7:
ax.annotate(np.round(txt,2), (cuteff[i]+0.019, cutmiss[i]-0.008), fontsize = 20)
if i == 6:
ax.annotate(np.round(txt,2), (cuteff[i]+0.027, cutmiss[i]-0.011), fontsize = 20)
if i == 5:
ax.annotate(np.round(txt,2), (cuteff[i]-0.017, cutmiss[i]-0.04), fontsize = 20)
if i == 4:
ax.annotate(np.round(txt,2), (cuteff[i]+0.01, cutmiss[i]-0.026), fontsize = 20)
c_eff = list(np.asarray(cuteff))
c_miss = list(np.asarray(cutmiss))
del c_eff[-2]
del c_miss[-2]
    ax.plot(c_eff, c_miss, c='k', linewidth = 2)
ax.scatter(c_eff, c_miss, c='k', s = 50)
plt.xlabel("Single Scatter Efficiency (%)" , size = 35)
plt.ylabel("Double Scatter Misidentification (%)" , size = 35)
plt.axhline(y=1, linestyle = 'dashed', c = 'k')
plt.axvline(x=1, linestyle = 'dashed', c = 'k')
plt.axvline(x=0.975, label='97.5%', c='b', linestyle='dashed')
plt.xlim(0.45,1.07)
plt.ylim(0,1.05)
plt.show()
def func(amp, eff):
"""
Makes sure the bin size of the single efficiency distribution is logarithmic
"""
bins = np.array([5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 100, 150, 250, 350, 450, 550, 650, 750, 850, 950, 1050, 2000, 3000])
inds = np.digitize(amp, bins)
ind= [[] for _ in range(len(bins))]
pro= [[] for _ in range(len(bins))]
for i in range(len(inds)):
value = inds[i]
ind[value].append(amp[i])
pro[value].append(eff[i])
for i in range(len(ind)):
if len(ind[i]) == 0:
ind[i] = [0]
ind[i] = sum(ind[i])/len(ind[i])
if len(pro[i]) == 0:
pro[i] = [0]
pro[i] = sum(pro[i])/len(pro[i])
sorted_zeros = sorted(-np.argwhere(np.asarray(ind) == 0))
for i in range(len(sorted_zeros)):
del ind[-sorted_zeros[i][0]]
del pro[-sorted_zeros[i][0]]
return (ind, pro)
def single_cluster_eff(area_per_channel, mc_amplitude, loop_time, iterations, each_block, switch_param = 2):
"""
Plot the single scatter efficiency
Make sure that (iterations * each_block) = loop_time
:param area_per_channel: array of datasets
:param mc_amplitude: the monte carlo amplitude of the hits in the datasets
:param loop_time: the number of datasets used
:param iterations: the number of ticks on the x-axis
:param each_block: the number of datasets used per tick
    :param switch_param: maximum number of PMTs with hits at which the algorithm switches to a simple cluster-finding algorithm
"""
optimum = [[],[],[]]
maximum_amplitude = []
for ip in range(loop_time):
percentage = 0
hits = np.array(area_per_channel[ip][:253])
hits_below_thresh = hits < (max(hits) * percentage)
hits[hits_below_thresh] = 0
optimum[0].append(new_cluster_execute(hits, switch_param, 0.39481508101094803))
percentage = 0.02
hits = np.array(area_per_channel[ip][:253])
hits_below_thresh = hits < (max(hits) * percentage)
hits[hits_below_thresh] = 0
optimum[1].append(new_cluster_execute(hits, switch_param, 0.2680932125849651))
percentage = 0.05
hits = np.array(area_per_channel[ip][:253])
hits_below_thresh = hits < (max(hits) * percentage)
hits[hits_below_thresh] = 0
optimum[2].append(new_cluster_execute(hits, switch_param, 0.30755463539466177))
maximum_amplitude.append(mc_amplitude[ip])
    maximum_amplitude0, optimum[0] = zip(*sorted(zip(np.asarray(maximum_amplitude), optimum[0])))
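    # Hedged continuation (the original file is truncated above): the remaining
    # threshold variants would presumably be sorted by amplitude the same way
    # before being binned with `func` and plotted.
    maximum_amplitude1, optimum[1] = zip(*sorted(zip(np.asarray(maximum_amplitude), optimum[1])))
    maximum_amplitude2, optimum[2] = zip(*sorted(zip(np.asarray(maximum_amplitude), optimum[2])))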
"""
Copyright 2018 Novartis Institutes for BioMedical Research Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import hnswlib
import importlib
import itertools
import numpy as np
import operator
import os
import sys
import warnings
from contextlib import contextmanager
from scipy.ndimage.interpolation import zoom
from scipy.stats import norm
from sklearn.neighbors import BallTree
from sklearn.preprocessing import MinMaxScaler
from typing import Callable, List
# Keras prints status messages to stderr at import time; temporarily silence
# them by redirecting stderr to /dev/null. See:
# https://github.com/keras-team/keras/issues/1406
stderr = sys.stderr
sys.stderr = open(os.devnull, "w")
import keras
from keras.layers import Input
from keras.models import Model
sys.stderr = stderr
flatten = itertools.chain.from_iterable
def compare_lists(
a: List, b: List, conditionator: Callable = all, comparator: Callable = operator.eq
):
    return conditionator(map(comparator, a, b))
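# Example (sketch): with the default `all` and `operator.eq`, lists compare
# element-wise. Note that `map` stops at the shorter list, so lengths must be
# checked separately if they can differ.
# compare_lists([1, 2, 3], [1, 2, 3]) -> True
# compare_lists([1, 2, 3], [1, 2, 4]) -> False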
def unpredictability(p: np.ndarray) -> float:
"""Unpredictability score
    Unpredictability is the distance of the prediction probability from its
    nearest class (`0` or `1`), scaled to [0, 1]. For example, a prediction
    probability of 0.6 lies 0.4 from its nearest class, giving an
    unpredictability of 0.8. The highest unpredictability is 1 and the lowest is 0.
"""
return np.mean(np.abs(p - np.round(p))) * 2
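# Worked example (illustrative): for p = [0.5, 0.6, 1.0] the distances to the
# nearest class are [0.5, 0.4, 0.0]; their mean (0.3) times 2 gives 0.6.
# unpredictability(np.array([0.5, 0.6, 1.0])) -> 0.6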
def prediction_proba_change(p0: np.ndarray, p1: np.ndarray) -> float:
"""Unpredictability score
Total amount of change in the prediction probability
"""
return np.mean(np.abs(p0 - p1))
def prediction_change(p0: np.ndarray, p1: np.ndarray, border: float = 0.5) -> float:
"""Prediction change score
Prediction change is defined as the number of times the predicted class changes
based on the border probability.
"""
return np.mean(np.sign(p0 - border) != np.sign(p1 - border))
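# Worked example (illustrative): p0 = [0.4, 0.6] and p1 = [0.6, 0.7] cross the
# 0.5 border for the first element only, so prediction_change is 0.5, while
# prediction_proba_change is the mean absolute shift, (0.2 + 0.1) / 2 = 0.15.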
# def uncertainty(model, X_train: np.ndarray, X_test: np.ndarray) -> float:
# """Unpredictability score
#
# Unpredictability is defined as the minimum deviation of the prediction probability
# from `0.5` to `0` or `1`. For example, for a prediction probability of 0.6 the
# unpredictability is 0.4. The highest unpredictability is 1 and the lowest is 0.
# """
# return random_forest_error(model, X_train, X_test).mean()
def convergence(
x0: np.ndarray, x1: np.ndarray, x2: np.ndarray, decimals: int = 2
) -> float:
"""Convergence score
    Given three successive measurements, the convergence score is the fraction
    of element-wise changes that keep the same sign (increase or decrease) in
    both steps. The highest convergence score is 1 and the lowest is 0.
"""
x0r = np.round(x0, decimals=decimals)
    x1r = np.round(x1, decimals=decimals)
    x2r = np.round(x2, decimals=decimals)
    # assumed ending (the original is truncated here): a change "converges"
    # when it keeps the same sign across both steps
    return np.mean(np.sign(x1r - x0r) == np.sign(x2r - x1r))
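# Worked example (illustrative, using the reconstructed return above): both
# elements keep moving in the same direction across the two steps, so
# convergence(np.array([1., 2.]), np.array([2., 1.]), np.array([3., 0.])) -> 1.0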
# -*- coding: utf-8 -*-
# Tests for module mosaic.immutable_model
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import unittest
import numpy as N
import immutable.np as IN
import mosaic.immutable_model as M
from mosaic.api import is_valid
def make_water_fragment(nsites=1):
return M.fragment("water", (),
(("H1", M.atom(M.element("H"), nsites)),
("H2", M.atom(M.element("H"), nsites)),
("O", M.atom(M.element("O"), nsites))),
(("H1", "O", "single"), ("H2", "O", "single")))
class AtomDescriptorTest(unittest.TestCase):
def test_singleton(self):
self.assertTrue(M.dummy() is M.dummy())
self.assertTrue(M.dummy('a') is M.dummy('a'))
self.assertTrue(M.dummy('a') is not M.dummy('b'))
self.assertTrue(M.dummy('a') is not M.unknown('a'))
self.assertTrue(M.dummy('C') is not M.element('C'))
self.assertTrue(M.element('C') is M.element('C'))
def test_name(self):
for name in ['a', 'b', 'c']:
self.assertEqual(M.unknown(name).name, name)
def test_type(self):
self.assertEqual(M.dummy().type, "dummy")
self.assertEqual(M.unknown().type, "")
self.assertEqual(M.element('O').type, "element")
self.assertEqual(M.cgparticle('ala').type, "cgparticle")
class WaterTest(unittest.TestCase):
def setUp(self):
self.mol = make_water_fragment()
def test_basics(self):
self.assertEqual(self.mol.number_of_atoms, 3)
self.assertEqual(self.mol.number_of_sites, 3)
self.assertEqual(self.mol.number_of_bonds, 2)
self.assertEqual(self.mol.species, "water")
def test_equality(self):
same_mol = make_water_fragment()
changed_bond_order = M.fragment("water", (),
(("H1", M.atom(M.element("H"))),
("H2", M.atom(M.element("H"))),
("O", M.atom(M.element("O")))),
(("O", "H2", "single"),
("O", "H1", "single")))
changed_atom_order = M.fragment("water", (),
(("O", M.atom(M.element("O"))),
("H1", M.atom(M.element("H"))),
("H2", M.atom(M.element("H")))),
(("O", "H1", "single"),
("O", "H2", "single")))
self.assertEqual(self.mol, self.mol)
self.assertEqual(self.mol, same_mol)
self.assertEqual(self.mol, changed_bond_order)
self.assertNotEqual(self.mol, changed_atom_order)
class PeptideTest(unittest.TestCase):
def _make_molecule(self):
C = M.element('C')
H = M.element('H')
N = M.element('N')
O = M.element('O')
peptide_group = M.fragment('peptide',
(),
(('CA', M.atom(C)),
('HA', M.atom(H)),
('H', M.atom(H)),
('N', M.atom(N)),
('C', M.atom(C)),
('O', M.atom(O))),
(('N', 'H', "single"),
('N', 'CA', "single"),
('CA', 'HA', "single"),
('CA', 'C', "single"),
('C', 'O', "double")))
ala_sidechain = M.fragment('ala_sidechain',
(),
(('CB', M.atom(C)),
('HB1', M.atom(H)),
('HB2', M.atom(H)),
('HB3', M.atom(H))),
(('CB', 'HB1', "single"),
('CB', 'HB2', "single"),
('CB', 'HB3', "single"),))
ala = M.fragment('alanine',
(('peptide', peptide_group),
('sidechain', ala_sidechain)),
(),
(('peptide.CA', 'sidechain.CB', "single"),))
return M.polymer('alanine_dipeptide',
(('ALA1', ala),
('ALA2', ala)),
(('ALA1.peptide.C', 'ALA2.peptide.N', "single"),),
'polypeptide')
def test_basic(self):
mol = self._make_molecule()
self.assertEqual(mol.number_of_atoms, 20)
self.assertEqual(mol.number_of_sites, 20)
self.assertEqual(mol.number_of_bonds, 19)
self.assertEqual(mol.polymer_type, "polypeptide")
def test_equality(self):
self.assertEqual(self._make_molecule(),
self._make_molecule())
def test_iterators(self):
mol = self._make_molecule()
mol_ref = M.FragmentRef('x', mol)
atoms = tuple(mol_ref.recursive_atom_iterator())
self.assertEqual(len(atoms), mol.number_of_atoms)
bonds = tuple(mol_ref.recursive_bond_iterator())
self.assertEqual(len(bonds), mol.number_of_bonds)
for a1, a2, order in bonds:
for a in a1, a2:
node = mol
for p in a.split('.'):
node = node[p]
self.assertTrue(isinstance(node, M.Atom))
paths = tuple(mol_ref.recursive_atom_path_iterator())
self.assertEqual(len(paths), mol.number_of_atoms)
for ap in paths:
node = mol
for p in ap.split('.'):
node = node[p]
self.assertTrue(isinstance(node, M.Atom))
class ErrorCheckingTest(unittest.TestCase):
def test_atom_descriptor(self):
self.assertRaises(TypeError, lambda: M.dummy(42))
self.assertRaises(ValueError, lambda: M.element(42))
self.assertRaises(ValueError, lambda: M.element("X"))
def test_atom(self):
carbon = M.element("C")
self.assertRaises(TypeError, lambda: M.atom('C', 1))
self.assertRaises(ValueError, lambda: M.atom(carbon, 0))
def test_fragment(self):
carbon = M.atom(M.element("C"))
# Illegal fragments
self.assertRaises(TypeError,
lambda: M.fragment('m', None, (("C", carbon),), ()))
self.assertRaises(TypeError,
lambda: M.fragment('m', [1, 2], (("C", carbon),), ()))
self.assertRaises(TypeError,
lambda: M.fragment('m', (("C", carbon),), (), ()))
# Illegal atoms
self.assertRaises(TypeError,
lambda: M.fragment('m', (), None, ()))
self.assertRaises(TypeError,
lambda: M.fragment('m', (), [1, 2], ()))
self.assertRaises(TypeError,
lambda: M.fragment('m', (), (carbon,), ()))
self.assertRaises(ValueError,
lambda: M.fragment('m', (),
(("C", carbon),
("C", carbon)),
()))
# Illegal bond lists
self.assertRaises(TypeError,
lambda: M.fragment('m', (), (("C", carbon),), None))
self.assertRaises(TypeError,
lambda: M.fragment('m', (), (("C", carbon),),
[1, 2, 3]))
self.assertRaises(TypeError,
lambda: M.fragment('m', (), (("C", carbon),),
(('X', 'X'))))
self.assertRaises(TypeError,
lambda: M.fragment('m', (), (("C", carbon),),
(['X', 'X', 'single'])))
def test_bonds(self):
carbon = M.atom(M.element("C"))
# Bond specified by only one atom
self.assertRaises(ValueError,
lambda: M.fragment('m', (),
(('C1', carbon), ('C2', carbon)),
(('C1', ),)))
# Bond specified by two atoms but no bond order
self.assertRaises(ValueError,
lambda: M.fragment('m', (),
(('C1', carbon), ('C2', carbon)),
(('C1', 'C2'),)))
# Bond specified by two identical atoms
self.assertRaises(ValueError,
lambda: M.fragment('m', (),
(('C1', carbon), ('C2', carbon)),
(('C1', 'C1', ''),)))
# Bond specified by an atom name that is undefined
self.assertRaises(ValueError,
lambda: M.fragment('m', (),
(('C1', carbon), ('C2', carbon)),
(('C1', 'C3', ''),)))
# Bond specified at the wrong fragment level
f = M.fragment('x', (), (('C1', carbon), ('C2', carbon)), ())
self.assertRaises(ValueError,
lambda: M.fragment('m', (('x', f),),
(('C3', carbon),),
(('x.C1', 'x.C2', ''),)))
def test_universe(self):
mol = M.fragment("water", (),
(("H1", M.atom(M.element("H"), 8)),
("H2", M.atom(M.element("H"), 8)),
("O", M.atom(M.element("O"), 2))),
(("H1", "O", "single"), ("H2", "O", "single")))
self.assertRaises(TypeError,
lambda: M.universe(0, [(mol, 'water', 10)]))
self.assertRaises(ValueError,
lambda: M.universe('strange', [(mol, 'water', 10)]))
self.assertRaises(ValueError,
lambda: M.universe('strange', [(mol, 10)]))
self.assertRaises(TypeError,
lambda: M.universe('infinite', mol))
self.assertRaises(ValueError,
lambda: M.universe('infinite', [("water", 10)]))
self.assertRaises(TypeError,
lambda: M.universe('infinite', [mol]))
self.assertRaises(ValueError,
lambda: M.universe('infinite', [(10, mol)]))
self.assertRaises(ValueError,
lambda: M.universe('infinite', [(mol, 'water', 10)],
[(IN.zeros((3,3), N.float64),
IN.zeros((3,), N.float64))]))
def test_configuration(self):
mol = make_water_fragment()
universe = M.universe('cube', [(mol, 'water', 10)])
# Missing data
self.assertRaises(TypeError,
lambda: M.Configuration(universe))
# Positions but no cell parameters
self.assertRaises(TypeError,
lambda: M.Configuration(universe,
IN.zeros((30, 3), N.float32)))
# Positions and cell parameters of different dtype
self.assertRaises(ValueError,
lambda: M.Configuration(universe,
IN.zeros((30, 3), N.float32),
N.float64(10.)))
# Positions not an array
self.assertRaises(TypeError,
lambda: M.Configuration(universe,
list(IN.zeros((30, 3),
N.float32)),
N.float32(10.)))
# Positions of wrong shape
self.assertRaises(ValueError,
lambda: M.Configuration(universe,
IN.zeros((25, 3), N.float32),
N.float32(10.)))
# Cell parameters of wrong shape
self.assertRaises(ValueError,
lambda: M.Configuration(universe,
IN.zeros((30, 3), N.float32),
IN.zeros((3,), N.float32)))
def test_selection(self):
mol = make_water_fragment(nsites=2)
universe = M.universe('cube', [(mol, 'water', 5)])
# Index occurs twice
self.assertRaises(ValueError,
lambda: M.AtomSelection(universe,
IN.zeros((2,), N.uint16)))
# Atom index too large
self.assertRaises(ValueError,
lambda: M.AtomSelection(universe,
IN.array([20], N.uint16)))
# Template atom index too large
self.assertRaises(ValueError,
lambda: M.TemplateAtomSelection(universe,
IN.array([3], N.uint8)))
# Site index too large
self.assertRaises(ValueError,
lambda: M.SiteSelection(universe,
IN.array([40], N.uint16)))
# Template site index too large
self.assertRaises(ValueError,
lambda: M.TemplateSiteSelection(universe,
IN.array([8], N.uint8)))
class UniverseTest(unittest.TestCase):
def setUp(self):
mol = make_water_fragment(2)
self.universe = M.universe('infinite', [(mol, 'water', 10)],
convention='my_own')
def test_basics(self):
self.assertTrue(is_valid(self.universe))
self.assertEqual(self.universe.number_of_molecules, 10)
self.assertEqual(self.universe.number_of_atoms, 30)
self.assertEqual(self.universe.number_of_sites, 60)
self.assertEqual(self.universe.number_of_bonds, 20)
self.assertEqual(self.universe.cell_shape, "infinite")
self.assertEqual(self.universe.convention, "my_own")
def test_properties(self):
masses = M.TemplateAtomProperty(self.universe,
"masses", "amu",
IN.array([1., 1., 16.], N.float32))
self.assertTrue(is_valid(masses))
self.assertEqual(masses.type, 'template_atom')
self.assertTrue(masses.universe == self.universe)
self.assertEqual(masses.element_shape, ())
self.assertEqual(masses.data.shape, (3,))
bead_masses = M.TemplateSiteProperty(self.universe,
"mass", "amu",
IN.array([1., 1.,
1., 1.,
8., 8.], N.float32))
self.assertTrue(is_valid(bead_masses))
self.assertEqual(bead_masses.type, 'template_site')
self.assertTrue(bead_masses.universe is self.universe)
self.assertEqual(bead_masses.element_shape, ())
self.assertEqual(bead_masses.data.shape, (6,))
velocities = M.SiteProperty(self.universe,
"velocity", "nm ps-1",
IN.zeros((60, 3), dtype=N.float64))
self.assertTrue(is_valid(velocities))
self.assertEqual(velocities.type, 'site')
self.assertTrue(velocities.universe is self.universe)
self.assertEqual(velocities.data.shape, (60, 3))
self.assertEqual(velocities.element_shape, (3,))
foo = M.AtomProperty(self.universe,
"foo", "",
IN.zeros((30, 2, 2), dtype=N.int16))
self.assertTrue(is_valid(foo))
self.assertEqual(foo.type, 'atom')
self.assertTrue(foo.universe is self.universe)
self.assertEqual(foo.data.shape, (30, 2, 2))
self.assertEqual(foo.element_shape, (2, 2))
def test_labels(self):
labels = tuple(a.name
for f, n in self.universe.molecules
for a in f.recursive_atom_iterator())
el = M.TemplateAtomLabel(self.universe, "element", labels)
self.assertTrue(is_valid(el))
self.assertEqual(el.name, "element")
self.assertTrue(el.universe == self.universe)
self.assertTrue(len(el.strings)
== self.universe.number_of_template_atoms)
for s1, s2 in zip(labels, el.strings):
self.assertEqual(s1, s2)
labels = tuple(a.name
for f, n in self.universe.molecules
for _ in range(n)
for a in f.recursive_atom_iterator())
el = M.AtomLabel(self.universe, "element", labels)
self.assertTrue(is_valid(el))
self.assertEqual(el.name, "element")
self.assertTrue(el.universe == self.universe)
self.assertTrue(len(el.strings)
== self.universe.number_of_atoms)
for s1, s2 in zip(labels, el.strings):
self.assertEqual(s1, s2)
labels = tuple(a.name
for f, n in self.universe.molecules
for a in f.recursive_atom_iterator()
for _ in range(a.number_of_sites))
el = M.TemplateSiteLabel(self.universe, "element", labels)
self.assertTrue(is_valid(el))
self.assertEqual(el.name, "element")
self.assertTrue(el.universe == self.universe)
self.assertTrue(len(el.strings)
== self.universe.number_of_template_sites)
for s1, s2 in zip(labels, el.strings):
self.assertEqual(s1, s2)
labels = tuple(a.name
for f, n in self.universe.molecules
for _ in range(n)
for a in f.recursive_atom_iterator()
for __ in range(a.number_of_sites))
el = M.SiteLabel(self.universe, "element", labels)
self.assertTrue(is_valid(el))
self.assertEqual(el.name, "element")
self.assertTrue(el.universe == self.universe)
self.assertTrue(len(el.strings)
== self.universe.number_of_sites)
for s1, s2 in zip(labels, el.strings):
self.assertEqual(s1, s2)
def test_bonds(self):
bonds = self.universe.bond_index_array()
self.assertEqual(len(bonds), self.universe.number_of_bonds)
self.assertTrue((bonds >= 0).all())
self.assertTrue((bonds < self.universe.number_of_atoms).all())
for i in range(10):
self.assertEqual(bonds[2*i, 0], 3*i)
self.assertEqual(bonds[2*i, 1], 3*i+2)
self.assertEqual(bonds[2*i+1, 0], 3*i+1)
self.assertEqual(bonds[2*i+1, 1], 3*i+2)
def test_index_mappings(self):
mol = self.universe.molecules[0][0]
s2a = mol.site_to_atom_index_mapping()
self.assertTrue((s2a == N.array([0, 0, 1, 1, 2, 2])).all())
s2a = self.universe.site_to_atom_index_mapping()
        s2a_ref = N.repeat(N.arange(30), 2)
        # assumed ending (the original is truncated here): 30 atoms with two
        # sites each map pairwise onto atom indices
        self.assertTrue((s2a == s2a_ref).all())
'''
desisim.quickcat
================
Code for quickly generating an output zcatalog given fiber assignment tiles,
a truth catalog, and optionally a previous zcatalog.
'''
from __future__ import absolute_import, division, print_function
import os
import yaml
from collections import Counter
from pkg_resources import resource_filename
from time import asctime
import numpy as np
from astropy.io import fits
from astropy.table import Table, Column, vstack
import sys
import scipy.special as sp
import desisim
from desisim.targets import get_simtype
import astropy.constants
c = astropy.constants.c.to('km/s').value
from desitarget.targetmask import desi_mask, bgs_mask, mws_mask
from desiutil.log import get_logger
log = get_logger()
#- redshift errors, zwarn, cata fail rate fractions from
#- /project/projectdirs/desi/datachallenge/redwood/spectro/redux/redwood/
#- sigmav = c sigmaz / (1+z)
_sigma_v = {
# 'ELG': 38.03,
# 'LRG': 67.38,
'BGS': 37.70,
# 'QSO': 182.16,
'STAR': 51.51,
'WD':54.35,
'SKY': 9999, #- meaningless
'UNKNOWN': 9999, #- meaningless
}
_zwarn_fraction = {
# 'ELG': 0.087,
# 'LRG': 0.007,
# 'QSO': 0.020,
'BGS': 0.024,
'STAR': 0.345,
'WD':0.094,
'SKY': 1.0,
'UNKNOWN': 1.0,
}
_cata_fail_fraction = {
# 'ELG': 0.020,
# 'LRG': 0.002,
# 'QSO': 0.012,
'BGS': 0.003,
'STAR': 0.050,
'WD':0.0,
'SKY': 0.,
'UNKNOWN': 0.,
}
def get_zeff_obs(simtype, obsconditions):
    '''
    Model how observing conditions (airmass, E(B-V), seeing, transparency,
    moon) modulate the redshift efficiency for a given target type; returns a
    multiplicative correction factor per tile.
    '''
if(simtype=='LRG'):
p_v = [1.0, 0.15, -0.5]
p_w = [1.0, 0.4, 0.0]
p_x = [1.0, 0.06, 0.05]
p_y = [1.0, 0.0, 0.08]
p_z = [1.0, 0.0, 0.0]
sigma_r = 0.02
elif(simtype=='QSO'):
p_v = [1.0, -0.2, 0.3]
p_w = [1.0, -0.5, 0.6]
p_x = [1.0, -0.1, -0.075]
p_y = [1.0, -0.08, -0.04]
p_z = [1.0, 0.0, 0.0]
sigma_r = 0.05
elif(simtype=='ELG'):
p_v = [1.0, -0.1, -0.2]
p_w = [1.0, 0.25, -0.75]
p_x = [1.0, 0.0, 0.05]
p_y = [1.0, 0.2, 0.1]
p_z = [1.0, -10.0, 300.0]
sigma_r = 0.075
else:
log.warning('No model for how observing conditions impact {} redshift efficiency'.format(simtype))
return np.ones(len(obsconditions))
ncond = len(np.atleast_1d(obsconditions['AIRMASS']))
# airmass
v = obsconditions['AIRMASS'] - np.mean(obsconditions['AIRMASS'])
pv = p_v[0] + p_v[1] * v + p_v[2] * (v**2. - np.mean(v**2))
# ebmv
if 'EBMV' in obsconditions :
w = obsconditions['EBMV'] - np.mean(obsconditions['EBMV'])
pw = p_w[0] + p_w[1] * w + p_w[2] * (w**2 - np.mean(w**2))
else :
pw = np.ones(ncond)
# seeing
x = obsconditions['SEEING'] - np.mean(obsconditions['SEEING'])
px = p_x[0] + p_x[1]*x + p_x[2] * (x**2 - np.mean(x**2))
# transparency
if 'LINTRANS' in obsconditions :
y = obsconditions['LINTRANS'] - np.mean(obsconditions['LINTRANS'])
py = p_y[0] + p_y[1]*y + p_y[2] * (y**2 - np.mean(y**2))
else :
py = np.ones(ncond)
# moon illumination fraction
z = obsconditions['MOONFRAC'] - np.mean(obsconditions['MOONFRAC'])
pz = p_z[0] + p_z[1]*z + p_z[2] * (z**2 - np.mean(z**2))
    #- if moon is down phase doesn't matter
    #- NOTE: the pz computed above is overwritten with ones here, so the moon
    #- illumination term is effectively disabled
    pz = np.ones(ncond)
    pz[obsconditions['MOONALT'] < 0] = 1.0
pr = 1.0 + np.random.normal(size=ncond, scale=sigma_r)
#- this correction factor can be greater than 1, but not less than 0
pobs = (pv * pw * px * py * pz * pr).clip(min=0.0)
return pobs
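#- Minimal sketch (not part of desisim): evaluate the ELG correction for two
#- made-up tiles. Column names follow the documentation above; values are
#- purely illustrative.
def _demo_zeff_obs():
    conditions = Table()
    conditions['AIRMASS'] = np.array([1.0, 1.3])
    conditions['EBMV'] = np.array([0.02, 0.05])
    conditions['SEEING'] = np.array([1.0, 1.2])
    conditions['LINTRANS'] = np.array([0.95, 0.90])
    conditions['MOONFRAC'] = np.array([0.1, 0.6])
    conditions['MOONALT'] = np.array([-10.0, 30.0])
    return get_zeff_obs('ELG', conditions)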
def get_redshift_efficiency(simtype, targets, truth, targets_in_tile, obsconditions, params, ignore_obscondition=False):
"""
    Simple model to get the redshift efficiency from the observational conditions or observed magnitudes+redshift
Args:
simtype: ELG, LRG, QSO, MWS, BGS
targets: target catalog table; currently used only for TARGETID
truth: truth table with OIIFLUX, TRUEZ
targets_in_tile: dictionary. Keys correspond to tileids, its values are the
arrays of targetids observed in that tile.
obsconditions: table observing conditions with columns
'TILEID': array of tile IDs
'AIRMASS': array of airmass values on a tile
'EBMV': array of E(B-V) values on a tile
'LINTRANS': array of atmospheric transparency during spectro obs; floats [0-1]
'MOONFRAC': array of moonfraction values on a tile.
'SEEING': array of FWHM seeing during spectroscopic observation on a tile.
parameter_filename: yaml file with quickcat parameters
ignore_obscondition: if True, no variation of efficiency with obs. conditions (adjustment of exposure time should correct for mean change of S/N)
Returns:
tuple of arrays (observed, p) both with same length as targets
observed: boolean array of whether the target was observed in these tiles
p: probability to get this redshift right
"""
targetid = targets['TARGETID']
n = len(targetid)
try:
if 'DECAM_FLUX' in targets.dtype.names :
true_gflux = targets['DECAM_FLUX'][:, 1]
true_rflux = targets['DECAM_FLUX'][:, 2]
else:
true_gflux = targets['FLUX_G']
true_rflux = targets['FLUX_R']
except:
raise Exception('Missing photometry needed to estimate redshift efficiency!')
a_small_flux=1e-40
true_gflux[true_gflux<a_small_flux]=a_small_flux
true_rflux[true_rflux<a_small_flux]=a_small_flux
if (obsconditions is None) or ('OIIFLUX' not in truth.dtype.names):
raise Exception('Missing obsconditions and flux information to estimate redshift efficiency')
if (simtype == 'ELG'):
# Read the model OII flux threshold (FDR fig 7.12 modified to fit redmonster efficiency on OAK)
# filename = resource_filename('desisim', 'data/quickcat_elg_oii_flux_threshold.txt')
# Read the model OII flux threshold (FDR fig 7.12)
filename = resource_filename('desisim', 'data/elg_oii_flux_threshold_fdr.txt')
fdr_z, modified_fdr_oii_flux_threshold = np.loadtxt(filename, unpack=True)
# Compute OII flux thresholds for truez
oii_flux_limit = np.interp(truth['TRUEZ'],fdr_z,modified_fdr_oii_flux_threshold)
oii_flux_limit[oii_flux_limit<1e-20]=1e-20
# efficiency is modeled as a function of flux_OII/f_OII_threshold(z) and an arbitrary sigma_fudge
snr_in_lines = params["ELG"]["EFFICIENCY"]["SNR_LINES_SCALE"]*7*truth['OIIFLUX']/oii_flux_limit
snr_in_continuum = params["ELG"]["EFFICIENCY"]["SNR_CONTINUUM_SCALE"]*true_rflux
snr_tot = np.sqrt(snr_in_lines**2+snr_in_continuum**2)
sigma_fudge = params["ELG"]["EFFICIENCY"]["SIGMA_FUDGE"]
nsigma = 3.
simulated_eff = eff_model(snr_tot,nsigma,sigma_fudge)
elif(simtype == 'LRG'):
r_mag = 22.5 - 2.5*np.log10(true_rflux)
sigmoid_cutoff = params["LRG"]["EFFICIENCY"]["SIGMOID_CUTOFF"]
sigmoid_fudge = params["LRG"]["EFFICIENCY"]["SIGMOID_FUDGE"]
simulated_eff = 1./(1.+np.exp((r_mag-sigmoid_cutoff)/sigmoid_fudge))
log.info("{} eff = sigmoid with cutoff = {:4.3f} fudge = {:4.3f}".format(simtype,sigmoid_cutoff,sigmoid_fudge))
elif(simtype == 'QSO'):
zsplit = params['QSO_ZSPLIT']
r_mag = 22.5 - 2.5*np.log10(true_rflux)
simulated_eff = np.ones(r_mag.shape)
# lowz tracer qsos
sigmoid_cutoff = params["LOWZ_QSO"]["EFFICIENCY"]["SIGMOID_CUTOFF"]
sigmoid_fudge = params["LOWZ_QSO"]["EFFICIENCY"]["SIGMOID_FUDGE"]
ii=(truth['TRUEZ']<=zsplit)
simulated_eff[ii] = 1./(1.+np.exp((r_mag[ii]-sigmoid_cutoff)/sigmoid_fudge))
log.info("{} eff = sigmoid with cutoff = {:4.3f} fudge = {:4.3f}".format("LOWZ QSO",sigmoid_cutoff,sigmoid_fudge))
# highz lya qsos
sigmoid_cutoff = params["LYA_QSO"]["EFFICIENCY"]["SIGMOID_CUTOFF"]
sigmoid_fudge = params["LYA_QSO"]["EFFICIENCY"]["SIGMOID_FUDGE"]
ii=(truth['TRUEZ']>zsplit)
simulated_eff[ii] = 1./(1.+np.exp((r_mag[ii]-sigmoid_cutoff)/sigmoid_fudge))
log.info("{} eff = sigmoid with cutoff = {:4.3f} fudge = {:4.3f}".format("LYA QSO",sigmoid_cutoff,sigmoid_fudge))
elif simtype == 'BGS':
simulated_eff = 0.98 * np.ones(n)
elif simtype == 'MWS':
simulated_eff = 0.98 * np.ones(n)
else:
default_zeff = 0.98
log.warning('using default redshift efficiency of {} for {}'.format(default_zeff, simtype))
simulated_eff = default_zeff * np.ones(n)
#- Get the corrections for observing conditions per tile, then
#- correct targets on those tiles. Parameterize in terms of failure
#- rate instead of success rate to handle bookkeeping of targets that
#- are observed on more than one tile.
#- NOTE: this still isn't quite right since multiple observations will
#- be simultaneously fit instead of just taking whichever individual one
#- succeeds.
if ignore_obscondition :
ncond = len(np.atleast_1d(obsconditions['AIRMASS']))
zeff_obs = np.ones(ncond)
else :
zeff_obs = get_zeff_obs(simtype, obsconditions)
pfail = np.ones(n)
observed = np.zeros(n, dtype=bool)
# More efficient alternative for large numbers of tiles + large target
# list, but requires pre-computing the sort order of targetids.
# Assume targets['TARGETID'] is unique, so not checking this.
sort_targetid = np.argsort(targetid)
# Extract the targets-per-tile lists into one huge list.
concat_targets_in_tile = np.concatenate([targets_in_tile[tileid] for tileid in obsconditions['TILEID']])
ntargets_per_tile = np.array([len(targets_in_tile[tileid]) for tileid in obsconditions['TILEID']])
# Match entries in each tile list against sorted target list.
target_idx = targetid[sort_targetid].searchsorted(concat_targets_in_tile,side='left')
target_idx_r = targetid[sort_targetid].searchsorted(concat_targets_in_tile,side='right')
del(concat_targets_in_tile)
# Flag targets in tiles that do not appear in the target list (sky,
# standards).
not_matched = target_idx_r - target_idx == 0
target_idx[not_matched] = -1
del(target_idx_r,not_matched)
# Not every tile has 5000 targets, so use individual counts to
# construct offset of each tile in target_idx.
offset = np.concatenate([[0],np.cumsum(ntargets_per_tile[:-1])])
# For each tile, process targets.
for i, tileid in enumerate(obsconditions['TILEID']):
if ntargets_per_tile[i] > 0:
# Quickly get all the matched targets on this tile.
targets_this_tile = target_idx[offset[i]:offset[i]+ntargets_per_tile[i]]
            targets_this_tile = targets_this_tile[targets_this_tile >= 0]  # -1 flags unmatched entries; >= keeps target index 0
# List of indices into sorted target list for each observed
# source.
ii = sort_targetid[targets_this_tile]
tmp = (simulated_eff[ii]*zeff_obs[i]).clip(0, 1)
pfail[ii] *= (1-tmp)
observed[ii] = True
simulated_eff = (1-pfail)
return observed, simulated_eff
# Efficiency model
def eff_model(x, nsigma, sigma, max_efficiency=1):
return 0.5*max_efficiency*(1.+sp.erf((x-nsigma)/(np.sqrt(2.)*sigma)))
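# Worked check (illustrative): at x == nsigma the erf argument is zero, so the
# efficiency is exactly half of max_efficiency, e.g. eff_model(3., 3., 1.) == 0.5.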
def reverse_dictionary(a):
"""Inverts a dictionary mapping.
Args:
a: input dictionary.
Returns:
b: output reversed dictionary.
"""
b = {}
for i in a.items():
try:
for k in i[1]:
if k not in b.keys():
b[k] = [i[0]]
else:
b[k].append(i[0])
        except TypeError:  # scalar (non-iterable) value
k = i[1]
if k not in b.keys():
b[k] = [i[0]]
else:
b[k].append(i[0])
return b
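# Example (illustrative): list values are fanned out to one key per element,
# while scalar (non-iterable) values fall through to the except branch:
# reverse_dictionary({'a': [1, 2], 'b': 2}) -> {1: ['a'], 2: ['a', 'b']}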
def get_observed_redshifts(targets, truth, targets_in_tile, obsconditions, parameter_filename=None, ignore_obscondition=False):
"""
Returns observed z, zerr, zwarn arrays given true object types and redshifts
Args:
targets: target catalog table; currently used only for target mask bits
truth: truth table with OIIFLUX, TRUEZ
targets_in_tile: dictionary. Keys correspond to tileids, its values are the
arrays of targetids observed in that tile.
obsconditions: table observing conditions with columns
'TILEID': array of tile IDs
'AIRMASS': array of airmass values on a tile
'EBMV': array of E(B-V) values on a tile
'LINTRANS': array of atmospheric transparency during spectro obs; floats [0-1]
'MOONFRAC': array of moonfraction values on a tile.
'SEEING': array of FWHM seeing during spectroscopic observation on a tile.
parameter_filename: yaml file with quickcat parameters
ignore_obscondition: if True, no variation of efficiency with obs. conditions (adjustment of exposure time should correct for mean change of S/N)
Returns:
tuple of (zout, zerr, zwarn)
"""
if parameter_filename is None :
# Load efficiency parameters yaml file
parameter_filename = resource_filename('desisim', 'data/quickcat.yaml')
params=None
with open(parameter_filename,"r") as file :
params = yaml.safe_load(file)
simtype = get_simtype(np.char.strip(truth['TRUESPECTYPE']), targets['DESI_TARGET'], targets['BGS_TARGET'], targets['MWS_TARGET'])
#simtype = get_simtype(np.char.strip(truth['TEMPLATETYPE']), targets['DESI_TARGET'], targets['BGS_TARGET'], targets['MWS_TARGET'])
truez = truth['TRUEZ']
targetid = truth['TARGETID']
try:
if 'DECAM_FLUX' in targets.dtype.names :
true_gflux = targets['DECAM_FLUX'][:, 1]
true_rflux = targets['DECAM_FLUX'][:, 2]
else:
true_gflux = targets['FLUX_G']
true_rflux = targets['FLUX_R']
except:
raise Exception('Missing photometry needed to estimate redshift efficiency!')
a_small_flux=1e-40
true_gflux[true_gflux<a_small_flux]=a_small_flux
true_rflux[true_rflux<a_small_flux]=a_small_flux
zout = truez.copy()
zerr = np.zeros(len(truez), dtype=np.float32)
zwarn = np.zeros(len(truez), dtype=np.int32)
objtypes = list(set(simtype))
n_tiles = len(np.unique(obsconditions['TILEID']))
if(n_tiles!=len(targets_in_tile)):
raise ValueError('Number of obsconditions {} != len(targets_in_tile) {}'.format(n_tiles, len(targets_in_tile)))
for objtype in objtypes:
ii=(simtype==objtype)
###################################
# redshift errors
###################################
if objtype =='ELG' :
sigma = params["ELG"]["UNCERTAINTY"]["SIGMA_17"]
powerlawindex = params["ELG"]["UNCERTAINTY"]["POWER_LAW_INDEX"]
oiiflux = truth['OIIFLUX'][ii]*1e17
zerr[ii] = sigma/(1.e-9+oiiflux**powerlawindex)*(1.+truez[ii])
zout[ii] += np.random.normal(scale=zerr[ii])
log.info("ELG sigma={:6.5f} index={:3.2f} median zerr={:6.5f}".format(sigma,powerlawindex,np.median(zerr[ii])))
elif objtype == 'LRG' :
sigma = params["LRG"]["UNCERTAINTY"]["SIGMA_17"]
powerlawindex = params["LRG"]["UNCERTAINTY"]["POWER_LAW_INDEX"]
zerr[ii] = sigma/(1.e-9+true_rflux[ii]**powerlawindex)*(1.+truez[ii])
zout[ii] += np.random.normal(scale=zerr[ii])
log.info("LRG sigma={:6.5f} index={:3.2f} median zerr={:6.5f}".format(sigma,powerlawindex,np.median(zerr[ii])))
elif objtype == 'QSO' :
zsplit = params['QSO_ZSPLIT']
sigma = params["LOWZ_QSO"]["UNCERTAINTY"]["SIGMA_17"]
powerlawindex = params["LOWZ_QSO"]["UNCERTAINTY"]["POWER_LAW_INDEX"]
jj=ii&(truth['TRUEZ']<=zsplit)
zerr[jj] = sigma/(1.e-9+(true_rflux[jj])**powerlawindex)*(1.+truez[jj])
log.info("LOWZ QSO sigma={:6.5f} index={:3.2f} median zerr={:6.5f}".format(sigma,powerlawindex,np.median(zerr[jj])))
sigma = params["LYA_QSO"]["UNCERTAINTY"]["SIGMA_17"]
powerlawindex = params["LYA_QSO"]["UNCERTAINTY"]["POWER_LAW_INDEX"]
jj=ii&(truth['TRUEZ']>zsplit)
zerr[jj] = sigma/(1.e-9+(true_rflux[jj])**powerlawindex)*(1.+truez[jj])
log.info("LYA QSO sigma={:6.5f} index={:3.2f} median zerr={:6.5f}".format(sigma,powerlawindex,np.median(zerr[jj])))
zout[ii] += np.random.normal(scale=zerr[ii])
elif objtype in _sigma_v.keys() :
log.info("{} use constant sigmav = {} km/s".format(objtype,_sigma_v[objtype]))
ii = (simtype == objtype)
zerr[ii] = _sigma_v[objtype] * (1+truez[ii]) / c
zout[ii] += np.random.normal(scale=zerr[ii])
else :
log.info("{} no redshift error model, will use truth")
###################################
# redshift efficiencies
###################################
# Set ZWARN flags for some targets
# the redshift efficiency only sets warning, but does not impact
# the redshift value and its error.
was_observed, goodz_prob = get_redshift_efficiency(
objtype, targets[ii], truth[ii], targets_in_tile,
obsconditions=obsconditions,params=params,
ignore_obscondition=ignore_obscondition)
n=np.sum(ii)
assert len(was_observed) == n
assert len(goodz_prob) == n
r = np.random.random(len(was_observed))
zwarn[ii] = 4 * (r > goodz_prob) * was_observed
###################################
# catastrophic failures
###################################
zlim=[0.,3.5]
cata_fail_fraction = np.zeros(n)
if objtype == "ELG" :
cata_fail_fraction[:] = params["ELG"]["FAILURE_RATE"]
zlim=[0.6,1.7]
elif objtype == "LRG" :
cata_fail_fraction[:] = params["LRG"]["FAILURE_RATE"]
zlim=[0.5,1.1]
elif objtype == "QSO" :
zsplit = params["QSO_ZSPLIT"]
cata_fail_fraction[truth['TRUEZ'][ii]<=zsplit] = params["LOWZ_QSO"]["FAILURE_RATE"]
cata_fail_fraction[truth['TRUEZ'][ii]>zsplit] = params["LYA_QSO"]["FAILURE_RATE"]
zlim=[0.5,3.5]
elif objtype in _cata_fail_fraction :
cata_fail_fraction[:] = _cata_fail_fraction[objtype]
failed = (np.random.uniform(size=n)<cata_fail_fraction)&(zwarn[ii]==0)
failed_indices = np.where(ii)[0][failed]
log.info("{} n_failed/n_tot={}/{}={:4.3f}".format(objtype,failed_indices.size,n,failed_indices.size/float(n)))
zout[failed_indices] = np.random.uniform(zlim[0],zlim[1],failed_indices.size)
return zout, zerr, zwarn
def get_median_obsconditions(tileids):
"""Gets the observational conditions for a set of tiles.
Args:
tileids : list of tileids that were observed
Returns:
Table with the observational conditions for every tile.
        It includes at least the following columns::
'TILEID': array of tile IDs
'AIRMASS': array of airmass values on a tile
'EBMV': array of E(B-V) values on a tile
'LINTRANS': array of atmospheric transparency during spectro obs; floats [0-1]
'MOONFRAC': array of moonfraction values on a tile.
'SEEING': array of FWHM seeing during spectroscopic observation on a tile.
"""
#- Load standard DESI tiles and trim to this list of tileids
import desimodel.io
tiles = desimodel.io.load_tiles()
tileids = np.asarray(tileids)
ii = np.in1d(tiles['TILEID'], tileids)
tiles = tiles[ii]
assert len(tiles) == len(tileids)
#- Sort tiles to match order of tileids
    i = np.argsort(tileids)
# Copyright 2020-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Operators for math."""
import numpy as np
from ... import context
from .. import signature as sig
from ..._checkparam import Validator as validator
from ..._checkparam import Rel
from ...common import dtype as mstype
from ...common.tensor import Tensor
from ...common._decorator import deprecated
from .._utils import get_broadcast_shape
from ..primitive import Primitive, PrimitiveWithInfer, PrimitiveWithCheck, prim_attr_register, _run_op
from ..._c_expression import Tensor as Tensor_
def _infer_shape_reduce(x, axis, keep_dims, prim_name):
"""Common infer for reduce operator"""
def reduce_one_axis(one_axis):
validator.check_int_range(one_axis, -dim, dim, Rel.INC_LEFT, 'axis', prim_name)
if one_axis < 0:
one_axis += dim
axis_reduce.add(one_axis)
validator.check_value_type('axis', axis, [int, tuple, list], prim_name)
dim = len(x)
axis_reduce = set()
if isinstance(axis, int):
reduce_one_axis(axis)
else:
if not axis:
if keep_dims:
return [1] * dim
return []
for index, one_axis in enumerate(axis):
validator.check_value_type('axis[%d]' % index, one_axis, [int], prim_name)
reduce_one_axis(one_axis)
out_shape = []
for i in range(dim):
if i in axis_reduce:
if keep_dims:
out_shape.append(1)
else:
out_shape.append(x[i])
return out_shape
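# Shape-inference sketch (illustrative): reducing shape [2, 3, 4] over axis 1
# drops that dimension unless keep_dims retains it with length 1:
# _infer_shape_reduce([2, 3, 4], 1, False, "ReduceSum") -> [2, 4]
# _infer_shape_reduce([2, 3, 4], 1, True, "ReduceSum") -> [2, 1, 4]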
class _BinaryOp(PrimitiveWithInfer):
"""
Define binary operators.
"""
__mindspore_signature__ = (sig.sig_dtype.T, sig.sig_dtype.T)
@prim_attr_register
def __init__(self):
"""Initialize _BinaryOp"""
self.init_prim_io_names(inputs=['x', 'y'], outputs=['output'])
def infer_shape(self, x_shape, y_shape):
return get_broadcast_shape(x_shape, y_shape, self.name)
def infer_min_shape(self, x_shape, y_shape):
return get_broadcast_shape(x_shape, y_shape, self.name, "min_shape")
def infer_max_shape(self, x_shape, y_shape):
return get_broadcast_shape(x_shape, y_shape, self.name, "max_shape")
class _MathBinaryOp(_BinaryOp):
"""
Define math binary operators.
"""
@staticmethod
def do_infer_dtype(x_dtype, y_dtype, valid_dtype=mstype.number_type, prim_name=None):
"""Staticmethod of infer dtype for _MathBinaryOp."""
args_type = {"x": x_dtype, "y": y_dtype}
complex_types = [mstype.tensor_type(mstype.complex64), mstype.tensor_type(mstype.complex128)]
if x_dtype in complex_types or y_dtype in complex_types:
type_infer_dict = {
(mstype.complex64, mstype.complex64): mstype.tensor_type(mstype.complex64),
(mstype.complex64, mstype.float32): mstype.tensor_type(mstype.complex64),
(mstype.float32, mstype.complex64): mstype.tensor_type(mstype.complex64),
(mstype.complex128, mstype.complex128): mstype.tensor_type(mstype.complex128),
(mstype.complex128, mstype.float64): mstype.tensor_type(mstype.complex128),
(mstype.float64, mstype.complex128): mstype.tensor_type(mstype.complex128),
}
if (x_dtype.element_type(), y_dtype.element_type()) not in type_infer_dict.keys():
raise TypeError('Complex math binary op expecting Tensor [complex64, complex64],'
+ '[complex64, float32], [float32, complex64], [complex128, complex128],'
+ '[complex128, float64], [float64, complex128],'
+ f'but got : [{format(x_dtype)},{format(y_dtype)}].')
return type_infer_dict.get((x_dtype.element_type(), y_dtype.element_type()))
validator.check_tensors_dtypes_same_and_valid(args_type, valid_dtype, prim_name)
return x_dtype
def infer_dtype(self, x_dtype, y_dtype):
return _MathBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type, self.name)
class _BitwiseBinaryOp(_MathBinaryOp):
"""
Define bitwise binary operators.
"""
@prim_attr_register
def __init__(self):
"""Initialize _BitwiseBinaryOp"""
self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['y'])
@staticmethod
def _check_bitwise_op_input_type(x1_type, x2_type, prim):
args = {'x1': x1_type, 'x2': x2_type}
valid_dtypes = mstype.int_type + mstype.uint_type
validator.check_tensors_dtypes_same_and_valid(args, valid_dtypes, prim)
return x1_type
def infer_dtype(self, x1_type, x2_type):
return _BitwiseBinaryOp._check_bitwise_op_input_type(x1_type, x2_type, self.name)
class Ger(Primitive):
r"""
Ger product of `x1` and `x2`. Calculate the outer product of two one-dimensional arrays.If `x1` is a 1D Tensor of
shape :math:`(m,)` and `x2` is a 1D Tensor of shape :math:`(n,)`,then `output` must be a Tensor of shape
:math:`(m * n)`.
Inputs:
- **x1** - (Tensor) - 1-D input Tensor, with dtype of float16 or float32.
- **x2** - (Tensor) - 1-D input Tensor, with dtype of float16 or float32.
Outputs:
Tensor, output matrix with the same dtype as inputs.With `x1` shape :math:`(m,)` and
`x2` shape of :math:`(n,)`,the `output` has shape :math:`(m * n)`.
Raises:
TypeError: If `x1` or `x2` is not a Tensor.
TypeError: If the dtype of `x1` and `x2` is neither float16 nor float32.
ValueError: If `x1` or `x2` is not a 1D Tensor.
Supported Platforms:
``Ascend``
Examples:
>>> x1 = Tensor([1., 2., 3., 4.], mindspore.float32)
>>> x2 = Tensor([1., 2., 3.], mindspore.float32)
>>> ger = ops.Ger()
>>> output = ger(x1, x2)
>>> print(output)
[[ 1. 2. 3.]
[ 2. 4. 6.]
[ 3. 6. 9.]
[ 4. 8. 12.]]
"""
@prim_attr_register
def __init__(self):
"""Initialize Ger"""
self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['y'])
class Add(_MathBinaryOp):
r"""
Adds two input tensors element-wise.
Refer to :func:`mindspore.ops.add` for more detail.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> # case 1: x and y are both Tensor.
>>> add = ops.Add()
>>> x = Tensor(np.array([1, 2, 3]).astype(np.float32))
>>> y = Tensor(np.array([4, 5, 6]).astype(np.float32))
>>> output = add(x, y)
>>> print(output)
[5. 7. 9.]
>>> # case 2: x is a scalar and y is a Tensor
>>> add = ops.Add()
>>> x = Tensor(1, mindspore.int32)
>>> y = Tensor(np.array([4, 5, 6]).astype(np.float32))
>>> output = add(x, y)
>>> print(output)
[5. 6. 7.]
>>> # the data type of x is int32, the data type of y is float32,
>>> # and the output is the data format of higher precision float32.
>>> print(output.dtype)
Float32
"""
@staticmethod
def _infer_specified_add_value(a, b):
"""Calculate min/max value for output for Add op"""
if a is not None and b is not None:
if isinstance(a, (Tensor, Tensor_)):
a = a.asnumpy()
if isinstance(b, (Tensor, Tensor_)):
b = b.asnumpy()
a = np.array(a)
b = np.array(b)
out = a + b
out = tuple(out.tolist())
return out
return None
def _infer_min_value(self, x, y):
"""Calculate min value for output for Add op"""
return self._infer_specified_add_value(x, y)
def _infer_max_value(self, x, y):
"""Calculate max value for output for Add op"""
return self._infer_specified_add_value(x, y)
def infer_value(self, x, y):
if x is not None and y is not None:
x = x.asnumpy()
y = y.asnumpy()
out = x + y
out = np.array(out, x.dtype)
return Tensor(out)
return None
class Addcdiv(Primitive):
r"""
Performs the element-wise division of tensor x1 by tensor x2,
multiply the result by the scalar value and add it to input_data.
.. math::
y[i] = input\_data[i] + value[i] * (x1[i] / x2[i])
Inputs:
- **input_data** (Tensor) - The tensor to be added, with data type float16 and float32.
- **x1** (Tensor) - The numerator tensor, with data type float16 and float32.
- **x2** (Tensor) - The denominator tensor, with data type float16 and float32.
- **value** (Tensor) - The multiplier for tensor x1/x2, with data type float16, float32.
Outputs:
Tensor y, has the same shape and dtype as x1/x2.
Raises:
TypeError: If dtype of `x1`, `x2`, `value`, `input_data` is not tensor.
TypeError: If dtype of `input_data` is not one of: float32, float16.
TypeError: If dtype of `x1` or `x2` is not one of: float32, float16.
TypeError: If dtype of `value` is not one of: float32, float16.
ValueError: If `x1` could not be broadcast to a tensor with shape of `x2`.
ValueError: If `value` could not be broadcast to tensors with shapes of `x1/x2`.
ValueError: If `input_data` could not be broadcast to tensors with shapes of `value*(x1/x2)`.
Supported Platforms:
``Ascend``
Examples:
>>> input_data = Tensor(np.array([1, 1, 1, 1]), mindspore.float32)
>>> x1 = Tensor(np.array([1, 2, 3, 4]), mindspore.float32)
>>> x2 = Tensor(np.array([4, 3, 2, 1]), mindspore.float32)
>>> value = Tensor([1], mindspore.float32)
>>> addcdiv = ops.Addcdiv()
>>> y = addcdiv(input_data, x1, x2, value)
>>> print(y)
[1.25 1.6666667 2.5 5. ]
"""
@prim_attr_register
def __init__(self):
"""Initialize Addcdiv """
self.init_prim_io_names(inputs=['input_data', 'x1', 'x2', 'value'], outputs=['y'])
class Addcmul(Primitive):
r"""
Performs the element-wise product of tensor x1 and tensor x2,
multiply the result by the scalar value and add it to input_data.
.. math::
output[i] = input\_data[i] + value[i] * (x1[i] * x2[i])
Inputs:
- **input_data** (Tensor) - The tensor to be added, with data type float16, float32 and int32.
- **x1** (Tensor) - The tensor to be multiplied, with data type float16, float32 and int32.
- **x2** (Tensor) - The tensor to be multiplied, with data type float16, float32 and int32.
- **value** (Tensor) - The multiplier for tensor x1*x2, with data type float16, float32 and int32.
Outputs:
Tensor, has the same shape and dtype as x1*x2.
Raises:
TypeError: If dtype of `x1`, `x2`, `value`, `input_data` is not tensor.
TypeError: If dtype of `input_data` is not one of: float32, float16, int32.
TypeError: If dtype of `x1` or `x2` is not one of: float32, float16, int32.
TypeError: If dtype of `value` is not one of: float32, float16, int32.
ValueError: If `x1` could not be broadcast to a tensor with shape of `x2`.
ValueError: If `value` could not be broadcast to tensors with shapes of `x1` * `x2`.
ValueError: If `input_data` could not be broadcast to tensors with shapes of `value*(x1*x2)`.
Supported Platforms:
``Ascend``
Examples:
>>> input_data = Tensor(np.array([1, 1, 1]), mindspore.float32)
>>> x1 = Tensor(np.array([[1], [2], [3]]), mindspore.float32)
>>> x2 = Tensor(np.array([[1, 2, 3]]), mindspore.float32)
>>> value = Tensor([1], mindspore.float32)
>>> addcmul = ops.Addcmul()
>>> y = addcmul(input_data, x1, x2, value)
>>> print(y)
[[ 2. 3. 4.]
[ 3. 5. 7.]
[ 4. 7. 10.]]
"""
@prim_attr_register
def __init__(self):
"""Initialize Addcmul """
self.init_prim_io_names(inputs=['input_data', 'x1', 'x2', 'value'], outputs=['y'])
class TensorAdd(_MathBinaryOp):
"""
Same as operator Add. TensorAdd will be deprecated in the future.
Please use Add instead.
"""
@deprecated("1.1", "Add", True)
@prim_attr_register
def __init__(self):
"""Initialize TensorAdd."""
_MathBinaryOp.__init__(self)
def infer_value(self, x, y):
if x is not None and y is not None:
x = x.asnumpy()
y = y.asnumpy()
out = x + y
out = np.array(out, x.dtype)
return Tensor(out)
return None
class AssignAdd(Primitive):
"""
Updates a `Parameter` by adding a value to it.
Refer to :func:`mindspore.ops.assign_add` for more detail.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.AssignAdd = ops.AssignAdd()
... self.variable = mindspore.Parameter(initializer(1, [1], mindspore.int64), name="global_step")
...
... def construct(self, x):
... self.AssignAdd(self.variable, x)
... return self.variable
...
>>> net = Net()
>>> value = Tensor(np.ones([1]).astype(np.int64)*100)
>>> output = net(value)
>>> print(net.variable.asnumpy())
[101]
"""
__mindspore_signature__ = (
sig.make_sig('ref', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('value', dtype=sig.sig_dtype.T)
)
@prim_attr_register
def __init__(self):
"""Initialize AssignAdd"""
self.init_prim_io_names(inputs=['ref', 'value'], outputs=['ref'])
self.add_prim_attr('side_effect_mem', True)
class AssignSub(Primitive):
"""
Updates a `Parameter` by subtracting a value from it.
Refer to :func:`mindspore.ops.assign_sub` for more detail.
Supported Platforms:
``Ascend``
Examples:
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.AssignSub = ops.AssignSub()
... self.variable = mindspore.Parameter(initializer(1, [1], mindspore.int32), name="global_step")
...
... def construct(self, x):
... self.AssignSub(self.variable, x)
... return self.variable
...
>>> net = Net()
>>> value = Tensor(np.ones([1]).astype(np.int32)*100)
>>> output = net(value)
>>> print(output)
[-99]
"""
__mindspore_signature__ = (
sig.make_sig('val', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
sig.make_sig('value', dtype=sig.sig_dtype.T)
)
@prim_attr_register
def __init__(self):
"""Initialize AssignSub"""
self.init_prim_io_names(inputs=['val', 'value'], outputs=['val'])
self.add_prim_attr('side_effect_mem', True)
class _Reduce(PrimitiveWithInfer):
"""
Definition of base class of reduction class operators.
Args:
keep_dims (bool): If true, keep these reduced dimensions and the length is 1.
If false, don't keep these dimensions. Default: False.
"""
__mindspore_signature__ = (
sig.make_sig('input_x'),
sig.make_sig('axis', default=())
)
@prim_attr_register
def __init__(self, keep_dims=False):
"""Initialize Reduce"""
validator.check_value_type('keep_dims', keep_dims, [bool], self.name)
self.init_prim_io_names(inputs=['input_x', 'axis'], outputs=['y'])
def __call__(self, x, axis=()):
args = [x, axis]
output = _run_op(self, self.name, args)
return output
def do_infer(self, input_x, axis, valid_dtype=mstype.number_type):
""" return meta infos of input parameters """
axis_v = axis['value']
input_shp = input_x['shape']
args = {'input_x': input_x['dtype']}
validator.check_tensors_dtypes_same_and_valid(args, valid_dtype, self.name)
if not isinstance(axis['dtype'], mstype.tensor_type) and axis_v is None:
raise ValueError(f"For '{self.name}', the 'axis' cannot be None, but got {axis}.")
if -1 in input_shp:
if axis_v is None:
max_v = max(input_shp)
                if 'max_shape' in input_x and 'min_shape' in input_x:
input_max_shp = input_x['max_shape']
max_v = max(input_max_shp)
axis_shape_list = axis['shape']
if len(axis_shape_list) != 1:
raise ValueError(f"For '{self.name}', the shape of 'axis' must be 1-D, but "
f"got {len(axis_shape_list)}.")
axis_shape = axis_shape_list[0]
if axis_shape == -1 and not self.keep_dims:
out_shape = np.array([-2]).tolist()
output_min_shape = input_x['min_shape']
output_max_shape = input_x['max_shape']
elif not self.keep_dims:
out_shape = -1 * np.ones_like(input_shp[:-axis_shape])
out_shape = out_shape.tolist()
output_min_shape = np.ones_like(out_shape).tolist()
                output_max_shape = max_v * np.ones_like(out_shape)
import os
import argparse
import pickle
import torch
import numpy as np
from src.models.api import EvaluationModel, NegSampleGenerator
from torch import nn
from src.models.ensemble.model_selector import ModelSelector, generate_lookup
from src.models.rotate import RotatE
from src.models.conve import ConveEvaluationModel
# TransE, DistMult and RESCAL are used below but were never imported; the
# module paths are assumed to mirror src.models.rotate (adjust as needed)
from src.models.transe import TransE
from src.models.distmult import DistMult
from src.models.rescal import RESCAL
import mlflow
import mlflow.pytorch
class Namespace:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def parse_args(args=None):
parser = argparse.ArgumentParser(
description='Load trained model and use it for predictions'
)
parser.add_argument('-m', '--model', type=str, default=None)
parser.add_argument('-d', '--data', type=str, default=None)
parser.add_argument('-o', '--output_path', type=str, default=None)
parser.add_argument('-p', '--prediction_only', type=bool, default=False)
return parser.parse_args(args)
def load_data(data_path):
path_train = os.path.join(data_path, 'train.pickle')
with open(path_train, 'rb') as handle:
train = pickle.load(handle)
path_valid = os.path.join(data_path, 'valid.pickle')
with open(path_valid, 'rb') as handle:
valid = pickle.load(handle)
path_test = os.path.join(data_path, 'test.pickle')
with open(path_test, 'rb') as handle:
test = pickle.load(handle)
return train, valid, test
def main(args):
"""
Load trained model and use it for predictions.
"""
if args.model is None or args.data is None:
raise ValueError('You have to specify model and data input paths.')
# load data
train_triples, valid_triples, test_triples = load_data(args.data)
# create model and load already trained embeddings
all_true_triples = np.concatenate([train_triples, valid_triples,
test_triples], axis=0)
neg_sample_generator = NegSampleGenerator(all_true_triples,
create_filter_bias=True)
    # get all model structures (without trained parameters yet)
    # Paths might need to be changed, so all models should live in a single
    # directory; with that we still need only 1 arg.
    # Second element is the model name
models = dict()
num_samples = 80
batch_size = 4
epsilon = 2.0
hidden_dim = 1000
    rotate_gamma = nn.Parameter(
        torch.Tensor([9.0]),
        requires_grad=False
    )
    # transe_gamma is used below but was never defined; assuming the same
    # margin as RotatE (adjust to the value the TransE model was trained with)
    transe_gamma = nn.Parameter(
        torch.Tensor([9.0]),
        requires_grad=False
    )
embedding_range = nn.Parameter(
torch.Tensor([(rotate_gamma.item() + epsilon) / hidden_dim]),
requires_grad=False
)
# Loading convE
mlflow.set_tracking_uri('http://10.195.1.54')
conve_model = mlflow.pytorch.load_model('sftp://[email protected]/sftpuser/mlruns/2/0278ec00cc7b47eda553db7c4f66120e/artifacts/models/conve-model-43') # noqa
device = torch.device('cuda')
conve_model.device = device
tensor_triples = torch.tensor(all_true_triples,
dtype=torch.long,
device=device)
all_entities = torch.arange(conve_model.num_entities,
dtype=torch.long,
device=device)
models['ConvE'] = ConveEvaluationModel(conve_model, tensor_triples,
all_entities, device='cuda')
models['TransE'] = EvaluationModel(model_class=TransE(transe_gamma),
neg_sample_generator=neg_sample_generator)
models['RotatE'] = EvaluationModel(model_class=RotatE(embedding_range, rotate_gamma),
neg_sample_generator=neg_sample_generator)
models['DistMult'] = EvaluationModel(model_class=DistMult(),
neg_sample_generator=neg_sample_generator)
models['RESCAL'] = EvaluationModel(model_class=RESCAL(batch_size=batch_size),
neg_sample_generator=neg_sample_generator)
# Loading DistMult
distmult_path = os.path.join(args.model, 'DistMult')
path = os.path.join(distmult_path, 'entity_embedding.npy')
new_entity_embedding = nn.Parameter(torch.from_numpy(np.load(path)))
path = os.path.join(distmult_path, 'relation_embedding.npy')
    new_relation_embedding = nn.Parameter(torch.from_numpy(np.load(path)))
# Copyright (c) 2020 <NAME>
from baselines.common import Dataset, explained_variance, fmt_row, zipsame
from baselines import logger
import baselines.common.tf_util as U
import tensorflow as tf, numpy as np
import time
from baselines.common.mpi_adam import MpiAdam
from baselines.common.mpi_moments import mpi_moments
from mpi4py import MPI
from collections import deque
import pdb
import os
import shutil
from scipy import spatial
import gym
def traj_segment_generator(pi, env, horizon, stochastic, num_options,saves,results,rewbuffer,dc):
# sample state action pairs, i.e. sample rollouts on the real system
max_action = env.action_space.high
t = 0
glob_count = 0
glob_count_thresh = -1
ac = env.action_space.sample() # not used, just so we have the datatype
new = True # marks if we're on first timestep of an episode
ob = env.reset()
ob_env_shape = np.shape(ob)
ac_env_shape = np.shape(ac)
ac = pi.reset_last_act().eval()
    ob = np.concatenate((ob, ac))
"""plotting functionality using matplotlib."""
import numpy as np
from scipy.spatial.distance import squareform
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.ticker
def imagelabels(images, ax=None, axis="y", nrow=1, zoom=0.1, offset=20, linewidth=None):
"""draw images at each tick along the axis ('x', 'y', or ['x','y'] of axis handle
ax, potentially arranging the images into multiple rows. images is a dict where each
key is a potential xtick and the value is an image matrix.
Parameters
----------
    images : dict, required
dict where each key is a tick and each value is an image matrix.
ax : matplotlib.Axes handle, optional, default: None
axis to draw on
axis : str, optional, default: 'y'
axis direction to add labels to (lists are supported e.g. ['x','y'])
nrow : int, optional, default: 1
number of rows or columns to stack images along.
zoom : float, optional, default: .1
matplotlib.offsetbox.OffsetImage argument. As proportion of actual image size in
figure, so a bit of tweaking is usually required to achieve something that looks
reasonable.
offset : float, optional, default: 20
offset for each additional row of images (barring the first, which gets
offset*.25). As zoom, this will require tuning.
linewidth: float, optional, default: None
width of connecting line. If 0, no line is drawn. If None (the default), the
line width is set by the current major tick line width *.5 (why .5? Don't know,
but it seems to work).
If printing with 'bbox_inches'='tight' (or equivalently using matplotlib inline in a
notebook), the annotations get cropped. This is a known Matplotlib issue (see
https://github.com/matplotlib/matplotlib/issues/12699). Workaround this by passing
bbox_extra_artists=[a.offsetbox for a in ab] when saving."""
if ax is None:
ax = plt.gca()
axis = list(axis)
ab = []
for thisax in axis:
axhand = getattr(ax, thisax + 'axis')
if linewidth is None:
linewidth = .5 * axhand.get_majorticklines()[0].properties()['linewidth']
if linewidth > 0:
# we replace the existing ticks
axhand.set_tick_params(length=0)
targetticks = list(set(axhand.get_ticklocs()).intersection(images.keys()))
targetticks.sort()
# start on the back row to reduce overdrawing of lines
currentoffset = offset * (nrow+0.2)
for thisrow in range(nrow):
thistarget = targetticks[thisrow::nrow]
currentoffset -= offset
for key in thistarget:
posarg = dict(xy = (0, key),
xybox = (-currentoffset, 0),
xycoords = ("axes fraction", "data"),
box_alignment = (1, .5))
# map to correct order for axis
if thisax.lower() == 'x':
posarg = {k: tuple(list(v)[::-1]) for k,v in posarg.items()}
imbox = matplotlib.offsetbox.OffsetImage(images[key], zoom=zoom,
cmap='gray')
imbox.image.axes = ax
ab.append(matplotlib.offsetbox.AnnotationBbox(imbox, boxcoords="offset points",
bboxprops=dict(edgecolor='w', facecolor='w'),
arrowprops=dict(linewidth=linewidth,
arrowstyle='-'),
pad=0.,
annotation_clip=False,
**posarg))
                    ax.add_artist(ab[-1])
return ab
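# Example usage (illustrative sketch; `dissimilarity_matrix` and the tick/image
# mapping in `images` are assumptions, not part of the original module):
#
#     fig, ax = plt.subplots()
#     ax.imshow(dissimilarity_matrix)
#     ab = imagelabels(images, ax=ax, axis='y', nrow=2, zoom=.08)
#     fig.savefig('labeled.pdf', bbox_inches='tight',
#                 bbox_extra_artists=[a.offsetbox for a in ab])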
class SpecialTickLocator(matplotlib.ticker.LinearLocator):
"""LinearLocator sub-class to support manually inserting extra ticks as indicated
by special_values. Otherwise identical functionality to LinearLocator.
Example usage:
ax_cb.yaxis.set_major_locator(SpecialTickLocator(numticks=2, special_values=0.))"""
def __init__(self, special_values=[], **kwarg):
super().__init__(**kwarg)
self.special_values = np.asarray(special_values).flatten()
def tick_values(self, vmin, vmax):
# post filter
ticklocs = super().tick_values(vmin, vmax)
if self.special_values.size > 0:
ticklocs = np.concatenate((ticklocs, self.special_values))
return ticklocs
class ImageTickLocator(SpecialTickLocator):
"""SpecialTickLocator sub-class to support image plots, where the center of mass
is offset (typically by 0.5) from the plot limit, and where only integer ticks make
sense. Otherwise identical functionality to SpecialTickLocator.
Example usage:
ax_plt.xaxis.set_major_locator(ImageTickLocator(numticks=5))
"""
def __init__(self, **kwarg):
super().__init__(**kwarg)
def rescore(self, x, tickfun):
x = np.asarray(tickfun(x))
# fix ugly rounding error
x[x == -0.] = 0.
if x.size == 1:
x = float(x)
return x
def tick_values(self, vmin, vmax):
        # unlike SpecialTickLocator we pre-filter the limits so ticks land inside them
if vmax < vmin:
vmin, vmax = vmax, vmin
vmin = self.rescore(vmin, np.ceil)
vmax = self.rescore(vmax, np.floor)
ticklocs = super().tick_values(vmin, vmax)
# now any non-lim values can just be rounded
ticklocs[1:-1] = self.rescore(ticklocs[1:-1], np.round)
return ticklocs
def plotdim(npanel, maxcol=12, mode="square"):
"""work out a rectangular plot grid that achieves a total of npanel. maxcol
limits how many columns we allow. mode can be 'square' (keep approximately
equal nrow and ncol), 'cols' (as few rows as possible), 'rows' (as few
columns as possible)."""
# here's what we know: the most rows we'll consider is the square case
maxrow = int(np.ceil(np.sqrt(npanel)))
# and the max number of columns is the min of npanel and some sensible max
maxcol = min(maxcol, npanel)
# so all the possible pairings would be
    row, col = np.meshgrid(np.arange(maxrow), np.arange(maxcol))
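# plotdim is truncated above by the fragment boundary. A minimal sketch of the
# remaining selection logic, under stated assumptions (1-based grid sizes,
# ties broken by the number of wasted panels) -- not the original implementation:
def plotdim_sketch(npanel, maxcol=12, mode="square"):
    maxrow = int(np.ceil(np.sqrt(npanel)))
    maxcol = min(maxcol, npanel)
    rows, cols = np.meshgrid(np.arange(maxrow) + 1, np.arange(maxcol) + 1)
    rows, cols = rows.ravel(), cols.ravel()
    fits = rows * cols >= npanel  # keep grids with enough panels
    rows, cols = rows[fits], cols[fits]
    waste = rows * cols - npanel  # empty panels left over per candidate
    if mode == "square":
        # primary key: squareness; secondary: fewest empty panels
        order = np.lexsort((waste, np.abs(rows - cols)))
    elif mode == "cols":
        # primary key: fewest rows; secondary: fewest empty panels
        order = np.lexsort((waste, rows))
    else:  # "rows"
        # primary key: fewest columns; secondary: fewest empty panels
        order = np.lexsort((waste, cols))
    return int(rows[order[0]]), int(cols[order[0]])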
# Copyright (c) 2020 CNES
#
# All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
import unittest
import os
import netCDF4
import numpy as np
import pyinterp
import pyinterp.fill
class Fill(unittest.TestCase):
GRID = os.path.join(os.path.dirname(os.path.abspath(__file__)), "dataset",
"mss.nc")
@classmethod
def _load(cls, cube=False):
ds = netCDF4.Dataset(cls.GRID)
x_axis = pyinterp.Axis(ds.variables["lon"][::5], is_circle=True)
y_axis = pyinterp.Axis(ds.variables["lat"][::5])
mss = ds.variables["mss"][::5, ::5].T
mss[mss.mask] = float("nan")
if cube:
z_axis = pyinterp.Axis(np.arange(2))
mss = np.stack([mss.data] * len(z_axis)).transpose(1, 2, 0)
return pyinterp.grid.Grid3D(x_axis, y_axis, z_axis, mss)
return pyinterp.grid.Grid2D(x_axis, y_axis, mss.data)
def test_loess(self):
grid = self._load()
filled0 = pyinterp.fill.loess(grid, num_threads=0)
filled1 = pyinterp.fill.loess(grid, num_threads=1)
data = np.copy(grid.array)
data[np.isnan(data)] = 0
        filled0[np.isnan(filled0)] = 0
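        # The test is truncated here by the fragment boundary. A plausible
        # continuation (assumption): mirror the NaN handling above and check
        # that the fill result is independent of the thread count.
        filled1[np.isnan(filled1)] = 0
        self.assertTrue(np.allclose(filled0, filled1))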
"""
Created on Mon Jun 24 10:52:25 2019
Reads a wav file with SDR IQ capture of FM stations located in :
https://mega.nz/#F!3UUUnSiD!WLhWZ3ff4f4Pi7Ko_zcodQ
Also: https://drive.google.com/open?id=1itb_ePcPeDRXrVBIVL-1Y3wrt8yvpW28
Also generates IQ stream sampled at 2.4Msps to simulate a similar spectrum
sinusoids, this might be useful in an early stage to use a known signal.
@author: f.divruno
"""
#!/usr/bin/env python3
import wave
import numpy as np
import matplotlib.pyplot as plt
# ------------ PARAMETERS
N = 5000  # number of samples to read
nAverages = 10  # number of averages
#folder = "C:\\Users\\F.Divruno\\Downloads\\" # change this to your folder.
#filename = "17-22-08_89100kHz.wav"
folder = "FM_station_data/"
filename = "17-22-08_89100kHz.wav"
CenterFrequency = 89100e3 # Centre freq of the recording is the number at the end of the filename.
# ------------
#Read an IQ recording of FM stations:
wav_in = wave.open(folder+ filename, "r")
sampleFreq = 2.4e6  # sample rate of the SDR used to acquire these signals
timeMax = N/sampleFreq # duration of the loaded signals
t = np.linspace(0,timeMax,N)
# Read the file
I = np.zeros(N)
Q = np.zeros(N)
for n in range(N):
aux = wav_in.readframes(1)
I[n] = aux[0]
Q[n] = aux[1]
# Plot the spectrum of the recording
I_fft = np.fft.fftshift(np.fft.fft(I))
Q_fft = np.fft.fftshift(np.fft.fft(Q))
V = abs(I_fft-1j*Q_fft)
freq = np.fft.fftshift(np.fft.fftfreq(N, d=1/sampleFreq))
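# The script is truncated here. A plausible continuation (assumption): plot the
# spectrum against absolute frequency in MHz; the small epsilon avoids log(0).
plt.figure()
plt.plot((freq + CenterFrequency) / 1e6, 20 * np.log10(V + 1e-12))
plt.xlabel("Frequency [MHz]")
plt.ylabel("Amplitude [dB]")
plt.title("Spectrum of FM band IQ capture")
plt.show()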
"""
This module provides functions for using Orca models for various
types of the predictions. This is the main module that you need for
interacting with Orca models.
To use any of the prediction functions, `load_resources` has to be
called first to load the necessary resources.
The coordinates used in Orca are 0-based, inclusive for the start
coordinate and exclusive for the end coordinate, consistent with
python conventions.
"""
import os
import pathlib
import numpy as np
import torch
from scipy.stats import spearmanr
from selene_utils2 import MemmapGenome, Genomic2DFeatures
import selene_sdk
from selene_sdk.sequences import Genome
from orca_models import H1esc, Hff, H1esc_1M, Hff_1M, H1esc_256M, Hff_256M
from orca_utils import (
genomeplot,
genomeplot_256Mb,
StructuralChange2,
process_anno,
coord_round,
coord_clip,
)
ORCA_PATH = str(pathlib.Path(__file__).parent.absolute())
model_dict_global, target_dict_global = {}, {}
def load_resources(models=["32M"], use_cuda=True, use_memmapgenome=True):
"""
Load resources for Orca predictions including the specified
Orca models and hg38 reference genome. It also creates Genomic2DFeatures
objects for experimental micro-C datasets (for comparison with prediction).
    Loaded resources are accessible as global variables.
    The list of global variables generated is below:
Global Variables
----------------
hg38 : selene_utils2.MemmapGenome or selene_sdk.sequences.Genome
If `use_memmapgenome==True` and the resource file for hg38 mmap exists,
use MemmapGenome instead of Genome.
h1esc : orca_models.H1esc
1-32Mb Orca H1-ESC model
hff : orca_models.Hff
1-32Mb Orca HFF model
h1esc_256m : orca_models.H1esc_256M
32-256Mb Orca H1-ESC model
hff_256m : orca_models.Hff_256M
32-256Mb Orca HFF model
h1esc_1m : orca_models.H1esc_1M
1Mb Orca H1-ESC model
hff_1m : orca_models.Hff_1M
1Mb Orca HFF model
    target_h1esc : selene_utils2.Genomic2DFeatures
        Genomic2DFeatures object that loads the H1-ESC micro-C dataset 4DNFI9GMP2J8
        at 4kb resolution, used for comparison with 1-32Mb models.
    target_hff : selene_utils2.Genomic2DFeatures
        Genomic2DFeatures object that loads the HFF micro-C dataset 4DNFI643OYP9
        at 4kb resolution, used for comparison with 1-32Mb models.
    target_h1esc_256m : selene_utils2.Genomic2DFeatures
        Genomic2DFeatures object that loads the H1-ESC micro-C dataset 4DNFI9GMP2J8
        at 32kb resolution, used for comparison with 32-256Mb models.
    target_hff_256m : selene_utils2.Genomic2DFeatures
        Genomic2DFeatures object that loads the HFF micro-C dataset 4DNFI643OYP9
        at 32kb resolution, used for comparison with 32-256Mb models.
    target_h1esc_1m : selene_utils2.Genomic2DFeatures
        Genomic2DFeatures object that loads the H1-ESC micro-C dataset 4DNFI9GMP2J8
        at 1kb resolution, used for comparison with 1Mb models.
    target_hff_1m : selene_utils2.Genomic2DFeatures
        Genomic2DFeatures object that loads the HFF micro-C dataset 4DNFI643OYP9
        at 1kb resolution, used for comparison with 1Mb models.
target_available : bool
Indicate whether the micro-C dataset resource file is available.
Parameters
----------
models : list(str)
List of model types to load, supported model types includes
"32M", "256M", "1M", corresponding to 1-32Mb, 32-256Mb, and 1Mb
models. Lower cases are also accepted.
use_cuda : bool, optional
Default is True. If true, loaded models are moved to GPU.
use_memmapgenome : bool, optional
Default is True. If True and the resource file for hg38 mmap exists,
use MemmapGenome instead of Genome.
"""
global hg38, target_hff, target_h1esc, target_hff_256m, target_h1esc_256m, target_hff_1m, target_h1esc_1m, target_available
if "32M" in models or "32m" in models:
global h1esc, hff
h1esc = H1esc()
h1esc.eval()
hff = Hff()
hff.eval()
if use_cuda:
h1esc.cuda()
hff.cuda()
else:
h1esc.cpu()
hff.cpu()
model_dict_global["h1esc"] = h1esc
model_dict_global["hff"] = hff
if "1M" in models or "1m" in models:
global h1esc_1m, hff_1m
h1esc_1m = H1esc_1M()
h1esc_1m.eval()
hff_1m = Hff_1M()
hff_1m.eval()
if use_cuda:
h1esc_1m.cuda()
hff_1m.cuda()
else:
h1esc_1m.cpu()
hff_1m.cpu()
model_dict_global["h1esc_1m"] = h1esc_1m
model_dict_global["hff_1m"] = hff_1m
if "256M" in models or "256m" in models:
global h1esc_256m, hff_256m
h1esc_256m = H1esc_256M()
h1esc_256m.eval()
hff_256m = Hff_256M()
hff_256m.eval()
if use_cuda:
h1esc_256m.cuda()
hff_256m.cuda()
else:
h1esc_256m.cpu()
hff_256m.cpu()
model_dict_global["h1esc_256m"] = h1esc_256m
model_dict_global["hff_256m"] = hff_256m
if (
use_memmapgenome
and pathlib.Path("/resources/Homo_sapiens.GRCh38.dna.primary_assembly.fa.mmap").exists()
):
hg38 = MemmapGenome(
input_path=ORCA_PATH + "/resources/Homo_sapiens.GRCh38.dna.primary_assembly.fa",
memmapfile=ORCA_PATH + "/resources/Homo_sapiens.GRCh38.dna.primary_assembly.fa.mmap",
)
else:
hg38 = Genome(
input_path=ORCA_PATH + "/resources/Homo_sapiens.GRCh38.dna.primary_assembly.fa",
)
target_available = True
if os.path.exists(ORCA_PATH + "/resources/4DNFI643OYP9.rebinned.mcool"):
target_hff = Genomic2DFeatures(
[ORCA_PATH + "/resources/4DNFI643OYP9.rebinned.mcool::/resolutions/4000"],
["r4000"],
(8000, 8000),
cg=True,
)
target_hff_256m = Genomic2DFeatures(
[ORCA_PATH + "/resources/4DNFI643OYP9.rebinned.mcool::/resolutions/32000"],
["r32000"],
(8000, 8000),
cg=True,
)
target_hff_1m = Genomic2DFeatures(
[ORCA_PATH + "/resources/4DNFI643OYP9.rebinned.mcool::/resolutions/1000"],
["r1000"],
(8000, 8000),
cg=True,
)
target_dict_global['hff'] = target_hff
target_dict_global['hff_256m'] = target_hff_256m
target_dict_global['hff_1m'] = target_hff_1m
else:
target_available = False
if os.path.exists(ORCA_PATH + "/resources/4DNFI9GMP2J8.rebinned.mcool"):
target_h1esc = Genomic2DFeatures(
[ORCA_PATH + "/resources/4DNFI9GMP2J8.rebinned.mcool::/resolutions/4000"],
["r4000"],
(8000, 8000),
cg=True,
)
target_h1esc_256m = Genomic2DFeatures(
[ORCA_PATH + "/resources/4DNFI9GMP2J8.rebinned.mcool::/resolutions/32000"],
["r32000"],
(8000, 8000),
cg=True,
)
target_h1esc_1m = Genomic2DFeatures(
[ORCA_PATH + "/resources/4DNFI9GMP2J8.rebinned.mcool::/resolutions/1000"],
["r1000"],
(8000, 8000),
cg=True,
)
target_dict_global['h1esc'] = target_h1esc
target_dict_global['h1esc_256m'] = target_h1esc_256m
target_dict_global['h1esc_1m'] = target_h1esc_1m
else:
target_available = False
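# Example usage (illustrative; resource paths and GPU availability are
# assumptions about the local setup):
#
#     load_resources(models=["32M"], use_cuda=True)
#     # afterwards `h1esc`, `hff`, and `hg38` are available as globals, and
#     # `target_h1esc` / `target_hff` exist if the micro-C .mcool files were found.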
def genomepredict(
sequence, mchr, mpos=-1, wpos=-1, models=["h1esc", "hff"], targets=None, annotation=None, use_cuda=True, nan_thresh=1,
):
"""Multiscale prediction for a 32Mb sequence
input, zooming into the position specified when generating a series
of 32Mb, 16Mb, 8Mb, 4Mb, 2Mb and 1Mb predictions with increasing
resolutions (up to 4kb). This function also processes
information used only for plotting including targets and annotation.
For larger sequence and interchromosomal predictions, you can use
256Mb input with genomepredict_256Mb.
Parameters
----------
sequence : numpy.ndarray
One-hot sequence encoding of shape 1 x 4 x 32000000.
The encoding can be generated with `selene_sdk.Genome.sequence_to_encoding()`.
mchr : str
Chromosome name. This is used for annotation purpose only.
mpos : int, optional
The coordinate to zoom into for multiscale prediction.
wpos : int, optional
The coordinate of the center position of the sequence, which is
start position + 16000000.
models : list(torch.nn.Module or str), optional
Models to use. Default is H1-ESC and HFF Orca models.
targets : list(numpy.ndarray), optional
The observed balanced contact matrices from the
32Mb region. Used only for plotting when used with genomeplot. The length and
order of the list of targets should match the models specified (default is
H1-ESC and HFF Orca models).
        The dimensions of the arrays should be 8000 x 8000 (4kb resolution).
    annotation : list or None, optional
        List of annotations for plotting. The annotation can be generated with
        orca_utils.process_anno; see its documentation for more details.
use_cuda : bool, optional
Default is True. If False, use CPU.
nan_thresh : int, optional
Default is 1. Specify the threshold of the proportion of NaNs values
allowed during downsampling for the observed matrices. Only relevant for plotting.
The lower resolution observed matrix value are computed by averaging multiple
bins into one. By default, we allow missing values and only average over the
        non-missing values, and the values with more than the specified proportion
of missing values will be filled with NaN.
Returns
----------
output : dict
Result dictionary that can be used as input for genomeplot. The dictionary
has the following keys:
- predictions : list(list(numpy.ndarray), list(numpy.ndarray))
Multi-level predictions for H1-ESC and HFF cell types.
- experiments : list(list(numpy.ndarray), list(numpy.ndarray))
Observations for H1-ESC and HFF cell types that matches the predictions.
Exists if `targets` is specified.
- normmats : list(list(numpy.ndarray), list(numpy.ndarray))
Background distance-based expected balanced contact matrices for
H1-ESC and HFF cell types that matches the predictions.
- start_coords : list(int)
Start coordinates for the prediction at each level.
- end_coords : list(int)
End coordinates for the prediction at each level.
- chr : str
The chromosome name.
- annos : list(list(...))
            Annotation information. The format is as output by orca_utils.process_anno.
Exists if `annotation` is specified.
"""
model_objs = []
for m in models:
if isinstance(m, torch.nn.Module):
model_objs.append(m)
        else:
            if m not in model_dict_global:
                # lazily load the default resources when the named model is missing;
                # a membership test never raises KeyError, so the original
                # try/except fallback could not fire
                load_resources(models=["32M"], use_cuda=use_cuda)
            model_objs.append(model_dict_global[m])
models = model_objs
n_models = len(models)
with torch.no_grad():
allpreds = []
allstarts = []
if targets:
alltargets = []
if annotation is not None:
allannos = []
for iii, seq in enumerate(
[
torch.FloatTensor(sequence),
torch.FloatTensor(np.ascontiguousarray(sequence[:, ::-1, ::-1])),
]
):
for ii, model in enumerate(models):
if targets and iii == 0:
target = targets[ii]
(encoding1, encoding2, encoding4, encoding8, encoding16, encoding32,) = model.net(
model.net0(torch.Tensor(seq.float()).transpose(1, 2).cuda())
if use_cuda
else model.net0(torch.Tensor(seq.float()).transpose(1, 2))
)
encodings = {
1: encoding1,
2: encoding2,
4: encoding4,
8: encoding8,
16: encoding16,
32: encoding32,
}
def eval_step(level, start, coarse_pred=None):
distenc = torch.log(
torch.FloatTensor(model.normmats[level][None, None, :, :]).cuda()
if use_cuda
else torch.FloatTensor(model.normmats[level][None, None, :, :])
).expand(sequence.shape[0], 1, 250, 250)
if coarse_pred is not None:
if level == 1:
pred = model.denets[level].forward(
encodings[level][
:, :, int(start / level) : int(start / level) + 250
],
distenc,
coarse_pred,
) + model.denet_1_pt.forward(
encodings[level][
:, :, int(start / level) : int(start / level) + 250
]
)
else:
pred = model.denets[level].forward(
encodings[level][
:, :, int(start / level) : int(start / level) + 250
],
distenc,
coarse_pred,
)
else:
pred = model.denets[level].forward(
encodings[level][:, :, int(start / level) : int(start / level) + 250],
distenc,
)
return pred
preds = []
starts = [0]
if targets and iii == 0:
ts = []
if annotation is not None and iii == 0:
annos = []
for j, level in enumerate([32, 16, 8, 4, 2, 1]):
if j == 0:
pred = eval_step(level, starts[j])
else:
pred = eval_step(
level,
starts[j],
preds[j - 1][
:,
:,
start_index : start_index + 125,
start_index : start_index + 125,
],
)
if targets and iii == 0:
target_r = np.nanmean(
np.nanmean(
np.reshape(
target[
:,
starts[j] : starts[j] + 250 * level,
starts[j] : starts[j] + 250 * level,
].numpy(),
(target.shape[0], 250, level, 250, level),
),
axis=4,
),
axis=2,
)
target_nan = np.mean(
np.mean(
np.isnan(
np.reshape(
target[
:,
starts[j] : starts[j] + 250 * level,
starts[j] : starts[j] + 250 * level,
].numpy(),
(target.shape[0], 250, level, 250, level),
)
),
axis=4,
),
axis=2,
)
target_r[target_nan > nan_thresh] = np.nan
target_np = np.log(
(target_r + model.epss[level])
/ (model.normmats[level] + model.epss[level])
)[0, 0:, 0:]
ts.append(target_np)
if annotation is not None and iii == 0:
newstart = starts[j] / 8000.0
newend = (starts[j] + 250 * level) / 8000.0
anno_r = []
for r in annotation:
if len(r) == 3:
if not (r[0] >= newend or r[1] <= newstart):
anno_r.append(
(
np.fmax((r[0] - newstart) / (newend - newstart), 0,),
np.fmin((r[1] - newstart) / (newend - newstart), 1,),
r[2],
)
)
else:
if r[0] >= newstart and r[0] < newend:
anno_r.append(((r[0] - newstart) / (newend - newstart), r[1]))
annos.append(anno_r)
if iii == 0:
start_index = int(
np.clip(
np.floor(
(
(mpos - level * 1000000 / 4)
- (wpos - 16000000 + starts[j] * 4000)
)
/ (4000 * level)
),
0,
125,
)
)
else:
start_index = int(
np.clip(
np.ceil(
(
(wpos + 16000000 - starts[j] * 4000)
- (mpos + level * 1000000 / 4)
)
/ (4000 * level)
),
0,
125,
)
)
starts.append(starts[j] + start_index * level)
preds.append(pred)
allpreds.append(preds)
if iii == 0:
if targets:
alltargets.append(ts)
if annotation is not None:
allannos.append(annos)
allstarts.append(starts[:-1])
output = {}
output["predictions"] = [[] for _ in range(n_models)]
for i in range(n_models):
for j in range(len(allpreds[i])):
if allpreds[i][j].shape[1] == 1:
output["predictions"][i].append(
allpreds[i][j].cpu().detach().numpy()[0, 0, :, :] * 0.5
+ allpreds[i + n_models][j].cpu().detach().numpy()[0, 0, ::-1, ::-1] * 0.5
)
else:
output["predictions"][i].append(
allpreds[i][j].cpu().detach().numpy()[0, :, :, :] * 0.5
+ allpreds[i + n_models][j].cpu().detach().numpy()[0, :, ::-1, ::-1] * 0.5
)
if targets:
output["experiments"] = alltargets
else:
output["experiments"] = None
output["start_coords"] = [wpos - 16000000 + s * 4000 for s in allstarts[0]]
output["end_coords"] = [
int(output["start_coords"][ii] + 32000000 / 2 ** (ii)) for ii in range(6)
]
output["chr"] = mchr
if annotation is not None:
output["annos"] = allannos[0]
else:
output["annos"] = None
output["normmats"] = [
[model.normmats[ii] for ii in [32, 16, 8, 4, 2, 1]] for model in models
]
return output
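# Example usage (illustrative sketch; the chromosome and coordinates are
# arbitrary assumptions):
#
#     load_resources(models=["32M"])
#     mpos = wpos = 110000000
#     seq = hg38.get_encoding_from_coords(
#         "chr9", wpos - 16000000, wpos + 16000000)[None, :]
#     out = genomepredict(seq, "chr9", mpos, wpos)
#     genomeplot(out, show_coordinates=True, file="chr9_110M.pdf")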
def genomepredict_256Mb(
sequence,
mchr,
normmats,
chrlen,
mpos=-1,
wpos=-1,
models=["h1esc_256m", "hff_256m"],
targets=None,
annotation=None,
padding_chr=None,
use_cuda=True,
nan_thresh=1,
):
"""Multiscale prediction for a 256Mb sequence
input, zooming into the position specified when generating a series
of 256Mb, 128Mb, 64Mb, and 32Mb predictions with increasing
resolutions (up to 128kb). This function also processes
information used only for plotting including targets and annotation.
    This function accepts a multichromosomal input sequence. Thus it needs an
    extra input `normmats` to encode the chromosomal information. See the
    documentation for the normmats argument for details.
Parameters
----------
sequence : numpy.ndarray
One-hot sequence encoding of shape 1 x 4 x 256000000.
The encoding can be generated with `selene_sdk.Genome.sequence_to_encoding()`.
mchr : str
        The chromosome name of the first chromosome included in the sequence.
This is used for annotation purpose only.
normmats : list(numpy.ndarray)
        A list of distance-based background matrices for H1-ESC and HFF. The
        normmats list contains arrays with dimensions 8000 x 8000 (32kb resolution).
        Interchromosomal interactions are filled with the expected balanced contact
        score for interchromosomal interactions.
chrlen : int
The coordinate of the end of the first chromosome in the input, which is the
chromosome that will be zoomed into.
mpos : int, optional
Default is -1. The coordinate to zoom into for multiscale prediction. If neither
`mpos` nor `wpos` are specified, it zooms into the center of the input by default.
wpos : int, optional
Default is -1. The coordinate of the center position of the sequence, which is
start position + 16000000. If neither `mpos` nor `wpos` are specified, it zooms
into the center of the input by default.
models : list(torch.nn.Module or str), optional
Models to use. Default is H1-ESC(256Mb) and HFF(256Mb) Orca models.
targets : list(numpy.ndarray), optional
The observed balanced contact matrices from the 256Mb sequence.
Used only for plotting when used with genomeplot. The length and
order of the list of targets should match the models specified (default is
H1-ESC and HFF Orca models). The dimensions of the arrays should be
8000 x 8000 (32kb resolution).
    annotation : list or None, optional
        Default is None. List of annotations for plotting. The annotation can be
        generated with orca_utils.process_anno; see its documentation for more details.
padding_chr : str, None, optional
Default is None. Name of the padding chromosome after the first. Used for annotation
only. TODO: be more flexible in the support for multiple chromosomes.
use_cuda : bool, optional
Default is True. If False, use CPU.
nan_thresh : int, optional
Default is 1. Specify the threshold of the proportion of NaNs values
allowed during downsampling for the observed matrices. Only relevant for plotting.
The lower resolution observed matrix value are computed by averaging multiple
bins into one. By default, we allow missing values and only average over the
        non-missing values, and the values with more than the specified proportion
of missing values will be filled with NaN.
Returns
----------
output : dict
Result dictionary that can be used as input for genomeplot. The dictionary
has the following keys:
- predictions : list(list(numpy.ndarray), list(numpy.ndarray))
Multi-level predictions for H1-ESC and HFF cell types.
- experiments : list(list(numpy.ndarray), list(numpy.ndarray))
Observations for H1-ESC and HFF cell types that matches the predictions.
Exists if `targets` is specified.
- normmats : list(list(numpy.ndarray), list(numpy.ndarray))
Background distance-based expected balanced contact matrices for
H1-ESC and HFF cell types that matches the predictions.
- start_coords : list(int)
Start coordinates for the prediction at each level.
- end_coords : list(int)
End coordinates for the prediction at each level.
- chr : str
The chromosome name.
- annos : list(list(...))
            Annotation information. The format is as output by orca_utils.process_anno.
Exists if `annotation` is specified.
"""
model_objs = []
for m in models:
if isinstance(m, torch.nn.Module):
model_objs.append(m)
        else:
            if m not in model_dict_global:
                # lazily load the default resources when the named model is missing;
                # a membership test never raises KeyError, so the original
                # try/except fallback could not fire
                load_resources(models=["256M"], use_cuda=use_cuda)
            model_objs.append(model_dict_global[m])
    models = model_objs
    n_models = len(models)
with torch.no_grad():
allpreds = []
allstarts = []
allnormmats = []
if targets:
alltargets = []
if annotation is not None:
allannos = []
for iii, seq in enumerate(
[
torch.FloatTensor(sequence),
torch.FloatTensor(np.ascontiguousarray(sequence[:, ::-1, ::-1])),
]
):
for ii, model in enumerate(models):
normmat = normmats[ii]
normmat_nan = np.isnan(normmat)
if np.any(normmat_nan):
normmat[normmat_nan] = np.nanmin(normmat[~normmat_nan])
if targets and iii == 0:
target = targets[ii]
(encoding32, encoding64, encoding128, encoding256) = model.net(
model.net1(
model.net0(
torch.Tensor(seq.float()).transpose(1, 2).cuda()
if use_cuda
else torch.Tensor(seq.float()).transpose(1, 2)
)
)[-1]
)
encodings = {
32: encoding32,
64: encoding64,
128: encoding128,
256: encoding256,
}
def eval_step(level, start, coarse_pred=None):
distenc = torch.log(
torch.FloatTensor(ns[level][None, :, :]).cuda()
if use_cuda
else torch.FloatTensor(ns[level][None, :, :])
).expand(sequence.shape[0], 1, 250, 250)
if coarse_pred is not None:
pred = model.denets[level].forward(
encodings[level][
:, :, int(start / (level // 8)) : int(start / (level // 8)) + 250,
],
distenc if iii == 0 else torch.flip(distenc, [2, 3]),
coarse_pred,
)
else:
pred = model.denets[level].forward(
encodings[level][
:, :, int(start / (level // 8)) : int(start / (level // 8)) + 250,
],
distenc if iii == 0 else torch.flip(distenc, [2, 3]),
)
return pred
preds = []
starts = [0]
ns = {}
if targets and iii == 0:
ts = []
if annotation is not None and iii == 0:
annos = []
for j, level in enumerate([256, 128, 64, 32]):
normmat_r = np.nanmean(
np.nanmean(
np.reshape(
normmat[
starts[j] : starts[j] + 250 * level // 8,
starts[j] : starts[j] + 250 * level // 8,
],
(1, 250, level // 8, 250, level // 8),
),
axis=4,
),
axis=2,
)
ns[level] = normmat_r
if j == 0:
pred = eval_step(level, starts[j])
else:
pred = eval_step(
level,
starts[j],
preds[j - 1][
:,
:,
start_index : start_index + 125,
start_index : start_index + 125,
],
)
if targets and iii == 0:
target_r = np.nanmean(
np.nanmean(
np.reshape(
target[
:,
starts[j] : starts[j] + 250 * level // 8,
starts[j] : starts[j] + 250 * level // 8,
].numpy(),
(target.shape[0], 250, level // 8, 250, level // 8),
),
axis=4,
),
axis=2,
)
target_nan = np.mean(
np.mean(
np.isnan(
np.reshape(
target[
:,
starts[j] : starts[j] + 250 * level // 8,
starts[j] : starts[j] + 250 * level // 8,
].numpy(),
(target.shape[0], 250, level // 8, 250, level // 8,),
)
),
axis=4,
),
axis=2,
)
target_r[target_nan > nan_thresh] = np.nan
eps = np.nanmin(normmat_r)
target_np = np.log((target_r + eps) / (normmat_r + eps))[0, 0:, 0:]
ts.append(target_np)
if annotation is not None and iii == 0:
newstart = starts[j] / 8000.0
newend = (starts[j] + 250 * level // 8) / 8000.0
anno_r = []
for r in annotation:
if len(r) == 3:
if not (r[0] >= newend or r[1] <= newstart):
anno_r.append(
(
np.fmax((r[0] - newstart) / (newend - newstart), 0,),
np.fmin((r[1] - newstart) / (newend - newstart), 1,),
r[2],
)
)
else:
if r[0] >= newstart and r[0] < newend:
anno_r.append(((r[0] - newstart) / (newend - newstart), r[1]))
annos.append(anno_r)
if iii == 0:
proposed_start = (mpos - level * 1000000 / 4) - (
wpos - 128000000 + starts[j] * 4000 * 8
)
else:
proposed_start = (mpos - level * 1000000 / 4) - (
wpos + 128000000 - starts[j] * 4000 * 8 - level * 1000000
)
if chrlen is not None:
bounds = [
0 - (wpos - 128000000),
chrlen - level * 1000000 / 2 - (wpos - 128000000),
]
if bounds[0] < bounds[1]:
proposed_start = np.clip(proposed_start, bounds[0], bounds[1])
else:
proposed_start = bounds[0]
start_index = int(np.clip(np.floor(proposed_start / (4000 * level)), 0, 125,))
if iii != 0:
start_index = 250 - (start_index + 125)
starts.append(starts[j] + start_index * level // 8)
preds.append(pred)
allpreds.append(preds)
allnormmats.append(ns)
if iii == 0:
if targets:
alltargets.append(ts)
if annotation is not None:
allannos.append(annos)
allstarts.append(starts[:-1])
output = {}
output["predictions"] = [[] for _ in range(n_models)]
for i in range(n_models):
for j in range(len(allpreds[i])):
if allpreds[i][j].shape[1] == 1:
output["predictions"][i].append(
allpreds[i][j].cpu().detach().numpy()[0, 0, :, :] * 0.5
+ allpreds[i + n_models][j].cpu().detach().numpy()[0, 0, ::-1, ::-1] * 0.5
)
else:
output["predictions"][i].append(
allpreds[i][j].cpu().detach().numpy()[0, :, :, :] * 0.5
+ allpreds[i + n_models][j].cpu().detach().numpy()[0, :, ::-1, ::-1] * 0.5
)
if targets:
output["experiments"] = alltargets
else:
output["experiments"] = None
output["start_coords"] = [wpos - 128000000 + s * 32000 for s in allstarts[0]]
output["end_coords"] = [
np.fmin(int(output["start_coords"][ii] + 256000000 / 2 ** (ii)), chrlen) for ii in range(4)
]
if annotation is not None:
output["annos"] = allannos[0]
else:
output["annos"] = None
output["chr"] = mchr
output["padding_chr"] = padding_chr
output["normmats"] = allnormmats
return output
def _retrieve_multi(regionlist, genome, target=True, normmat=True, normmat_regionlist=None):
sequences = []
for region in regionlist:
if len(region) == 4:
chrom, start, end, strand = region
sequences.append(genome.get_encoding_from_coords(chrom, start, end, strand))
else:
chrom, start, end = region
sequences.append(genome.get_encoding_from_coords(chrom, start, end, "+"))
sequence = np.vstack(sequences)[None, :, :]
if isinstance(target, list):
target_objs = target
has_target = True
elif target and target_available:
target_objs = [target_h1esc_256m, target_hff_256m]
has_target = True
else:
has_target = False
if has_target:
targets = []
for target_obj in target_objs:
targets_ = []
for region in regionlist:
if len(region) == 4:
chrom, start, end, strand = region
else:
chrom, start, end = region
strand = "+"
t = []
for region2 in regionlist:
if len(region2) == 4:
chrom2, start2, end2, strand2 = region2
                    else:
                        chrom2, start2, end2 = region2
                        strand2 = "+"
t.append(
target_obj.get_feature_data(
chrom, start, end, chrom2=chrom2, start2=start2, end2=end2
)
)
if strand == "-":
t[-1] = t[-1][::-1, :]
if strand2 == "-":
t[-1] = t[-1][:, ::-1]
targets_.append(t)
            targets_ = np.vstack([np.hstack(l) for l in targets_])
targets.append(targets_)
targets = [
torch.FloatTensor(l[None, :, :]) for l in targets
]
if normmat:
if isinstance(normmat, list):
normmat_objs = normmat
else:
normmat_objs = [h1esc_256m, hff_256m]
if normmat_regionlist is None:
normmat_regionlist = regionlist
normmats = []
for normmat_obj in normmat_objs:
normmats_ = []
for chrom, start, end, strand in normmat_regionlist:
b = []
for chrom2, start2, end2, strand2 in normmat_regionlist:
if chrom2 != chrom:
b.append(
np.full(
(int((end - start) / 32000), int((end2 - start2) / 32000)),
normmat_obj.background_trans,
)
)
else:
                        binsize = 32000
                        acoor = np.linspace(start, end, int((end - start) / binsize) + 1)[:-1]
                        bcoor = np.linspace(start2, end2, int((end2 - start2) / binsize) + 1)[:-1]
b.append(
normmat_obj.background_cis[
(np.abs(acoor[:, None] - bcoor[None, :]) / binsize).astype(int)
]
)
if strand == "-":
b[-1] = b[-1][::-1, :]
if strand2 == "-":
b[-1] = b[-1][:, ::-1]
normmats_.append(b)
normmats_ = np.vstack([np.hstack(l) for l in normmats_])
normmats.append(normmats_)
datatuple = (sequence,)
if normmat:
datatuple = datatuple + (normmats,)
if has_target:
datatuple = datatuple + (targets,)
return datatuple
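# Example usage (illustrative; the chromosomes and lengths are assumptions):
#
#     chrlen_round = chrlen - chrlen % 32000  # chrlen of the zoomed chromosome
#     seq, normmats, targets = _retrieve_multi(
#         [["chr9", 0, chrlen_round, "+"],
#          ["chr1", 0, 256000000 - chrlen_round, "+"]], hg38)
#     out = genomepredict_256Mb(seq, "chr9", normmats, chrlen_round,
#                               mpos=110000000, wpos=128000000, targets=targets)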
def process_region(
mchr,
mstart,
mend,
genome,
file=None,
custom_models=None,
target=True,
show_genes=True,
show_tracks=False,
window_radius=16000000,
padding_chr="chr1",
use_cuda=True,
):
"""
Generate multiscale genome interaction predictions for
the specified region.
Parameters
----------
mchr : str
The chromosome name of the first segment
mstart : int
The start coordinate of the region.
    mend : int
        The end coordinate of the region.
genome : selene_utils2.MemmapGenome or selene_sdk.sequences.Genome
The reference genome object to extract sequence from
custom_models : list(torch.nn.Module or str) or None, optional
Models to use instead of the default H1-ESC and HFF Orca models.
Default is None.
target : list(selene_utils2.Genomic2DFeatures or str) or bool, optional
If specified as list, use this list of targets to retrieve experimental
data (for plotting only). Default is True and will use micro-C data
for H1-ESC and HFF cells (4DNFI9GMP2J8, 4DNFI643OYP9) that correspond
to the default models.
file : str or None, optional
Default is None. The output file prefix.
show_genes : bool, optional
Default is True. If True, generate gene annotation visualization
file in pdf format that matches the windows of multiscale predictions.
show_tracks : bool, optional
Default is False. If True, generate chromatin tracks visualization
file in pdf format that matches the windows of multiscale predictions.
window_radius : int, optional
Default is 16000000. The acceptable values are 16000000 which selects
the 1-32Mb models or 128000000 which selects the 32-256Mb models.
padding_chr : str, optional
Default is "chr1". If window_radius is 128000000, padding is generally
needed to fill the sequence to 256Mb. The padding sequence will be
extracted from the padding_chr.
use_cuda : bool, optional
Default is True. Use CPU if False.
Returns
-------
    outputs_ref : dict
        Predictions for the specified region. The returned result is a dictionary
        containing the prediction outputs and other retrieved information. It can
        be directly used as input to genomeplot or genomeplot_256Mb. See the
        documentation of `genomepredict` or `genomepredict_256Mb` for details of
        the dictionary content.
"""
chrlen = [l for c, l in genome.get_chr_lens() if c == mchr].pop()
mpos = int((int(mstart) + int(mend)) / 2)
if custom_models is None:
if window_radius == 16000000:
models = ["h1esc", "hff"]
elif window_radius == 128000000:
models = ["h1esc_256m", "hff_256m"]
else:
raise ValueError(
"Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
)
else:
models = custom_models
if target:
try:
if target == True:
if window_radius == 16000000:
target = ["h1esc", "hff"]
elif window_radius == 128000000:
target = ["h1esc_256m", "hff_256m"]
target = [t if isinstance(t, Genomic2DFeatures) else target_dict_global[t] for t in target]
except KeyError:
target = False
if window_radius == 16000000:
wpos = coord_clip(mpos, chrlen)
sequence = genome.get_encoding_from_coords(
mchr, wpos - window_radius, wpos + window_radius
)[None, :]
if target:
targets = [
torch.FloatTensor(
t.get_feature_data(
mchr, coord_round(wpos - window_radius), coord_round(wpos + window_radius),
)[None, :]
)
for t in target
]
else:
targets = None
elif window_radius == 128000000:
chrlen_round = chrlen - chrlen % 32000
wpos = 128000000
        if target:
sequence, normmats, targets = _retrieve_multi(
[[mchr, 0, chrlen_round, "+"], [padding_chr, 0, 256000000 - chrlen_round, "+"]],
genome,
target=target,
)
else:
sequence, normmats = _retrieve_multi(
[[mchr, 0, chrlen_round, "+"], [padding_chr, 0, 256000000 - chrlen_round, "+"]],
genome,
target=target,
)
targets = None
else:
raise ValueError(
"Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
)
    if mend - mstart < 2 * window_radius:
anno_scaled = process_anno(
[
[
np.clip(mstart, wpos - window_radius, wpos + window_radius),
np.clip(mend, wpos - window_radius, wpos + window_radius),
"black",
]
],
base=wpos - window_radius,
window_radius=window_radius,
)
else:
anno_scaled = None
if window_radius == 128000000:
outputs_ref = genomepredict_256Mb(
sequence,
mchr,
normmats,
chrlen_round,
mpos,
wpos,
models=models,
annotation=anno_scaled,
padding_chr=padding_chr,
targets=targets,
use_cuda=use_cuda,
)
else:
outputs_ref = genomepredict(
sequence, mchr, mpos, wpos, annotation=anno_scaled, models=models, targets=targets, use_cuda=use_cuda,
)
if file is not None:
if window_radius == 128000000:
genomeplot_256Mb(
outputs_ref, show_coordinates=True, file=file + ".256m.pdf",
)
else:
genomeplot(
outputs_ref,
show_genes=show_genes,
show_tracks=show_tracks,
show_coordinates=True,
file=file + ".pdf",
)
return outputs_ref
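# Example usage (illustrative; coordinates are assumptions):
#
#     load_resources(models=["32M"])
#     out = process_region("chr9", 109000000, 111000000, hg38,
#                          file="chr9_region", window_radius=16000000)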
def process_dup(
mchr,
mstart,
mend,
genome,
file=None,
custom_models=None,
target=True,
show_genes=True,
show_tracks=False,
window_radius=16000000,
padding_chr="chr1",
use_cuda=True,
):
"""
Generate multiscale genome interaction predictions for
    a duplication variant.
Parameters
----------
mchr : str
The chromosome name of the first segment
mstart : int
The start coordinate of the duplication.
    mend : int
        The end coordinate of the duplication.
genome : selene_utils2.MemmapGenome or selene_sdk.sequences.Genome
The reference genome object to extract sequence from
custom_models : list(torch.nn.Module or str) or None, optional
Models to use instead of the default H1-ESC and HFF Orca models.
Default is None.
target : list(selene_utils2.Genomic2DFeatures or str) or bool, optional
If specified as list, use this list of targets to retrieve experimental
data (for plotting only). Default is True and will use micro-C data
for H1-ESC and HFF cells (4DNFI9GMP2J8, 4DNFI643OYP9) that correspond
to the default models.
file : str or None, optional
Default is None. The output file prefix.
show_genes : bool, optional
Default is True. If True, generate gene annotation visualization
file in pdf format that matches the windows of multiscale predictions.
show_tracks : bool, optional
Default is False. If True, generate chromatin tracks visualization
file in pdf format that matches the windows of multiscale predictions.
window_radius : int, optional
Default is 16000000. The acceptable values are 16000000 which selects
the 1-32Mb models or 128000000 which selects the 32-256Mb models.
padding_chr : str, optional
Default is "chr1". If window_radius is 128000000, padding is generally
needed to fill the sequence to 256Mb. The padding sequence will be
extracted from the padding_chr.
use_cuda : bool, optional
Default is True. Use CPU if False.
Returns
-------
outputs_ref_l, outputs_ref_r, outputs_alt : dict, dict, dict
Reference allele predictions zooming into the left boundary of the
duplication,
Reference allele predictions zooming into the right boundary of the
duplication,
Alternative allele predictions zooming into the duplication breakpoint.
        The returned results are in the format of dictionaries
containing the prediction outputs and other
retrieved information. These dictionaries can be directly used as
input to genomeplot or genomeplot_256Mb. See documentation of `genomepredict` or `genomepredict_256Mb` for
details of the dictionary content.
"""
chrlen = [l for c, l in genome.get_chr_lens() if c == mchr].pop()
if custom_models is None:
if window_radius == 16000000:
models = ["h1esc", "hff"]
elif window_radius == 128000000:
models = ["h1esc_256m", "hff_256m"]
else:
raise ValueError(
"Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
)
else:
models = custom_models
if target:
try:
if target == True:
if window_radius == 16000000:
target = ["h1esc", "hff"]
elif window_radius == 128000000:
target = ["h1esc_256m", "hff_256m"]
target = [t if isinstance(t, Genomic2DFeatures) else target_dict_global[t] for t in target]
except KeyError:
target = False
# ref.l
if window_radius == 16000000:
wpos = coord_clip(mstart, chrlen)
sequence = genome.get_encoding_from_coords(
mchr, wpos - window_radius, wpos + window_radius
)[None, :]
if target:
targets = [
torch.FloatTensor(
t.get_feature_data(
mchr, coord_round(wpos - window_radius), coord_round(wpos + window_radius),
)[None, :]
) for t in target
]
else:
targets = None
elif window_radius == 128000000:
chrlen_round = chrlen - chrlen % 32000
wpos = 128000000
if target:
sequence, normmats, targets = _retrieve_multi(
[[mchr, 0, chrlen_round, "+"], [padding_chr, 0, 256000000 - chrlen_round, "+"]],
genome,
target=target,
)
else:
sequence, normmats = _retrieve_multi(
[[mchr, 0, chrlen_round, "+"], [padding_chr, 0, 256000000 - chrlen_round, "+"]],
genome,
target=target,
)
targets = None
else:
raise ValueError(
"Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
)
if wpos + window_radius > mend:
anno_scaled = process_anno(
[[mstart, mend, "black"]], base=wpos - window_radius, window_radius=window_radius
)
else:
anno_scaled = process_anno(
[[mstart, wpos + window_radius, "black"]],
base=wpos - window_radius,
window_radius=window_radius,
)
if window_radius == 128000000:
outputs_ref_l = genomepredict_256Mb(
sequence,
mchr,
normmats,
chrlen_round,
mstart,
wpos,
annotation=anno_scaled,
padding_chr=padding_chr,
models=models,
targets=targets,
use_cuda=use_cuda,
)
else:
outputs_ref_l = genomepredict(
sequence,
mchr,
mstart,
wpos,
annotation=anno_scaled,
models=models,
targets=targets,
use_cuda=use_cuda,
)
if file is not None:
if window_radius == 128000000:
genomeplot_256Mb(
outputs_ref_l, show_coordinates=True, file=file + ".ref.l.256m.pdf",
)
else:
genomeplot(
outputs_ref_l,
show_genes=show_genes,
show_tracks=show_tracks,
show_coordinates=True,
file=file + ".ref.l.pdf",
)
# ref.r
if window_radius == 16000000:
wpos = coord_clip(mend, chrlen)
sequence = genome.get_encoding_from_coords(
mchr, wpos - window_radius, wpos + window_radius
)[None, :]
if target:
targets = [
torch.FloatTensor(
t.get_feature_data(
mchr, coord_round(wpos - window_radius), coord_round(wpos + window_radius),
)[None, :]
) for t in target
]
else:
targets = None
if wpos - window_radius < mstart:
anno_scaled = process_anno(
[[mstart, mend, "black"]], base=wpos - window_radius, window_radius=window_radius
)
else:
anno_scaled = process_anno(
[[wpos - window_radius, mend, "black"]],
base=wpos - window_radius,
window_radius=window_radius,
)
if window_radius == 16000000:
outputs_ref_r = genomepredict(
sequence, mchr, mend, wpos, models=models, annotation=anno_scaled, targets=targets, use_cuda=use_cuda,
)
if file is not None:
genomeplot(
outputs_ref_r,
show_genes=show_genes,
show_tracks=show_tracks,
show_coordinates=True,
file=file + ".ref.r.pdf",
)
else:
outputs_ref_r = genomepredict_256Mb(
sequence,
mchr,
normmats,
chrlen_round,
mend,
wpos,
annotation=anno_scaled,
padding_chr=padding_chr,
models=models,
targets=targets,
use_cuda=use_cuda,
)
genomeplot_256Mb(
outputs_ref_r, show_coordinates=True, file=file + ".ref.r.256m.pdf",
)
# alt (r)
s = StructuralChange2(mchr, chrlen)
s.duplicate(mstart, mend)
chrlen_alt = chrlen + mend - mstart
if window_radius == 16000000:
wpos = coord_clip(mend, chrlen_alt)
sequence = []
for chrm, start, end, strand in s[wpos - window_radius : wpos + window_radius]:
seq = genome.get_encoding_from_coords(chrm, start, end)
if strand == "-":
seq = seq[None, ::-1, ::-1]
else:
seq = seq[None, :, :]
sequence.append(seq)
sequence = np.concatenate(sequence, axis=1)
else:
chrlen_alt_round = chrlen_alt - chrlen_alt % 32000
if chrlen_alt_round < 256000000:
wpos = 128000000
(sequence, normmats) = _retrieve_multi(
list(s[0:chrlen_alt_round]) + [[padding_chr, 0, 256000000 - chrlen_alt_round, "+"]],
genome,
target=False,
normmat=True,
normmat_regionlist=[
[mchr, 0, chrlen_alt_round, "+"],
[padding_chr, 0, 256000000 - chrlen_alt_round, "+"],
],
)
else:
wpos = coord_clip(mend, chrlen_alt_round, window_radius=128000000)
(sequence, normmats) = _retrieve_multi(
list(s[wpos - window_radius : wpos + window_radius]),
genome,
target=False,
normmat=True,
normmat_regionlist=[[mchr, wpos - window_radius, wpos + window_radius, "+"]],
)
if wpos - window_radius < mstart and mend + mend - mstart < wpos + window_radius:
anno_scaled = process_anno(
[[mstart, mend, "black"], [mend, mend + mend - mstart, "gray"]],
base=wpos - window_radius,
window_radius=window_radius,
)
elif wpos - window_radius >= mstart and mend + mend - mstart < wpos + window_radius:
anno_scaled = process_anno(
[[wpos - window_radius, mend, "black"], [mend, mend + mend - mstart, "gray"],],
base=wpos - window_radius,
window_radius=window_radius,
)
elif wpos - window_radius < mstart and mend + mend - mstart >= wpos + window_radius:
anno_scaled = process_anno(
[[mstart, mend, "black"], [mend, wpos + window_radius, "gray"]],
base=wpos - window_radius,
window_radius=window_radius,
)
else:
anno_scaled = process_anno(
[[wpos - window_radius, mend, "black"], [mend, wpos + window_radius, "gray"],],
base=wpos - window_radius,
window_radius=window_radius,
)
if window_radius == 16000000:
outputs_alt = genomepredict(
sequence, mchr, mend, wpos, models=models, annotation=anno_scaled, use_cuda=use_cuda
)
if file is not None:
genomeplot(outputs_alt, show_coordinates=True, file=file + ".alt.pdf")
else:
outputs_alt = genomepredict_256Mb(
sequence,
mchr,
normmats,
chrlen_alt_round,
mend,
wpos,
models=models,
annotation=anno_scaled,
padding_chr=padding_chr,
use_cuda=use_cuda,
)
if file is not None:
genomeplot_256Mb(
outputs_alt, show_coordinates=True, file=file + ".alt.256m.pdf",
)
return outputs_ref_l, outputs_ref_r, outputs_alt
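# Example usage (illustrative; the duplication breakpoints are assumptions):
#
#     ref_l, ref_r, alt = process_dup("chr1", 14600000, 15400000, hg38,
#                                     file="chr1_dup")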
def process_del(
mchr,
mstart,
mend,
genome,
cmap=None,
file=None,
custom_models=None,
target=True,
show_genes=True,
show_tracks=False,
window_radius=16000000,
padding_chr="chr1",
use_cuda=True,
):
"""
Generate multiscale genome interaction predictions for
    a deletion variant.
Parameters
----------
mchr : str
The chromosome name of the first segment
mstart : int
The start coordinate of the deletion.
    mend : int
        The end coordinate of the deletion.
    genome : selene_utils2.MemmapGenome or selene_sdk.sequences.Genome
        The reference genome object to extract sequence from
    cmap : str or None, optional
        Default is None. Colormap passed to genomeplot when plotting.
custom_models : list(torch.nn.Module or str) or None, optional
Models to use instead of the default H1-ESC and HFF Orca models.
Default is None.
target : list(selene_utils2.Genomic2DFeatures or str) or bool, optional
If specified as list, use this list of targets to retrieve experimental
data (for plotting only). Default is True and will use micro-C data
for H1-ESC and HFF cells (4DNFI9GMP2J8, 4DNFI643OYP9) that correspond
to the default models.
file : str or None, optional
Default is None. The output file prefix.
show_genes : bool, optional
Default is True. If True, generate gene annotation visualization
file in pdf format that matches the windows of multiscale predictions.
show_tracks : bool, optional
Default is False. If True, generate chromatin tracks visualization
file in pdf format that matches the windows of multiscale predictions.
window_radius : int, optional
Default is 16000000. The acceptable values are 16000000 which selects
the 1-32Mb models or 128000000 which selects the 32-256Mb models.
padding_chr : str, optional
Default is "chr1". If window_radius is 128000000, padding is generally
needed to fill the sequence to 256Mb. The padding sequence will be
extracted from the padding_chr.
use_cuda : bool, optional
Default is True. Use CPU if False.
Returns
-------
outputs_ref_l, outputs_ref_r, outputs_alt : dict, dict, dict
Reference allele predictions zooming into the left boundary of the
deletion,
Reference allele predictions zooming into the right boundary of the
deletion,
Alternative allele predictions zooming into the deletion breakpoint.
        The returned results are in the format of dictionaries
containing the prediction outputs and other
retrieved information. These dictionaries can be directly used as
input to genomeplot or genomeplot_256Mb. See documentation of `genomepredict` or `genomepredict_256Mb` for
details of the dictionary content.
"""
chrlen = [l for c, l in genome.get_chr_lens() if c == mchr].pop()
if custom_models is None:
if window_radius == 16000000:
models = ["h1esc", "hff"]
elif window_radius == 128000000:
models = ["h1esc_256m", "hff_256m"]
else:
raise ValueError(
"Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
)
else:
models = custom_models
if target:
try:
if target == True:
if window_radius == 16000000:
target = ["h1esc", "hff"]
elif window_radius == 128000000:
target = ["h1esc_256m", "hff_256m"]
target = [t if isinstance(t, Genomic2DFeatures) else target_dict_global[t] for t in target]
except KeyError:
target = False
# ref.l
if window_radius == 16000000:
wpos = coord_clip(mstart, chrlen)
sequence = genome.get_encoding_from_coords(
mchr, wpos - window_radius, wpos + window_radius
)[None, :]
if target:
targets = [
torch.FloatTensor(
t.get_feature_data(
mchr, coord_round(wpos - window_radius), coord_round(wpos + window_radius),
)[None, :]
) for t in target
]
else:
targets = None
elif window_radius == 128000000:
chrlen_round = chrlen - chrlen % 32000
wpos = 128000000
if target:
sequence, normmats, targets = _retrieve_multi(
[[mchr, 0, chrlen_round, "+"], [padding_chr, 0, 256000000 - chrlen_round, "+"]],
genome,
target=target,
)
else:
            sequence, normmats = _retrieve_multi(
                [[mchr, 0, chrlen_round, "+"], [padding_chr, 0, 256000000 - chrlen_round, "+"]],
                genome,
                target=target,
            )
            targets = None
else:
raise ValueError(
"Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
)
if wpos + window_radius > mend:
anno_scaled = process_anno(
[[mstart, mend, "black"]], base=wpos - window_radius, window_radius=window_radius
)
else:
anno_scaled = process_anno(
[[mstart, wpos + window_radius, "black"]],
base=wpos - window_radius,
window_radius=window_radius,
)
if window_radius == 128000000:
outputs_ref_l = genomepredict_256Mb(
sequence,
mchr,
normmats,
chrlen_round,
mstart,
wpos,
models=models,
annotation=anno_scaled,
padding_chr=padding_chr,
targets=targets,
use_cuda=use_cuda,
)
else:
outputs_ref_l = genomepredict(
sequence,
mchr,
mstart,
wpos,
models=models,
annotation=anno_scaled,
targets=targets,
use_cuda=use_cuda,
)
if file is not None:
if window_radius == 128000000:
genomeplot_256Mb(
outputs_ref_l, show_coordinates=True, file=file + ".ref.l.256m.pdf",
)
else:
genomeplot(
outputs_ref_l,
show_genes=show_genes,
show_tracks=show_tracks,
show_coordinates=True,
cmap=cmap,
file=file + ".ref.l.pdf",
)
# ref.r
if window_radius == 16000000:
wpos = coord_clip(mend, chrlen)
sequence = genome.get_encoding_from_coords(
mchr, wpos - window_radius, wpos + window_radius
)[None, :]
if target:
targets = [
torch.FloatTensor(
t.get_feature_data(
mchr, coord_round(wpos - window_radius), coord_round(wpos + window_radius),
)[None, :]
) for t in target
]
else:
targets = None
if wpos - window_radius < mstart:
anno_scaled = process_anno(
[[mstart, mend, "black"]], base=wpos - window_radius, window_radius=window_radius
)
else:
anno_scaled = process_anno(
[[wpos - window_radius, mend, "black"]],
base=wpos - window_radius,
window_radius=window_radius,
)
if window_radius == 16000000:
outputs_ref_r = genomepredict(
sequence, mchr, mend, wpos, models=models, annotation=anno_scaled, targets=targets, use_cuda=use_cuda,
)
if file is not None:
genomeplot(
outputs_ref_r,
show_genes=show_genes,
show_tracks=show_tracks,
show_coordinates=True,
cmap=cmap,
file=file + ".ref.r.pdf",
)
else:
outputs_ref_r = genomepredict_256Mb(
sequence,
mchr,
normmats,
chrlen_round,
mend,
wpos,
models=models,
annotation=anno_scaled,
padding_chr=padding_chr,
targets=targets,
use_cuda=use_cuda,
)
if file is not None:
genomeplot_256Mb(
outputs_ref_r, show_coordinates=True, file=file + ".ref.r.256m.pdf",
)
# alt
s = StructuralChange2(mchr, chrlen)
s.delete(mstart, mend)
chrlen_alt = chrlen - (mend - mstart)
if window_radius == 16000000:
wpos = coord_clip(mstart, chrlen_alt)
sequence = []
for chrm, start, end, strand in s[wpos - window_radius : wpos + window_radius]:
seq = genome.get_encoding_from_coords(chrm, start, end)
if strand == "-":
seq = seq[None, ::-1, ::-1]
else:
seq = seq[None, :, :]
sequence.append(seq)
sequence = np.concatenate(sequence, axis=1)
else:
chrlen_alt_round = chrlen_alt - chrlen_alt % 32000
wpos = 128000000
(sequence, normmats) = _retrieve_multi(
list(s[0:chrlen_alt_round]) + [[padding_chr, 0, 256000000 - chrlen_alt_round, "+"]],
genome,
target=False,
normmat=True,
normmat_regionlist=[
[mchr, 0, chrlen_alt_round, "+"],
[padding_chr, 0, 256000000 - chrlen_alt_round, "+"],
],
)
anno_scaled = process_anno(
[[mstart, "double"]], base=wpos - window_radius, window_radius=window_radius
)
if window_radius == 16000000:
outputs_alt = genomepredict(
sequence, mchr, mstart, wpos, models=models, annotation=anno_scaled, use_cuda=use_cuda
)
if file is not None:
genomeplot(outputs_alt, show_coordinates=True, cmap=cmap, file=file + ".alt.pdf")
else:
outputs_alt = genomepredict_256Mb(
sequence,
mchr,
normmats,
chrlen_alt_round,
mstart,
wpos,
models=models,
annotation=anno_scaled,
padding_chr=padding_chr,
use_cuda=use_cuda,
)
if file is not None:
genomeplot_256Mb(
outputs_alt, show_coordinates=True, file=file + ".alt.256m.pdf",
)
return outputs_ref_l, outputs_ref_r, outputs_alt
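# Example usage (illustrative; the deletion breakpoints are assumptions):
#
#     ref_l, ref_r, alt = process_del("chr5", 90000000, 92000000, hg38,
#                                     file="chr5_del")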
def process_inv(
mchr,
mstart,
mend,
genome,
file=None,
custom_models=None,
target=True,
show_genes=True,
show_tracks=False,
window_radius=16000000,
padding_chr="chr1",
use_cuda=True,
):
"""
Generate multiscale genome interaction predictions for
an inversion variant.
Parameters
----------
mchr : str
The chromosome name of the first segment
mstart : int
The start coordinate of the inversion.
    mend : int
        The end coordinate of the inversion.
genome : selene_utils2.MemmapGenome or selene_sdk.sequences.Genome
The reference genome object to extract sequence from
custom_models : list(torch.nn.Module or str) or None, optional
Models to use instead of the default H1-ESC and HFF Orca models.
Default is None.
target : list(selene_utils2.Genomic2DFeatures or str) or bool, optional
If specified as list, use this list of targets to retrieve experimental
data (for plotting only). Default is True and will use micro-C data
for H1-ESC and HFF cells (4DNFI9GMP2J8, 4DNFI643OYP9) that correspond
to the default models.
file : str or None, optional
Default is None. The output file prefix.
show_genes : bool, optional
Default is True. If True, generate gene annotation visualization
file in pdf format that matches the windows of multiscale predictions.
show_tracks : bool, optional
Default is False. If True, generate chromatin tracks visualization
file in pdf format that matches the windows of multiscale predictions.
window_radius : int, optional
Default is 16000000. The acceptable values are 16000000 which selects
the 1-32Mb models or 128000000 which selects the 32-256Mb models.
padding_chr : str, optional
Default is "chr1". If window_radius is 128000000, padding is generally
needed to fill the sequence to 256Mb. The padding sequence will be
extracted from the padding_chr.
use_cuda : bool, optional
Default is True. Use CPU if False.
Returns
-------
outputs_ref_l, outputs_ref_r, outputs_alt_l, outputs_alt_r : dict, dict, dict, dict
Reference allele predictions zooming into the left boundary of the
inversion,
Reference allele predictions zooming into the right boundary of the
inversion,
Alternative allele predictions zooming into the left boundary of
the inversion,
Alternative allele prediction zooming into the right boundary of
the inversion.
        The returned results are in the format of dictionaries
containing the prediction outputs and other
retrieved information. These dictionaries can be directly used as
input to genomeplot or genomeplot_256Mb. See documentation of `genomepredict` or `genomepredict_256Mb` for
details of the dictionary content.
"""
chrlen = [l for c, l in genome.get_chr_lens() if c == mchr].pop()
if custom_models is None:
if window_radius == 16000000:
models = ["h1esc", "hff"]
elif window_radius == 128000000:
models = ["h1esc_256m", "hff_256m"]
else:
raise ValueError(
"Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
)
else:
models = custom_models
if target:
try:
if target == True:
if window_radius == 16000000:
target = ["h1esc", "hff"]
elif window_radius == 128000000:
target = ["h1esc_256m", "hff_256m"]
target = [t if isinstance(t, Genomic2DFeatures) else target_dict_global[t] for t in target]
except KeyError:
target = False
if window_radius == 16000000:
wpos = coord_clip(mstart, chrlen)
sequence = genome.get_encoding_from_coords(
mchr, wpos - window_radius, wpos + window_radius
)[None, :]
if target:
targets = [
torch.FloatTensor(
t.get_feature_data(
mchr, coord_round(wpos - window_radius), coord_round(wpos + window_radius),
)[None, :]
) for t in target
]
else:
targets = None
elif window_radius == 128000000:
chrlen_round = chrlen - chrlen % 32000
wpos = 128000000
if target:
sequence, normmats, targets = _retrieve_multi(
[[mchr, 0, chrlen_round, "+"], [padding_chr, 0, 256000000 - chrlen_round, "+"]],
genome,
target=target,
)
else:
sequence, normmats = _retrieve_multi(
[[mchr, 0, chrlen_round, "+"], [padding_chr, 0, 256000000 - chrlen_round, "+"]],
genome,
target=target,
)
targets = None
else:
raise ValueError(
"Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
)
if wpos + window_radius > mend:
anno_scaled = process_anno(
[[mstart, mend, "black"]], base=wpos - window_radius, window_radius=window_radius,
)
else:
anno_scaled = process_anno(
[[mstart, wpos + window_radius, "black"]],
base=wpos - window_radius,
window_radius=window_radius,
)
if window_radius == 128000000:
outputs_ref_l = genomepredict_256Mb(
sequence,
mchr,
normmats,
chrlen_round,
mstart,
wpos,
models=models,
annotation=anno_scaled,
padding_chr=padding_chr,
targets=targets,
use_cuda=use_cuda,
)
else:
outputs_ref_l = genomepredict(
sequence,
mchr,
mstart,
wpos,
models=models,
annotation=anno_scaled,
targets=targets,
use_cuda=use_cuda,
)
if file is not None:
if window_radius == 128000000:
genomeplot_256Mb(
outputs_ref_l, show_coordinates=True, file=file + ".ref.l.256m.pdf",
)
else:
genomeplot(
outputs_ref_l,
show_genes=show_genes,
show_tracks=show_tracks,
show_coordinates=True,
file=file + ".ref.l.pdf",
)
# ref.r
if window_radius == 16000000:
wpos = coord_clip(mend, chrlen)
sequence = genome.get_encoding_from_coords(
mchr, wpos - window_radius, wpos + window_radius
)[None, :]
if target:
targets = [
torch.FloatTensor(
t.get_feature_data(
mchr, coord_round(wpos - window_radius), coord_round(wpos + window_radius),
)[None, :]
) for t in target
]
else:
targets = None
if wpos - window_radius < mstart:
anno_scaled = process_anno(
[[mstart, mend, "black"]], base=wpos - window_radius, window_radius=window_radius,
)
else:
anno_scaled = process_anno(
[[wpos - window_radius, mend, "black"]],
base=wpos - window_radius,
window_radius=window_radius,
)
if window_radius == 16000000:
outputs_ref_r = genomepredict(
sequence, mchr, mend, wpos, models=models, annotation=anno_scaled, targets=targets, use_cuda=use_cuda,
)
if file is not None:
genomeplot(
outputs_ref_r,
show_genes=show_genes,
show_tracks=show_tracks,
show_coordinates=True,
file=file + ".ref.r.pdf",
)
else:
outputs_ref_r = genomepredict_256Mb(
sequence,
mchr,
normmats,
chrlen_round,
mend,
wpos,
models=models,
annotation=anno_scaled,
padding_chr=padding_chr,
targets=targets,
use_cuda=use_cuda,
)
if file is not None:
genomeplot_256Mb(
outputs_ref_r, show_coordinates=True, file=file + ".ref.r.256m.pdf",
)
# alt.l
s = StructuralChange2(mchr, chrlen)
s.invert(mstart, mend)
if window_radius == 16000000:
wpos = coord_clip(mstart, chrlen)
sequence = []
for chrm, start, end, strand in s[wpos - window_radius : wpos + window_radius]:
seq = genome.get_encoding_from_coords(chrm, start, end)
if strand == "-":
seq = seq[None, ::-1, ::-1]
else:
seq = seq[None, :, :]
sequence.append(seq)
sequence = np.concatenate(sequence, axis=1)
else:
wpos = 128000000
(sequence,) = _retrieve_multi(
list(s[0:chrlen_round]) + [[padding_chr, 0, 256000000 - chrlen_round, "+"]],
genome,
target=False,
normmat=False,
)
# normmats are not changed for inversion
if mend < wpos + window_radius:
anno_scaled = process_anno(
[[mstart, mend, "gray"]], base=wpos - window_radius, window_radius=window_radius,
)
else:
anno_scaled = process_anno(
[[mstart, wpos + window_radius, "gray"]],
base=wpos - window_radius,
window_radius=window_radius,
)
if window_radius == 16000000:
outputs_alt_l = genomepredict(
sequence, mchr, mstart, wpos, models=models, annotation=anno_scaled, use_cuda=use_cuda
)
if file is not None:
genomeplot(outputs_alt_l, show_coordinates=True, file=file + ".alt.l.pdf")
else:
outputs_alt_l = genomepredict_256Mb(
sequence,
mchr,
normmats,
chrlen_round,
mstart,
wpos,
models=models,
annotation=anno_scaled,
padding_chr=padding_chr,
use_cuda=use_cuda,
)
if file is not None:
genomeplot_256Mb(
outputs_alt_l, show_coordinates=True, file=file + ".alt.l.256m.pdf",
)
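    # alt.r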
if window_radius == 16000000:
wpos = coord_clip(mend, chrlen)
sequence = []
for chrm, start, end, strand in s[wpos - window_radius : wpos + window_radius]:
seq = genome.get_encoding_from_coords(chrm, start, end)
if strand == "-":
seq = seq[None, ::-1, ::-1]
else:
seq = seq[None, :, :]
sequence.append(seq)
sequence = np.concatenate(sequence, axis=1)
if mstart > wpos - window_radius:
anno_scaled = process_anno(
[[mstart, mend, "gray"]], base=wpos - window_radius, window_radius=window_radius,
)
else:
anno_scaled = process_anno(
[[wpos - window_radius, mend, "gray"]],
base=wpos - window_radius,
window_radius=window_radius,
)
if window_radius == 16000000:
outputs_alt_r = genomepredict(
sequence, mchr, mend, wpos, models=models, annotation=anno_scaled, use_cuda=use_cuda
)
if file is not None:
genomeplot(outputs_alt_r, show_coordinates=True, file=file + ".alt.r.pdf")
else:
outputs_alt_r = genomepredict_256Mb(
sequence,
mchr,
normmats,
chrlen_round,
mend,
wpos,
models=models,
annotation=anno_scaled,
padding_chr=padding_chr,
use_cuda=use_cuda,
)
if file is not None:
genomeplot_256Mb(
outputs_alt_r, show_coordinates=True, file=file + ".alt.r.256m.pdf",
)
return outputs_ref_l, outputs_ref_r, outputs_alt_l, outputs_alt_r
def process_ins(
mchr,
mpos,
ins_seq,
genome,
strand="+",
file=None,
custom_models=None,
target=True,
show_genes=True,
show_tracks=False,
window_radius=16000000,
padding_chr="chr1",
use_cuda=True,
):
"""
Generate multiscale genome interaction predictions for
an insertion variant that inserts the specified sequence
    at the insertion site.
Parameters
----------
mchr : str
        The chromosome name of the insertion site.
mpos : int
The insertion site coordinate.
ins_seq : str
The inserted sequence in string format.
genome : selene_utils2.MemmapGenome or selene_sdk.sequences.Genome
The reference genome object to extract sequence from
custom_models : list(torch.nn.Module or str) or None, optional
Models to use instead of the default H1-ESC and HFF Orca models.
Default is None.
target : list(selene_utils2.Genomic2DFeatures or str) or bool, optional
If specified as list, use this list of targets to retrieve experimental
data (for plotting only). Default is True and will use micro-C data
for H1-ESC and HFF cells (4DNFI9GMP2J8, 4DNFI643OYP9) that correspond
to the default models.
file : str or None, optional
Default is None. The output file prefix.
show_genes : bool, optional
Default is True. If True, generate gene annotation visualization
file in pdf format that matches the windows of multiscale predictions.
show_tracks : bool, optional
Default is False. If True, generate chromatin tracks visualization
file in pdf format that matches the windows of multiscale predictions.
window_radius : int, optional
Default is 16000000. The acceptable values are 16000000 which selects
the 1-32Mb models or 128000000 which selects the 32-256Mb models.
padding_chr : str, optional
Default is "chr1". If window_radius is 128000000, padding is generally
needed to fill the sequence to 256Mb. The padding sequence will be
extracted from the padding_chr.
use_cuda : bool, optional
Default is True. Use CPU if False.
Returns
-------
outputs_ref, outputs_alt_l, outputs_alt_r : dict, dict, dict
Reference allele predictions zooming into the insertion site,
        Alternative allele predictions zooming into the left boundary of
        the insertion sequence,
        Alternative allele predictions zooming into the right boundary of
        the insertion sequence.
        The returned results are in the format of dictionaries
containing the prediction outputs and other
retrieved information. These dictionaries can be directly used as
input to genomeplot or genomeplot_256Mb. See documentation of `genomepredict` or `genomepredict_256Mb` for
details of the dictionary content.
"""
chrlen = [l for c, l in genome.get_chr_lens() if c == mchr].pop()
if custom_models is None:
if window_radius == 16000000:
models = ["h1esc", "hff"]
elif window_radius == 128000000:
models = ["h1esc_256m", "hff_256m"]
else:
raise ValueError(
"Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
)
else:
models = custom_models
if target:
try:
            if target is True:
if window_radius == 16000000:
target = ["h1esc", "hff"]
elif window_radius == 128000000:
target = ["h1esc_256m", "hff_256m"]
target = [t if isinstance(t, Genomic2DFeatures) else target_dict_global[t] for t in target]
except KeyError:
target = False
if window_radius == 16000000:
wpos = coord_clip(mpos, chrlen)
sequence = genome.get_encoding_from_coords(
mchr, wpos - window_radius, wpos + window_radius
)[None, :]
if target:
targets = [
torch.FloatTensor(
t.get_feature_data(
"chr" + mchr.replace("chr", ""),
coord_round(wpos - window_radius),
coord_round(wpos + window_radius),
)[None, :]
) for t in target
]
else:
targets = None
elif window_radius == 128000000:
chrlen_round = chrlen - chrlen % 32000
wpos = 128000000
if target:
sequence, normmats, targets = _retrieve_multi(
[[mchr, 0, chrlen_round, "+"], [padding_chr, 0, 256000000 - chrlen_round, "+"]],
genome,
target=target,
)
else:
sequence, normmats = _retrieve_multi(
[[mchr, 0, chrlen_round, "+"], [padding_chr, 0, 256000000 - chrlen_round, "+"]],
genome,
target=target,
)
targets = None
else:
raise ValueError(
"Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
)
anno_scaled = process_anno(
[[mpos, "single"]], base=wpos - window_radius, window_radius=window_radius
)
if window_radius == 128000000:
        outputs_ref = genomepredict_256Mb(
sequence,
mchr,
normmats,
chrlen_round,
mpos,
wpos,
models=models,
annotation=anno_scaled,
padding_chr=padding_chr,
targets=targets,
use_cuda=use_cuda,
)
else:
outputs_ref = genomepredict(
sequence, mchr, mpos, wpos, annotation=anno_scaled, models=models, targets=targets, use_cuda=use_cuda,
)
if file is not None:
if window_radius == 128000000:
genomeplot_256Mb(
                outputs_ref, show_coordinates=True, file=file + ".ref.256m.pdf",
)
else:
genomeplot(
outputs_ref,
show_genes=show_genes,
show_tracks=show_tracks,
show_coordinates=True,
file=file + ".ref.pdf",
)
# alt
s = StructuralChange2(mchr, chrlen)
s.insert(mpos, len(ins_seq), strand=strand)
chrlen_alt = chrlen + len(ins_seq)
if window_radius == 16000000:
wpos = coord_clip(mpos, chrlen_alt)
sequence = []
for chr_name, start, end, strand in s[wpos - window_radius : wpos + window_radius]:
if chr_name.startswith("ins"):
seq = Genome.sequence_to_encoding(ins_seq[start:end])
else:
seq = genome.get_encoding_from_coords(chr_name, start, end)
if strand == "-":
seq = seq[None, ::-1, ::-1]
else:
seq = seq[None, :, :]
sequence.append(seq)
sequence = np.concatenate(sequence, axis=1)
else:
chrlen_alt_round = chrlen_alt - chrlen_alt % 32000
if chrlen_alt_round < 256000000:
wpos = 128000000
(sequence, normmats) = _retrieve_multi(
list(s[0:chrlen_alt_round]) + [[padding_chr, 0, 256000000 - chrlen_alt_round, "+"]],
genome,
target=False,
normmat=True,
normmat_regionlist=[
[mchr, 0, chrlen_alt_round, "+"],
[padding_chr, 0, 256000000 - chrlen_alt_round, "+"],
],
)
else:
wpos = coord_clip(mpos, chrlen_alt_round, window_radius=128000000)
(sequence, normmats) = _retrieve_multi(
list(s[wpos - window_radius : wpos + window_radius]),
genome,
target=False,
normmat=True,
normmat_regionlist=[[mchr, wpos - window_radius, wpos + window_radius, "+"]],
)
if mpos + len(ins_seq) < wpos + window_radius:
anno_scaled = process_anno(
[[mpos, mpos + len(ins_seq), "gray"]],
base=wpos - window_radius,
window_radius=window_radius,
)
else:
anno_scaled = process_anno(
[[mpos, wpos + window_radius, "gray"]],
base=wpos - window_radius,
window_radius=window_radius,
)
if window_radius == 16000000:
outputs_alt_l = genomepredict(
sequence, mchr, mpos, wpos, models=models, annotation=anno_scaled, use_cuda=use_cuda
)
if file is not None:
genomeplot(outputs_alt_l, show_coordinates=True, file=file + ".alt.l.pdf")
else:
outputs_alt_l = genomepredict_256Mb(
sequence,
mchr,
normmats,
chrlen_alt_round,
mpos,
wpos,
models=models,
annotation=anno_scaled,
padding_chr=padding_chr,
use_cuda=use_cuda,
)
if file is not None:
genomeplot_256Mb(
outputs_alt_l, show_coordinates=True, file=file + ".alt.l.256m.pdf",
)
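    # alt.r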
if window_radius == 16000000:
wpos = coord_clip(mpos + len(ins_seq), chrlen_alt)
sequence = []
for chr_name, start, end, strand in s[wpos - window_radius : wpos + window_radius]:
if chr_name.startswith("ins"):
seq = Genome.sequence_to_encoding(ins_seq[start:end])
else:
seq = genome.get_encoding_from_coords(chr_name, start, end)
if strand == "-":
seq = seq[None, ::-1, ::-1]
else:
seq = seq[None, :, :]
sequence.append(seq)
sequence = np.concatenate(sequence, axis=1)
else:
if chrlen_alt_round > 256000000:
wpos = coord_clip(mpos + len(ins_seq), chrlen_alt_round, window_radius=128000000)
(sequence, normmats) = _retrieve_multi(
list(s[wpos - window_radius : wpos + window_radius]),
genome,
target=False,
normmat=True,
normmat_regionlist=[[mchr, wpos - window_radius, wpos + window_radius, "+"]],
)
    if mpos > wpos - window_radius:
anno_scaled = process_anno(
[[mpos, mpos + len(ins_seq), "gray"]],
base=wpos - window_radius,
window_radius=window_radius,
)
else:
anno_scaled = process_anno(
[[wpos - window_radius, mpos + len(ins_seq), "gray"]],
base=wpos - window_radius,
window_radius=window_radius,
)
if window_radius == 16000000:
outputs_alt_r = genomepredict(
sequence, mchr, mpos + len(ins_seq), wpos, annotation=anno_scaled, use_cuda=use_cuda
)
if file is not None:
genomeplot(outputs_alt_r, show_coordinates=True, file=file + ".alt.r.pdf")
else:
        outputs_alt_r = genomepredict_256Mb(
sequence,
mchr,
normmats,
chrlen_alt_round,
mpos + len(ins_seq),
wpos,
models=models,
annotation=anno_scaled,
padding_chr=padding_chr,
use_cuda=use_cuda,
)
if file is not None:
genomeplot_256Mb(
                outputs_alt_r, show_coordinates=True, file=file + ".alt.r.256m.pdf",
)
return outputs_ref, outputs_alt_l, outputs_alt_r
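# A hedged usage sketch for `process_ins` (illustrative only; the genome
# object, coordinates, and inserted sequence below are assumptions, not values
# taken from this codebase):
#
#   genome = MemmapGenome(...)  # hg38 reference, see selene_utils2
#   outputs_ref, outputs_alt_l, outputs_alt_r = process_ins(
#       "chr8", 127000000, "ACGT" * 2500, genome,
#       file="ins_example", use_cuda=False,
#   )
#   # outputs_* can be passed directly to genomeplot / genomeplot_256Mb.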
def process_custom(
region_list,
ref_region_list,
mpos,
genome,
ref_mpos_list=None,
anno_list=None,
ref_anno_list=None,
custom_models=None,
target=True,
file=None,
show_genes=True,
show_tracks=False,
window_radius=16000000,
use_cuda=True,
):
"""
Generate multiscale genome interaction predictions for
a custom variant by an ordered list of genomic segments.
Parameters
----------
region_list : list(list(...))
List of segments to complete the alternative. Each segment is specified
by a list( chr: str, start: int, end: int, strand: str), and segments
are concatenated together in the given order. The total length
should sum up to 32Mb. An example input is
[['chr5', 89411065, 89411065+16000000, '-'], ['chr7', 94378248, 94378248+16000000,'+']].
ref_region_list : list(list(...))
        The reference regions to predict. These can be any reference regions
        with the length of the specified window size. Each reference region is
        specified with a list( chr: str, start: int, end: int, strand: str).
        The strand must be '+'. The intended use is predicting the genome
        interactions for each segment that constitutes the alternative allele
        within the native
reference sequence context. An example
input is [['chr5', 89411065-16000000, 89411065+16000000,'+'],
['chr7', 94378248-16000000, 94378248+16000000,'+']].
mpos : int
        The position to zoom into in the alternative allele. Note that `mpos`
        here specifies the relative position with respect to the start of the
        32Mb sequence.
genome : selene_utils2.MemmapGenome or selene_sdk.sequences.Genome
The reference genome object to extract sequence from.
ref_mpos_list : list(int) or None, optional
Default is None. List of positions to zoom into for each of the
reference regions specified in `ref_region_list`. If not specified,
then zoom into the center of each region. Note that `ref_mpos_list`
specifies the relative positions with respect to start of the 32Mb.
For example, `16000000` means the center of the sequence.
custom_models : list(torch.nn.Module or str) or None, optional
Models to use instead of the default H1-ESC and HFF Orca models.
Default is None.
target : list(selene_utils2.Genomic2DFeatures or str) or bool, optional
If specified as list, use this list of targets to retrieve experimental
data (for plotting only). Default is True and will use micro-C data
for H1-ESC and HFF cells (4DNFI9GMP2J8, 4DNFI643OYP9) that correspond
to the default models.
file : str or None, optional
Default is None. The output file prefix.
show_genes : bool, optional
Default is True. If True, generate gene annotation visualization
file in pdf format that matches the windows of multiscale predictions.
show_tracks : bool, optional
Default is False. If True, generate chromatin tracks visualization
file in pdf format that matches the windows of multiscale predictions.
window_radius : int, optional
Default is 16000000. Currently only 16000000 (32Mb window) is accepted.
use_cuda : bool, optional
Default is True. Use CPU if False.
Returns
-------
    outputs_ref, outputs_alt : dict, dict
        Reference allele predictions for the last region in `ref_region_list`
        (when `file` is given, plots are generated for every reference region),
        Alternative allele predictions zooming into `mpos` of the assembled
        alternative allele.
        The returned results are in the format of dictionaries
containing the prediction outputs and other
retrieved information. These dictionaries can be directly used as
input to genomeplot or genomeplot_256Mb. See documentation of `genomepredict` or `genomepredict_256Mb` for
details of the dictionary content.
"""
if custom_models is None:
if window_radius == 16000000:
models = ["h1esc", "hff"]
elif window_radius == 128000000:
models = ["h1esc_256m", "hff_256m"]
else:
raise ValueError(
"Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
)
else:
models = custom_models
if target:
try:
            if target is True:
if window_radius == 16000000:
target = ["h1esc", "hff"]
elif window_radius == 128000000:
target = ["h1esc_256m", "hff_256m"]
target = [t if isinstance(t, Genomic2DFeatures) else target_dict_global[t] for t in target]
except KeyError:
target = False
def validate_region_list(region_list, enforce_strand=None):
sumlen = 0
for chrm, start, end, strand in region_list:
chrlen = [l for c, l in genome.get_chr_lens() if c == chrm].pop()
assert start >= 0 and end <= chrlen
sumlen += end - start
if enforce_strand:
if strand != enforce_strand:
raise ValueError("The specified strand must be " + enforce_strand)
assert sumlen == 2 * window_radius
validate_region_list(region_list)
for i, ref_region in enumerate(ref_region_list):
validate_region_list([ref_region], enforce_strand="+")
ref_sequence = genome.get_encoding_from_coords(*ref_region)[None, :]
if target:
targets = [
torch.FloatTensor(
t.get_feature_data(
ref_region[0], coord_round(ref_region[1]), coord_round(ref_region[2]),
)[None, :]
) for t in target
]
else:
targets = None
        anno_scaled = process_anno(ref_anno_list, base=0, window_radius=window_radius) if ref_anno_list is not None else None
outputs_ref = genomepredict(
ref_sequence,
ref_region[0],
ref_region[1] + window_radius if ref_mpos_list is None else ref_mpos_list[i],
ref_region[1] + window_radius,
annotation=anno_scaled,
models=models,
targets=targets,
use_cuda=use_cuda,
)
if file is not None:
genomeplot(
outputs_ref,
show_genes=show_genes,
show_tracks=show_tracks,
show_coordinates=True,
file=file + ".ref." + str(i) + ".pdf",
)
sequence = []
for chrm, start, end, strand in region_list:
seq = genome.get_encoding_from_coords(chrm, start, end)
if strand == "-":
seq = seq[None, ::-1, ::-1].copy()
else:
seq = seq[None, :, :]
sequence.append(seq)
alt_sequence = np.concatenate(sequence, axis=1)
    anno_scaled = process_anno(anno_list, base=0, window_radius=window_radius) if anno_list is not None else None
outputs_alt = genomepredict(
alt_sequence, "chimeric", mpos, window_radius, models=models, annotation=anno_scaled, use_cuda=use_cuda,
)
if file is not None:
genomeplot(outputs_alt, show_coordinates=False, file=file + ".alt.pdf")
return outputs_ref, outputs_alt
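# A hedged sketch of a `process_custom` call joining two 16Mb segments into a
# 32Mb chimeric allele, zooming into the junction at the center (coordinates
# follow the docstring example; `genome` and the empty annotation lists are
# assumptions):
#
#   outputs_ref, outputs_alt = process_custom(
#       [["chr5", 89411065, 89411065 + 16000000, "-"],
#        ["chr7", 94378248, 94378248 + 16000000, "+"]],
#       [["chr5", 89411065 - 16000000, 89411065 + 16000000, "+"],
#        ["chr7", 94378248 - 16000000, 94378248 + 16000000, "+"]],
#       16000000, genome, anno_list=[], ref_anno_list=[], use_cuda=False,
#   )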
def process_single_breakpoint(
chr1,
pos1,
chr2,
pos2,
orientation1,
orientation2,
genome,
custom_models=None,
target=True,
file=None,
show_genes=True,
show_tracks=False,
window_radius=16000000,
padding_chr="chr1",
use_cuda=True,
):
"""
Generate multiscale genome interaction predictions for
a simple translocation event that connects
two chromosomal breakpoints. Specifically, two breakpoint
positions and the corresponding two orientations are needed.
The orientations decide how the breakpoints are connected.
The ‘+’ or ‘-’ sign indicate whether the left or right side of
the breakpoint is used. For example, for an input
('chr1', 85691449, 'chr5', 89533745 '+', '+'), two plus signs
indicate connecting chr1:0-85691449 with chr5:0-89533745.
Parameters
----------
chr1 : str
The chromosome name of the first segment
pos1 : int
        The coordinate of the breakpoint on the first segment
chr2 : str
The chromosome name of the second segment
pos2 : int
        The coordinate of the breakpoint on the second segment
orientation1 : str
Indicate which side of the breakpoint should be used for
the first segment,
        '+' indicates the left and '-' indicates the right side.
orientation2 : str
Indicate which side of the breakpoint should be used for
the second segment,
        '+' indicates the left and '-' indicates the right side.
genome : selene_utils2.MemmapGenome or selene_sdk.sequences.Genome
The reference genome object to extract sequence from
custom_models : list(torch.nn.Module or str) or None, optional
Models to use instead of the default H1-ESC and HFF Orca models.
Default is None.
target : list(selene_utils2.Genomic2DFeatures or str) or bool, optional
If specified as list, use this list of targets to retrieve experimental
data (for plotting only). Default is True and will use micro-C data
for H1-ESC and HFF cells (4DNFI9GMP2J8, 4DNFI643OYP9) that correspond
to the default models.
file : str or None, optional
Default is None. The output file prefix.
show_genes : bool, optional
Default is True. If True, generate gene annotation visualization
file in pdf format that matches the windows of multiscale predictions.
show_tracks : bool, optional
Default is False. If True, generate chromatin tracks visualization
file in pdf format that matches the windows of multiscale predictions.
window_radius : int, optional
Default is 16000000. The acceptable values are 16000000 which selects
the 1-32Mb models or 128000000 which selects the 32-256Mb models.
padding_chr : str, optional
Default is "chr1". If window_radius is 128000000, padding is generally
needed to fill the sequence to 256Mb. The padding sequence will be
extracted from the padding_chr.
use_cuda : bool, optional
Default is True. Use CPU if False.
Returns
-------
outputs_ref_1, outputs_ref_2, outputs_alt : dict, dict, dict
Reference allele predictions zooming into the chr1 breakpoint,
Reference allele predictions zooming into the chr2 breakpoint,
Alternative allele prediction zooming into the junction.
        The returned results are in the format of dictionaries
containing the prediction outputs and other
retrieved information. These dictionaries can be directly used as
input to genomeplot or genomeplot_256Mb. See documentation of `genomepredict` or `genomepredict_256Mb` for
details of the dictionary content.
"""
if custom_models is None:
if window_radius == 16000000:
models = ["h1esc", "hff"]
elif window_radius == 128000000:
models = ["h1esc_256m", "hff_256m"]
else:
raise ValueError(
"Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
)
else:
models = custom_models
if target:
try:
            if target is True:
if window_radius == 16000000:
target = ["h1esc", "hff"]
elif window_radius == 128000000:
target = ["h1esc_256m", "hff_256m"]
target = [t if isinstance(t, Genomic2DFeatures) else target_dict_global[t] for t in target]
except KeyError:
target = False
chrlen1 = [l for c, l in genome.get_chr_lens() if c == chr1].pop()
    # ref.1
if window_radius == 16000000:
wpos = coord_clip(pos1, chrlen1)
sequence = genome.get_encoding_from_coords(
chr1, wpos - window_radius, wpos + window_radius
)[None, :]
if target:
targets = [
torch.FloatTensor(
t.get_feature_data(
chr1, coord_round(wpos - window_radius), coord_round(wpos + window_radius),
)[None, :]
) for t in target
]
else:
targets = None
elif window_radius == 128000000:
chrlen1_round = chrlen1 - chrlen1 % 32000
wpos = 128000000
if target:
sequence, normmats, targets = _retrieve_multi(
[[chr1, 0, chrlen1_round, "+"], [padding_chr, 0, 256000000 - chrlen1_round, "+"]],
genome,
target=target,
)
else:
sequence, normmats = _retrieve_multi(
[[chr1, 0, chrlen1_round, "+"], [padding_chr, 0, 256000000 - chrlen1_round, "+"]],
genome,
target=target,
)
targets = None
else:
raise ValueError(
"Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
)
anno_scaled = process_anno(
[[pos1, "single"]], base=wpos - window_radius, window_radius=window_radius
)
if window_radius == 128000000:
outputs_ref_1 = genomepredict_256Mb(
sequence,
chr1,
normmats,
chrlen1_round,
pos1,
wpos,
models=models,
annotation=anno_scaled,
padding_chr=padding_chr,
targets=targets,
use_cuda=use_cuda,
)
else:
outputs_ref_1 = genomepredict(
sequence, chr1, pos1, wpos, models=models, annotation=anno_scaled, targets=targets, use_cuda=use_cuda,
)
if file is not None:
if window_radius == 128000000:
genomeplot_256Mb(
outputs_ref_1, show_coordinates=True, file=file + ".ref.1.256m.pdf",
)
else:
genomeplot(
outputs_ref_1,
show_genes=show_genes,
show_tracks=show_tracks,
show_coordinates=True,
file=file + ".ref.1.pdf",
colorbar=True,
)
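    # ref.2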
chrlen2 = [l for c, l in genome.get_chr_lens() if c == chr2].pop()
if window_radius == 16000000:
wpos = coord_clip(pos2, chrlen2)
sequence = genome.get_encoding_from_coords(
chr2, wpos - window_radius, wpos + window_radius
)[None, :]
if target:
targets = [
torch.FloatTensor(
t.get_feature_data(
chr2, coord_round(wpos - window_radius), coord_round(wpos + window_radius),
)[None, :]
) for t in target
]
else:
targets = None
elif window_radius == 128000000:
chrlen2_round = chrlen2 - chrlen2 % 32000
wpos = 128000000
if target:
sequence, normmats, targets = _retrieve_multi(
[[chr2, 0, chrlen2_round, "+"], [padding_chr, 0, 256000000 - chrlen2_round, "+"]],
genome,
target=target,
)
else:
sequence, normmats = _retrieve_multi(
[[chr2, 0, chrlen2_round, "+"], [padding_chr, 0, 256000000 - chrlen2_round, "+"]],
genome,
target=target,
)
targets = None
anno_scaled = process_anno(
[[pos2, "single"]], base=wpos - window_radius, window_radius=window_radius
)
if window_radius == 128000000:
outputs_ref_2 = genomepredict_256Mb(
sequence,
chr2,
normmats,
chrlen2_round,
pos2,
wpos,
models=models,
annotation=anno_scaled,
padding_chr=padding_chr,
targets=targets,
use_cuda=use_cuda,
)
else:
outputs_ref_2 = genomepredict(
sequence, chr2, pos2, wpos, models=models, annotation=anno_scaled, targets=targets, use_cuda=use_cuda,
)
if file is not None:
if window_radius == 128000000:
genomeplot_256Mb(
outputs_ref_2, show_coordinates=True, file=file + ".ref.2.256m.pdf",
)
else:
genomeplot(
outputs_ref_2,
show_genes=show_genes,
show_tracks=show_tracks,
show_coordinates=True,
file=file + ".ref.2.pdf",
colorbar=True,
)
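    # alt: assemble the rearranged chromosome from the two breakpoint sides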
chrlen = [l for c, l in genome.get_chr_lens() if c == chr1].pop()
s = StructuralChange2(chr1, chrlen)
if orientation1 == "+":
s.delete(pos1, chrlen)
else:
s.delete(0, pos1 - 1)
s.invert(0, chrlen - pos1 + 1)
chrlen = [l for c, l in genome.get_chr_lens() if c == chr2].pop()
s2 = StructuralChange2(chr2, chrlen)
if orientation2 == "-":
s2.delete(0, pos2 - 1)
else:
s2.delete(pos2, chrlen)
s2.invert(0, pos2)
breakpos = s.coord_points[-1]
s = s + s2
if window_radius == 16000000:
wpos = coord_clip(breakpos, s.coord_points[-1])
sequence = []
curpos = 0
anno = []
for chrm, start, end, strand in s[wpos - window_radius : wpos + window_radius]:
seq = genome.get_encoding_from_coords(chrm, start, end)
if strand == "-":
seq = seq[None, ::-1, ::-1]
else:
seq = seq[None, :, :]
sequence.append(seq)
anno.append([curpos, curpos + end - start])
curpos = curpos + end - start
        sequence = np.concatenate(sequence, axis=1)
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import cameratransform as ct
def random_unit_vector():
norm = 0
while norm < 1E-15:
gauss = np.random.normal(0, 1, 3)
norm = np.linalg.norm(gauss)
return gauss / norm
def nudge(direction, weight):
rows, _ = direction.shape
for row in range(rows):
new_direction = direction[row,:] * (1 - weight) + weight * random_unit_vector()
direction[row,:] = new_direction / np.linalg.norm(new_direction)
return direction
def wrap(coordinate, side):
if np.isscalar(coordinate):
if coordinate >= side:
coordinate -= side
elif coordinate < 0:
coordinate += side
return coordinate
for index in range(len(coordinate)):
if coordinate[index] >= side:
coordinate[index] -= side
elif coordinate[index] < 0:
coordinate[index] += side
return coordinate
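# Quick illustration of the periodic wrap (values are examples):
#   wrap(10.5, 10.0)                        -> 0.5
#   wrap(-0.2, 10.0)                        -> 9.8
#   wrap(np.array([9.0, 10.0, -1.0]), 10.0) -> array([9., 0., 9.])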
def cube_check(xyz, center, radius):
r = None
    xyz = xyz - center  # copy, so the caller's array is not mutated
if max(abs(xyz)) <= radius:
r = np.linalg.norm(xyz)
return r
class Environment(object):
def __init__(self, cube_side, insect_count:int, show_3D=True):
self.side = cube_side
self.N = insect_count
self.data = np.zeros((self.N, 7))
self.origin = np.ones(3) * self.side / 2
# Give each insect a random speed between 0.25 and 1.25 m/s
        self.data[:, 0] = np.random.rand(self.N) + 0.25
import numpy as np
from baselines.deepq.experiments.atari.knn_cuda import knn as knn_cuda
# each action -> a lru_knn buffer
class LRU_KNN_UCB_GPU(object):
def __init__(self, capacity, z_dim, env_name, action, mode="mean", num_actions=6,knn=4):
self.action = action
self.knn = knn
self.env_name = env_name
self.capacity = capacity
self.num_actions = num_actions
self.states = np.empty((capacity, z_dim), dtype=np.float32)
self.q_values_decay = np.zeros(capacity)
self.count = np.zeros(capacity)
self.lru = np.zeros(capacity)
        self.best_action = np.zeros((capacity, num_actions), dtype=np.int64)  # np.int is deprecated
self.curr_capacity = 0
self.tm = 0.0
self.addnum = 0
self.buildnum = 256
self.buildnum_max = 256
self.bufpath = './buffer/%s' % self.env_name
self.mode = mode
self.threshold = 1e-2
def peek(self, key, value_decay, action=-1, modify=False):
if self.curr_capacity ==0 :
return None, None, None
dist, ind = knn_cuda.knn(np.transpose(np.array([key])), np.transpose(self.states[:self.curr_capacity]), 1)
dist, ind = np.transpose(dist), np.transpose(ind - 1)
ind = ind[0][0]
# print(dist.shape,ind.shape)
if dist[0][0] < self.threshold:
# print("peek success")
self.lru[ind] = self.tm
self.tm += 0.01
if modify:
if self.mode == "max":
if value_decay > self.q_values_decay[ind]:
self.q_values_decay[ind] = value_decay
if action >= 0:
self.best_action[ind, action] = 1
elif self.mode == "mean":
self.q_values_decay[ind] = (value_decay + self.q_values_decay[ind] * self.count[ind]) / (
self.count[ind] + 1)
self.count[ind] += 1
return self.q_values_decay[ind], self.best_action[ind], self.count[ind]
# print self.states[ind], key
# if prints:
# print("peek", dist[0][0])
return None, None, None
    def knn_value(self, key, knn):
# knn = min(self.curr_capacity, knn)
if self.curr_capacity < knn:
return 0.0, None, 1.0
dist, ind = knn_cuda.knn(np.transpose(key), np.transpose(self.states[:self.curr_capacity]), knn)
dist, ind = np.transpose(dist), np.transpose(ind - 1)
coeff = np.exp(dist[0])
coeff = coeff / np.sum(coeff)
value = 0.0
action = np.zeros((self.num_actions,))
value_decay = 0.0
count = 0
# print("nearest dist", dist[0][0])
for j, index in enumerate(ind[0]):
value_decay += self.q_values_decay[index] * coeff[j]
count += self.count[index] * coeff[j]
action += self.best_action[index] * coeff[j]
self.lru[index] = self.tm
self.tm += 0.01
q_decay = value_decay
return q_decay, action, count
def act_value(self, key, knn):
# knn = min(self.curr_capacity, knn)
values = []
actions = np.zeros((len(key), self.num_actions))
counts = []
exact_refer = []
if self.curr_capacity < knn:
for i in range(len(key)):
actions[i, self.action] = 1
values.append(0)
counts.append(1)
exact_refer.append(False)
            return values, actions, counts, np.array(exact_refer)
import tensorflow as tf
import numpy as np
class StochasticPolicyGradientAgent():
"""
A Gaussian Policy Gradient based agent implementation
"""
def __init__(self, env, learning_rate = 0.001, discount_rate = 0.99, batch_size = 1, quiet = True):
self._optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate)
self._sess = tf.Session()
self._env = env
self._batch_size = batch_size
self._discount_rate = discount_rate
self._state_buffer = []
self._reward_buffer = []
self._action_buffer = []
self._quiet = quiet
state_dim = np.prod(np.array(env.observation_space.shape))
self._states = tf.placeholder(tf.float32,
shape=(None, state_dim),
name="states")
init = tf.contrib.layers.xavier_initializer()
# neural featurizer parameters
h1 = 256
h2 = 128
h3 = 128
mu_hidden = tf.layers.dense(self._states, h1,
activation = tf.nn.tanh,
name = 'dense_0',
kernel_initializer=init)
mu_hidden_2 = tf.layers.dense(mu_hidden, h2,
activation = tf.nn.tanh,
name = 'dense_1',
kernel_initializer=init)
mu_hidden_3 = tf.layers.dense(mu_hidden_2, h3,
activation = tf.nn.tanh,
name = 'dense_2',
kernel_initializer=init)
self._mu = tf.layers.dense(mu_hidden_3, 1,
activation = tf.tanh,
name = 'mu',
kernel_initializer=init)
self._mu = tf.squeeze(self._mu)
# Building sigma Model
sig_hidden = tf.layers.dense(self._states, h1,
activation = tf.sigmoid,
name = 'sigma_dense_0',
kernel_initializer=init)
sig_hidden_2 = tf.layers.dense(sig_hidden, h2,
activation = tf.sigmoid,
name = 'sig_dense_1',
kernel_initializer=init)
sig_hidden_3 = tf.layers.dense(sig_hidden_2, h3,
activation = tf.sigmoid,
name = 'sig_dense_2',
kernel_initializer=init)
self._sigma = tf.layers.dense(sig_hidden_3, 1,
activation = tf.exp,
name = 'sigma',
kernel_initializer=init)
self._sigma = tf.squeeze(self._sigma)
self._sigma = tf.add(self._sigma, 1e-5)
        # Sampling an action from the distribution
self._normal_dist = tf.contrib.distributions.Normal(self._mu, self._sigma)
self._action = self._normal_dist.sample()
        # Computing the loss function
self._discounted_rewards = tf.placeholder(tf.float32, (None, 1), name="discounted_rewards")
self._taken_actions = tf.placeholder(tf.float32, (None, 1), name="taken_actions")
self._loss = -tf.reduce_mean(tf.log(1e-5 + self._normal_dist.prob(self._taken_actions)) * self._discounted_rewards,0)
self._train_op = self._optimizer.minimize(self._loss)
self._sess.run(tf.global_variables_initializer())
def act(self, state):
mu, sigma, action = self._sess.run([self._mu, self._sigma, self._action], feed_dict={
self._states: state})
        action = np.clip(action, self._env.action_space.low[0], self._env.action_space.high[0])
        return action
"""
This file contains the code required for IteratedWatersheds
"""
#----------------------------------------------------------------------------------------------#
#--------------------------------------- PRIORITY QUEUE ---------------------------------------#
#----------------------------------------------------------------------------------------------#
import itertools
import heapq
class priorityQueue:
def __init__(self):
self.pq = []
self.entry_finder = {}
self.REMOVED = "REMOVED"
self.counter = itertools.count()
def add_element(self, elt, priority=0):
""" Add an element to the queue
"""
if elt in self.entry_finder.keys():
self.remove_element(elt)
count = next(self.counter)
entry = [priority, count, elt]
self.entry_finder[elt] = entry
heapq.heappush(self.pq, entry)
def remove_element(self, elt):
"""
"""
entry = self.entry_finder[elt]
entry[-1] = self.REMOVED
def pop_element(self):
while self.pq:
priority, count, elt = heapq.heappop(self.pq)
if elt != self.REMOVED:
del self.entry_finder[elt]
return elt
raise KeyError('Cannot pop an element from empty queue')
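# A minimal sketch of the priorityQueue API (illustrative):
#   pq = priorityQueue()
#   pq.add_element('a', priority=3)
#   pq.add_element('b', priority=1)
#   pq.add_element('a', priority=0)  # re-adding an element updates its priority
#   pq.pop_element()                 # -> 'a' (lowest priority pops first)
#   pq.pop_element()                 # -> 'b'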
#-----------------------------------------------------------------------------------------------#
#---------------------------------- IMAGE FORESTING TRANSFORM ----------------------------------#
#-----------------------------------------------------------------------------------------------#
import numpy as np
def _get_cost(a,b,flag='SP_SUM'):
if flag == 'SP_SUM':
return a+b
elif flag == 'SP_MAX':
return max(a,b)
else:
raise Exception('flag should be SP_SUM or SP_MAX but got {}'.format(flag))
def _ift(graph,init_labels,alg_flag='SP_SUM'):
"""Return the image foresting transform for the labels
graph : sparse matrix
The edge weighted graph on which the shortest path must be calculated
init_labels : ndarray
Initial Labelling. 0 indicates unlabelled pixels.
"""
size = graph.shape[0]
indices, indptr, data = graph.indices, graph.indptr, graph.data
# Initialization - Labels and Cost
labelling = np.array(init_labels)
    cost = np.inf * np.ones(size, dtype=np.float64)  # np.inf requires a float dtype
cost[init_labels > 0] = 0
pq = priorityQueue()
for i in np.where(init_labels > 0)[0]:
pq.add_element(i,0)
while pq.pq:
try:
x = pq.pop_element()
except:
break
for i in range(indptr[x],indptr[x+1]):
y = indices[i]
c_prime = _get_cost(cost[x],data[i],alg_flag) # New cost
if c_prime < cost[y]:
cost[y] = c_prime
pq.add_element(y,priority=c_prime)
labelling[y] = labelling[x]
assert np.all(labelling > 0), "Some labellings are still 0. Check if the graph is connected!!"
return labelling, np.sum(cost)
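# A hedged example: the IFT on a 3-vertex path graph, seeding the endpoints
# with labels 1 and 2 (edge weights are illustrative):
#   from scipy.sparse import csr_matrix
#   g = csr_matrix(np.array([[0., 1., 0.],
#                            [1., 0., 2.],
#                            [0., 2., 0.]]))
#   _ift(g, np.array([1, 0, 2]))  # -> (array([1, 1, 2]), 1.0)
#   # the middle vertex takes label 1 because its path cost 1 < 2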
#-----------------------------------------------------------------------------------------------#
#-------------------------------------- CALCULATE CENTERS --------------------------------------#
#-----------------------------------------------------------------------------------------------#
from scipy.sparse.csgraph import floyd_warshall
def _calc_centers(graph, X, labelling, method='nearest'):
"""Return the new centers
graph : sparse matrix
Indicates the graph constructed from X
X : ndarray
Original Data
labelling: 1d array
The labelling of the vertices
method : one of 'nearest', 'floyd_warshall', 'erosion'
Method to calculate the new centers
"""
size = graph.shape[0]
centers = np.zeros(size)
max_label = int(np.max(labelling))
for label in range(1, max_label+1):
index_vert = np.where(labelling == label)[0]
if method == 'floyd_warshall':
subgraph = ((graph[index_vert]).transpose())[index_vert]
FW = floyd_warshall(subgraph, directed=False)
ind_center = np.argmin(np.max(FW, axis=-1))
centers[index_vert[ind_center]] = label
elif method == 'nearest':
mean_subgraph = np.mean(X[index_vert,:], axis=0, keepdims=True)
dist_from_mean = np.sum((X[index_vert,:] - mean_subgraph)**2, axis = -1)
ind_center = np.argmin(dist_from_mean.flatten())
centers[index_vert[ind_center]] = label
else:
raise Exception("Only use floyd_warshall or nearest methods (for now)")
return centers
#------------------------------------------------------------------------------------------------#
#-------------------------------------- ITERATED WATERSHED --------------------------------------#
#------------------------------------------------------------------------------------------------#
import numpy as np
def iterated_watershed(graph, X, number_clusters=6, max_iterations=100):
"""
"""
size = graph.shape[0]
#Initialize Random Centers
centers = np.zeros(size, dtype=np.int32)
index_centers = np.random.choice(size,number_clusters,replace=False)
centers[index_centers] = np.arange(number_clusters) + 1
#Cost
cost_history = []
opt_cost = np.inf
opt_labels = None
opt_centers = None
for i in range(max_iterations):
# Label all the vertices
labels, cost_arr = _ift(graph,centers)
# Update the optimal cost
if cost_arr < opt_cost:
opt_labels = labels
opt_cost = cost_arr
opt_centers = centers
# Compute the cost and append it to the history
cost_history.append(cost_arr)
# Compute the new centers
centersNew = _calc_centers(graph, X, labels)
# Break if the centers did not change!
if np.all(centers==centersNew):
break
else:
centers=centersNew
return opt_labels, cost_history, opt_centers
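# A hedged end-to-end sketch (shapes and cluster count are illustrative):
# build a dissimilarity graph from an RGB image with img_to_graph (defined
# below) and cluster it.
#   img = np.random.rand(32, 32, 3)
#   g = img_to_graph(img, which='dissimilarity')
#   X = img.reshape(-1, 3)
#   labels, history, centers = iterated_watershed(g, X, number_clusters=4)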
#-------------------------------------------------------------------------------------#
#------------------------------- MAKE GRAPH UNDIRECTED -------------------------------#
#-------------------------------------------------------------------------------------#
import scipy as sp
def make_undirected(G):
"""This function takes the graph and returns the undirected version.
"""
u,v,w = sp.sparse.find(G)
edges = dict()
for i in range(u.shape[0]):
edges[(u[i],v[i])] = w[i]
edges[(v[i],u[i])] = w[i]
sizeNew = len(edges)
uNew = np.zeros(sizeNew, dtype=np.int32)
vNew = np.zeros(sizeNew, dtype=np.int32)
wNew = np.zeros(sizeNew, dtype=np.float64)
i = 0
for ((u,v),w) in edges.items():
uNew[i], vNew[i], wNew[i] = u, v, w
i += 1
    assert i == sizeNew, "edge count mismatch while symmetrizing the graph"
return sp.sparse.csr_matrix((wNew,(uNew,vNew)), shape=G.shape)
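# Sketch: symmetrizing a one-edge directed toy graph (illustrative):
#   G = sp.sparse.csr_matrix(([1.0], ([0], [1])), shape=(2, 2))
#   U = make_undirected(G)  # now U[0, 1] == U[1, 0] == 1.0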
#-----------------------------------------------------------------------------------------------#
#------------------------------------ CONSTRUCT 4-ADJ GRAPH ------------------------------------#
#-----------------------------------------------------------------------------------------------#
from scipy.sparse import csr_matrix
def img_to_graph(img, beta=1., eps=1e-6, which='similarity'):
"""
"""
s0, s1, s2 = img.shape
xGrid, yGrid = np.meshgrid(np.arange(s0), np.arange(s1))
indGrid = (xGrid*s1 + yGrid).transpose()
data_vert = np.sum((img[:-1,:,:] - img[1:,:,:])**2, axis = -1).flatten()
row_vert = indGrid[:-1,:].flatten()
col_vert = indGrid[1:,:].flatten()
data_horiz = np.sum((img[:,:-1,:] - img[:,1:,:])**2, axis = -1).flatten()
row_horiz = indGrid[:,:-1].flatten()
col_horiz = indGrid[:,1:].flatten()
data = np.concatenate((data_vert, data_horiz))
row = np.concatenate((row_vert, row_horiz))
col = np.concatenate((col_vert, col_horiz))
if which == 'similarity':
# Make the data into similarities
data = np.exp(-beta*data/data.std()) + eps
elif which == 'dissimilarity':
data += eps
else:
raise Exception("Should be one of similarity or dissimilarity.")
graph = csr_matrix((data,(row, col)), shape = (s0*s1, s0*s1))
graph = make_undirected(graph)
return graph
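# Quick sanity check (illustrative): a 4-adjacency graph of an H x W image has
# H*(W-1) horizontal plus (H-1)*W vertical edges, each stored twice after
# symmetrization.
#   g = img_to_graph(np.random.rand(4, 5, 3))
#   g.nnz  # -> 2 * (4 * 4 + 3 * 5) == 62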
#-------------------------------------------------------------------------------------------------#
#----------------------------------------- GENERATE DATA -----------------------------------------#
#-------------------------------------------------------------------------------------------------#
from PIL import Image
import numpy as np
import os
def generate_data_1Object(number_images=10**6):
"""Generate data from weizman 1-Object dataset
"""
list_names = list(filter(lambda x:(x[0] != '.') and (x[-3:] != "mat"), os.listdir("./Workstation_files/1obj")))
np.random.shuffle(list_names)
total_count = len(list_names)
for i in range(min(total_count, number_images)):
fname = list_names[i]
img = np.array(Image.open("./Workstation_files/1obj/"+fname+"/src_color/"+fname+".png"), dtype=np.float64)
img = img/255.
list_gt_fname = list(filter(lambda x: x[0] != '.', os.listdir("./Workstation_files/1obj/"+fname+"/human_seg/")))
gt = []
for gt_name in list_gt_fname:
tmp = np.array(Image.open("./Workstation_files/1obj/"+fname+"/human_seg/"+gt_name), dtype=np.int32)
z = np.zeros(tmp.shape[:2], dtype=np.int32)
z[np.where(tmp[:,:,0]/255. == 1)] = 1
gt.append(z)
yield img, gt, fname
def generate_data_2Object(number_images=10**6):
"""Generate data from weizman 2-Object dataset
"""
list_names = list(filter(lambda x: (x[0] != '.') and (x[-3:] != "mat"), os.listdir("./Workstation_files/2obj")))
np.random.shuffle(list_names)
total_count = len(list_names)
for i in range(min(total_count, number_images)):
fname = list_names[i]
img = np.array(Image.open("./Workstation_files/2obj/"+fname+"/src_color/"+fname+".png"), dtype=np.float64)
img = img/255.
list_gt_fname = list(filter(lambda x: x[0] != '.', os.listdir("./Workstation_files/2obj/"+fname+"/human_seg/")))
gt = []
for gt_name in list_gt_fname:
tmp = np.array(Image.open("./Workstation_files/2obj/"+fname+"/human_seg/"+gt_name), dtype=np.int32)
z = np.zeros(tmp.shape[:2], dtype=np.int32)
z[np.where(tmp[:,:,0]/255. == 1)] = 1
z[np.where(tmp[:,:,2]/255. == 1)] = 2
gt.append(z)
yield img, gt, fname
#-------------------------------------------------------------------------------------------------#
#---------------------------------------- EVAULATE OUTPUT ----------------------------------------#
#-------------------------------------------------------------------------------------------------#
from sklearn.metrics import adjusted_mutual_info_score
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster.supervised import _comb2
def evaluate_output(ypred, list_gt):
"""
"""
list_AMI, list_ARI, list_fScore, list_acc = [], [], [], []
for gt in list_gt:
ytrue = gt.flatten()
ypred = ypred.flatten()
AMI = adjusted_mutual_info_score(ytrue, ypred)
list_AMI.append(AMI)
ARI = adjusted_rand_score(ytrue, ypred)
list_ARI.append(ARI)
# Get the contigency matrix
contingency = contingency_matrix(ytrue, ypred)
# F-Score :
TP = sum(_comb2(n_ij) for n_ij in contingency.flatten())
total_positive_pred = sum(_comb2(n_c) for n_c in np.ravel(contingency.sum(axis=1)))
total_positive_true = sum(_comb2(n_c) for n_c in np.ravel(contingency.sum(axis=0)))
precision, recall = TP/total_positive_pred, TP/total_positive_true
f_score = 2*precision*recall/(precision + recall)
list_fScore.append(f_score)
# Assume that the class of a predicted label is the class with highest intersection
accuracy = np.sum(np.max(contingency, axis=0))/np.sum(contingency)
list_acc.append(accuracy)
return np.max(list_AMI), np.max(list_ARI), np.max(list_fScore), np.max(list_acc)
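# A hedged sketch (labels are illustrative): scoring a prediction against two
# human segmentations keeps the best value per metric.
#   pred = np.array([[0, 0], [1, 1]])
#   gts = [np.array([[0, 0], [1, 1]]), np.array([[0, 1], [1, 1]])]
#   ami, ari, f_score, acc = evaluate_output(pred, gts)  # gts[0] matches exactly, so all 1.0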
#-------------------------------------------------------------------------------------------------#
#-------------------------------------- SPECTRAL CLUSTERING --------------------------------------#
#-------------------------------------------------------------------------------------------------#
from scipy.sparse import csr_matrix
from sklearn.cluster import k_means
from scipy.sparse.csgraph import connected_components, laplacian
from scipy.sparse.linalg import eigsh
import scipy as sp
from scipy import sparse
from sklearn.cluster import spectral_clustering as _spectral_clustering
def spectral_clustering(graph, n_clusters, beta_weight=1., eps_weight=1e-6):
"""
"""
graphTmp = csr_matrix(graph, copy=True)
graphTmp.data = np.exp(-beta_weight*graphTmp.data/graphTmp.data.std()) + eps_weight
L = laplacian(graphTmp, normed=True)
    eigval, embed = eigsh(L, n_clusters, sigma=1e-10)
    d0, labels, d2 = k_means(embed, n_clusters, n_init=10)
return labels
#--------------------------------------------------------------------------------------------------#
#----------------------------------- ISOPERIMETRIC PARTITIONING -----------------------------------#
#--------------------------------------------------------------------------------------------------#
from IsoperimetricPartitioning import recursive_iso_parition, isoperimetric_Full
"""
isoperimetric_Full(img_graph, ground=0)
recursive_iso_parition(img_graph, algCode='full')
"""
def isoperimetric_partitioning(graph, beta_weight=1., eps_weight=1e-6, which='full'):
"""
"""
graphTmp = csr_matrix(graph, copy=True)
graphTmp.data = np.exp(-beta_weight*graphTmp.data/graphTmp.data.std()) + eps_weight
seed = 0
if which == 'full':
labels, isoSolution = isoperimetric_Full(graphTmp, ground=seed)
elif which == 'recursive':
labels = recursive_iso_parition(graphTmp, algCode='full')
return labels
#--------------------------------------------------------------------------------------------------#
#-------------------------------------- K-MEANS PARTITIONING --------------------------------------#
#--------------------------------------------------------------------------------------------------#
from sklearn.cluster import KMeans
def kmeans_adapted(img, n_clusters):
"""
"""
s0, s1, s2 = img.shape
X = img.reshape((s0*s1, s2))
xgrid, ygrid = np.meshgrid(np.arange(s0), np.arange(s1))
xgrid, ygrid = xgrid.transpose(), ygrid.transpose()
xgrid, ygrid = (xgrid.flatten()).reshape((-1,1)), (ygrid.flatten()).reshape((-1,1))
grid = np.hstack((xgrid, ygrid))
grid = grid/np.max(grid)
X = np.hstack((X, grid))
clf = KMeans(n_clusters=n_clusters)
labels = clf.fit_predict(X)
return labels
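# Sketch: clustering on color plus normalized (x, y) position (illustrative):
#   img = np.random.rand(16, 16, 3)
#   labels = kmeans_adapted(img, n_clusters=3)  # flat array of 16*16 labels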
#---------------------------------------------------------------------------------------------------#
#-------------------------------------- GET ROAD NETWORK DATA --------------------------------------#
#---------------------------------------------------------------------------------------------------#
import pandas as pd
import numpy as np
import networkx as nx
import scipy as sp
def get_road_network_data(city='Mumbai'):
"""
"""
data = pd.read_csv("./RoadNetwork/"+city+"/"+city+"_Edgelist.csv")
size = data.shape[0]
X = np.array(data[['XCoord','YCoord']])
u, v = np.array(data['START_NODE'], dtype=np.int32), np.array(data['END_NODE'], dtype=np.int32)
w = np.array(data['LENGTH'], dtype=np.float64)
w = w/np.max(w) + 1e-6
G = sp.sparse.csr_matrix((w, (u,v)), shape = (size, size))
n, labels = sp.sparse.csgraph.connected_components(G)
if n == 1:
return G
# If there are more than one connected component, return the largest connected component
    count_size_comp = np.bincount(labels)
import numpy as np
from glob import glob
import os
import json
from neuralparticles.tools.param_helpers import *
from neuralparticles.tools.data_helpers import particle_radius
from neuralparticles.tools.shell_script import *
from neuralparticles.tools.uniio import writeParticlesUni, writeNumpyRaw, readNumpyOBJ, writeNumpyOBJ, readParticlesUni, writeUni
from neuralparticles.tools.particle_grid import ParticleIdxGrid
from neuralparticles.tensorflow.losses.tf_approxmatch import approx_vel, emd_loss
from scipy import optimize
import random
import math
from collections import OrderedDict
import time
import imageio
import keras.backend as K
def _approx_vel(pos, npos, h=0.5, it=1):
"""cost = np.linalg.norm(np.expand_dims(pos, axis=1) - np.expand_dims(npos, axis=0), axis=-1)
idx = optimize.linear_sum_assignment(cost)
vel = np.zeros_like(pos)
vel[idx[0]] = npos[idx[1]] - pos[idx[0]]"""
vel = K.eval(approx_vel(K.constant(np.expand_dims(pos, 0)), K.constant(np.expand_dims(npos, 0))))[0]
    dist = np.linalg.norm(np.expand_dims(pos, axis=0) - np.expand_dims(pos, axis=1), axis=-1)
"""Class for calibrating the color-based red-sequence model.
"""
from __future__ import division, absolute_import, print_function
from past.builtins import xrange
import os
import numpy as np
import fitsio
import time
from scipy.optimize import least_squares
from ..configuration import Configuration
from ..fitters import MedZFitter, RedSequenceFitter, RedSequenceOffDiagonalFitter, CorrectionFitter
from ..redsequence import RedSequenceColorPar
from ..color_background import ColorBackground
from ..galaxy import GalaxyCatalog
from ..catalog import Catalog, Entry
from ..zred_color import ZredColor
from ..utilities import make_nodes, CubicSpline, interpol
class RedSequenceCalibrator(object):
"""
Class for calibrating the color-based red-sequence model.
Requires an input galfile that has the following fields:
z: host cluster redshift
pcol: probability of membership using color/luminosity
p: probability of membership using color/luminosity/radial filter
refmag: total magnitude in the reference band
mag: magnitude array
mag_err: magnitude error array
"""
def __init__(self, conf, galfile):
"""
Instantiate a RedSequenceCalibrator.
Parameters
----------
conf: `str` or `redmapper.Configuration`
Configuration yaml file or configuration object
galfile: `str`
Galaxy file with the required fields
"""
if not isinstance(conf, Configuration):
self.config = Configuration(conf)
else:
self.config = conf
self._galfile = galfile
def run(self, doRaise=True):
"""
Run the red-sequence calibration.
Parameters
----------
doRaise: `bool`, optional
Raise an error if background cannot be computed for any galaxies
Default is True. Can be set to False for certain testing.
"""
gals = GalaxyCatalog.from_galfile(self._galfile)
if self.config.calib_use_pcol:
use, = np.where((gals.z > self.config.zrange[0]) &
(gals.z < self.config.zrange[1]) &
(gals.pcol > self.config.calib_pcut))
else:
use, = np.where((gals.z > self.config.zrange[0]) &
(gals.z < self.config.zrange[1]) &
(gals.p > self.config.calib_pcut))
if use.size == 0:
raise RuntimeError("No good galaxies in %s!" % (self._galfile))
gals = gals[use]
nmag = self.config.nmag
ncol = nmag - 1
# Reference mag nodes for pivot
pivotnodes = make_nodes(self.config.zrange, self.config.calib_pivotmag_nodesize)
# Covmat nodes
covmatnodes = make_nodes(self.config.zrange, self.config.calib_covmat_nodesize)
# correction nodes
corrnodes = make_nodes(self.config.zrange, self.config.calib_corr_nodesize)
# correction slope nodes
corrslopenodes = make_nodes(self.config.zrange, self.config.calib_corr_slope_nodesize)
# volume factor (hard coded)
volnodes = make_nodes(self.config.zrange, 0.01)
# Start building the par dtype
dtype = [('pivotmag_z', 'f4', pivotnodes.size),
('pivotmag', 'f4', pivotnodes.size),
('minrefmag', 'f4', pivotnodes.size),
('maxrefmag', 'f4', pivotnodes.size),
('medcol', 'f4', (pivotnodes.size, ncol)),
('medcol_width', 'f4', (pivotnodes.size, ncol)),
('covmat_z', 'f4', covmatnodes.size),
('sigma', 'f4', (ncol, ncol, covmatnodes.size)),
('covmat_amp', 'f4', (ncol, ncol, covmatnodes.size)),
('covmat_slope', 'f4', (ncol, ncol, covmatnodes.size)),
('corr_z', 'f4', corrnodes.size),
('corr', 'f4', corrnodes.size),
('corr_slope_z', 'f4', corrslopenodes.size),
('corr_slope', 'f4', corrslopenodes.size),
('corr_r', 'f4', corrslopenodes.size),
('corr2', 'f4', corrnodes.size),
('corr2_slope', 'f4', corrslopenodes.size),
('corr2_r', 'f4', corrslopenodes.size),
('volume_factor_z', 'f4', volnodes.size),
('volume_factor', 'f4', volnodes.size)]
# And for each color, make the nodes
node_dict = {}
self.ztag = [None] * ncol
self.ctag = [None] * ncol
self.zstag = [None] * ncol
self.stag = [None] * ncol
for j in xrange(ncol):
self.ztag[j] = 'z%02d' % (j)
self.ctag[j] = 'c%02d' % (j)
self.zstag[j] = 'zs%02d' % (j)
self.stag[j] = 'slope%02d' % (j)
node_dict[self.ztag[j]] = make_nodes(self.config.zrange, self.config.calib_color_nodesizes[j],
maxnode=self.config.calib_color_maxnodes[j])
node_dict[self.zstag[j]] = make_nodes(self.config.zrange, self.config.calib_slope_nodesizes[j],
maxnode=self.config.calib_color_maxnodes[j])
dtype.extend([(self.ztag[j], 'f4', node_dict[self.ztag[j]].size),
(self.ctag[j], 'f4', node_dict[self.ztag[j]].size),
(self.zstag[j], 'f4', node_dict[self.zstag[j]].size),
(self.stag[j], 'f4', node_dict[self.zstag[j]].size)])
# Make the pars ... and fill them with the defaults
self.pars = Entry(np.zeros(1, dtype=dtype))
self.pars.pivotmag_z = pivotnodes
self.pars.covmat_z = covmatnodes
self.pars.corr_z = corrnodes
self.pars.corr_slope_z = corrslopenodes
self.pars.volume_factor_z = volnodes
for j in xrange(ncol):
self.pars._ndarray[self.ztag[j]] = node_dict[self.ztag[j]]
self.pars._ndarray[self.zstag[j]] = node_dict[self.zstag[j]]
# And a special subset of color galaxies
if self.config.calib_use_pcol:
coluse, = np.where(gals.pcol > self.config.calib_color_pcut)
else:
coluse, = np.where(gals.p > self.config.calib_color_pcut)
colgals = gals[coluse]
        # A placeholder zredstr, which gives access to mstar and related utilities
self.zredstr = RedSequenceColorPar(None, config=self.config)
# And read the color background
self.bkg = ColorBackground(self.config.bkgfile_color)
# And prepare for luptitude corrections
if self.config.b[0] == 0.0:
self.do_lupcorr = False
else:
self.do_lupcorr = True
self.bnmgy = self.config.b * 1e9
self.lupzp = 22.5
# Compute pivotmags
self._calc_pivotmags(colgals)
# Compute median colors
self._calc_medcols(colgals)
# Compute diagonal parameters
self._calc_diagonal_pars(gals, doRaise=doRaise)
# Compute off-diagonal parameters
self._calc_offdiagonal_pars(gals, doRaise=doRaise)
# Compute volume factor
self._calc_volume_factor(self.config.zrange[1])
# Write out the parameter file
self.save_pars(self.config.parfile, clobber=False)
# Compute zreds without corrections
# Later will want this parallelized, I think
self._calc_zreds(gals, do_correction=False)
# Compute correction (mode1)
self._calc_corrections(gals)
# Compute correction (mode2)
self._calc_corrections(gals, mode2=True)
# And re-save the parameter file
self.save_pars(self.config.parfile, clobber=True)
# Recompute zreds with corrections
# Later will want this parallelized, I think
self._calc_zreds(gals, do_correction=True)
# And want to save galaxies and zreds
zredfile = os.path.join(self.config.outpath, os.path.basename(self._galfile.rstrip('.fit') + '_zreds.fit'))
gals.to_fits_file(zredfile)
# Make diagnostic plots
self._make_diagnostic_plots(gals)
def _compute_startvals(self, nodes, z, val, xval=None, err=None, median=False, fit=False, mincomp=3):
"""
Compute the starting fit values using a simple algorithm.
Must select one (and only one) of median=True (median fit) or
fit=True (weighted mean fit).
Parameters
----------
nodes: `np.array`
Float array of redshift nodes
z: `np.array`
Float array of redshifts
val: `np.array`
Float array of values to fit (e.g. refmag, color)
xval: `np.array`, optional
X-axis value for color-magnitude relation if fitting slope.
Usually refmag.
Default is None, which means not fitting a slope.
err: `np.array`, optional
Float array of error on val. Not used if fitting median.
Default is None.
median: `bool`, optional
Perform median fit. Default is False.
        fit: `bool`, optional
            Perform weighted mean fit. Default is False.
        mincomp: `int`, optional
            Minimum number of galaxies per redshift node required to compute
            values; otherwise the previous node's values are carried forward.
            Default is 3.
        """
def _linfunc(p, x, y):
return (p[1] + p[0] * x) - y
if (not median and not fit) or (median and fit):
raise RuntimeError("Must select one and only one of median and fit")
if median:
mvals = np.zeros(nodes.size)
scvals = np.zeros(nodes.size)
else:
cvals = np.zeros(nodes.size)
svals = np.zeros(nodes.size)
if err is not None:
if err.size != val.size:
raise ValueError("val and err must be the same length")
# default all to 0.1
evals = np.zeros(nodes.size) + 0.1
else:
evals = None
for i in xrange(nodes.size):
if i == 0:
zlo = nodes[0]
else:
zlo = (nodes[i - 1] + nodes[i]) / 2.
if i == nodes.size - 1:
zhi = nodes[i]
else:
zhi = (nodes[i] + nodes[i + 1]) / 2.
u, = np.where((z > zlo) & (z < zhi))
if u.size < mincomp:
if i > 0:
if median:
mvals[i] = mvals[i - 1]
scvals[i] = scvals[i - 1]
else:
cvals[i] = cvals[i - 1]
svals[i] = svals[i - 1]
if err is not None:
evals[i] = evals[i - 1]
else:
if median:
mvals[i] = np.median(val[u])
scvals[i] = np.median(np.abs(val[u] - mvals[i]))
else:
                    res = least_squares(_linfunc, [0.0, 0.0], loss='soft_l1', args=(xval[u], val[u]))
                    cvals[i] = res.x[1]
                    svals[i] = np.clip(res.x[0], None, 0.0)
if err is not None:
evals[i] = np.median(err[u])
if median:
return mvals, scvals
else:
return cvals, svals, evals
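    # Typical calls (mirroring the uses elsewhere in this class):
    #   pivmags, _ = self._compute_startvals(nodes, gals.z, gals.refmag, median=True)
    #   cvals, svals, evals = self._compute_startvals(nodes, z, col, xval=dmags, fit=True)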
def _compute_single_lupcorr(self, j, cvals, svals, gals, dmags, mags, lups, mind, sign):
"""
Compute the luptitude correction for a single color
Parameters
----------
j: `int`
Color index
cvals: `np.array`
Float array of spline values for color at pivotmag
svals: `np.array`
Float array of slope values
gals: `redmapper.GalaxyCatalog`
Galaxy catalog being fit
dmags: `np.array`
Float array of refmag - pivotmag
mags: `np.array`
2d Float array of true (model) magnitudes
lups: `np.array`
2d Float array of true (model) luptitudes
mind: `int`
magnitude index, currently being worked on.
sign: `int`, -1 or 1
Sign of color; -1 if band is redder than ref_ind,
+1 if band is bluer than ref_ind
Returns
-------
lupcorr: `np.array`
Float array of luptitude color corrections
"""
spl = CubicSpline(self.pars._ndarray[self.ztag[j]], cvals)
cv = spl(gals.z)
spl = CubicSpline(self.pars._ndarray[self.zstag[j]], svals)
sv = spl(gals.z)
mags[:, mind] = mags[:, mind + sign] + sign * (cv + sv * dmags)
flux = 10.**((mags[:, mind] - self.lupzp) / (-2.5))
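        # asinh magnitude (luptitude; Lupton, Gunn, & Szalay 1999):
        #   lup = 2.5*log10(1/b) - arcsinh(0.5*flux/b_nmgy) / (0.4*ln 10)
        # which stays finite at zero or negative flux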
lups[:, mind] = 2.5 * np.log10(1.0 / self.config.b[mind]) - np.arcsinh(0.5 * flux / self.bnmgy[mind]) / (0.4 * np.log(10.0))
magcol = mags[:, j] - mags[:, j + 1]
lupcol = lups[:, j] - lups[:, j + 1]
lupcorr = lupcol - magcol
return lupcorr
def _calc_pivotmags(self, gals):
"""
Calculate the pivot magnitude parameters.
These are put into self.pars.pivotmag, self.pars.maxrefmag, and
self.pars.minrefmag
Parameters
----------
gals: `redmapper.GalaxyCatalog`
Galaxy catalog with fields required for fit.
"""
self.config.logger.info("Calculating pivot magnitudes...")
# With binning, approximate the positions for starting the fit
        pivmags, _ = self._compute_startvals(self.pars.pivotmag_z, gals.z, gals.refmag, median=True)
medfitter = MedZFitter(self.pars.pivotmag_z, gals.z, gals.refmag)
pivmags = medfitter.fit(pivmags)
self.pars.pivotmag = pivmags
# and min and max...
self.pars.minrefmag = self.zredstr.mstar(self.pars.pivotmag_z) - 2.5 * np.log10(30.0)
lval_min = np.clip(self.config.lval_reference - 0.1, 0.001, None)
self.pars.maxrefmag = self.zredstr.mstar(self.pars.pivotmag_z) - 2.5 * np.log10(lval_min)
def _calc_medcols(self, gals):
"""
Calculate the median color spline parameters.
Sets self.pars.medcol, self.pars.medcol_width
Parameters
----------
gals: `redmapper.GalaxyCatalog`
Galaxy catalog with fields required for fit.
"""
self.config.logger.info("Calculating median colors...")
ncol = self.config.nmag - 1
galcolor = gals.galcol
        for j in range(ncol):
col = galcolor[:, j]
# get the start values
mvals, scvals = self._compute_startvals(self.pars.pivotmag_z, gals.z, col, median=True)
# compute the median
medfitter = MedZFitter(self.pars.pivotmag_z, gals.z, col)
mvals = medfitter.fit(mvals)
# and the scatter
spl = CubicSpline(self.pars.pivotmag_z, mvals)
med = spl(gals.z)
medfitter = MedZFitter(self.pars.pivotmag_z, gals.z, np.abs(col - med))
scvals = medfitter.fit(scvals)
self.pars.medcol[:, j] = mvals
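            # 1.4826 converts a median absolute deviation into an equivalent
            # Gaussian sigma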
self.pars.medcol_width[:, j] = 1.4826 * scvals
def _calc_diagonal_pars(self, gals, doRaise=True):
"""
Calculate the model parameters and diagonal elements of the covariance
matrix (one color at a time).
Sets self.pars.sigma, self.pars.covmat_amp, self.pars.cXX, self.pars.slopeXX
Parameters
----------
gals: `redmapper.GalaxyCatalog`
Galaxy catalog with fields required for fit.
doRaise: `bool`, optional
Raise if there's a problem with the background? Default is True.
"""
# The main routine to compute the red sequence on the diagonal
ncol = self.config.nmag - 1
galcolor = gals.galcol
galcolor_err = gals.galcol_err
# compute the pivot mags
spl = CubicSpline(self.pars.pivotmag_z, self.pars.pivotmag)
pivotmags = spl(gals.z)
# And set the right probabilities
if self.config.calib_use_pcol:
probs = gals.pcol
else:
probs = gals.p
# Figure out the order of the colors for luptitude corrections
mags = np.zeros((gals.size, self.config.nmag))
if self.do_lupcorr:
col_indices = np.zeros(ncol, dtype=np.int32)
sign_indices = np.zeros(ncol, dtype=np.int32)
mind_indices = np.zeros(ncol, dtype=np.int32)
            c = 0
            for j in range(self.config.ref_ind, self.config.nmag):
                col_indices[c] = j - 1
                sign_indices[c] = -1
                mind_indices[c] = j
                c += 1
            for j in range(self.config.ref_ind - 2, -1, -1):
                col_indices[c] = j
                sign_indices[c] = 1
                mind_indices[c] = j
                c += 1
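            # the two loops above work outward from ref_ind, first toward
            # redder bands and then toward bluer ones, so each magnitude is
            # derived from a neighbour that has already been computed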
lups = np.zeros_like(mags)
mags[:, self.config.ref_ind] = gals.mag[:, self.config.ref_ind]
flux = 10.**((mags[:, self.config.ref_ind] - self.lupzp) / (-2.5))
lups[:, self.config.ref_ind] = 2.5 * np.log10(1.0 / self.config.b[self.config.ref_ind]) - np.arcsinh(0.5 * flux / self.bnmgy[self.config.ref_ind]) / (0.4 * np.log(10.0))
else:
col_indices = np.arange(ncol)
sign_indices = np.ones(ncol, dtype=np.int32)
mind_indices = col_indices
# One color at a time along the diagonal
        for c in range(ncol):
starttime = time.time()
# The order is given by col_indices, which ensures that we work from the
# reference mag outward
j = col_indices[c]
sign = sign_indices[c]
mind = mind_indices[c]
self.config.logger.info("Working on diagonal for color %d" % (j))
col = galcolor[:, j]
col_err = galcolor_err[:, j]
# Need to go through the _ndarray because ztag and zstag are strings
cvals = np.zeros(self.pars._ndarray[self.ztag[j]].size)
svals = np.zeros(self.pars._ndarray[self.zstag[j]].size)
scvals = np.zeros(self.pars.covmat_z.size) + 0.05
photo_err = np.zeros_like(cvals)
# Calculate median truncation
spl = CubicSpline(self.pars.pivotmag_z, self.pars.medcol[:, j])
med = spl(gals.z)
spl = CubicSpline(self.pars.pivotmag_z, self.pars.medcol_width[:, j])
sc = spl(gals.z)
# What is the maximum scatter in each node?
# This is based on the median fit, which does not include photometric
# error, and should always be larger. This helps regularize the edges
# where things otherwise can run away.
scatter_max = spl(self.pars.covmat_z)
u, = np.where((galcolor[:, j] > (med - self.config.calib_color_nsig * sc)) &
(galcolor[:, j] < (med + self.config.calib_color_nsig * sc)))
trunc = self.config.calib_color_nsig * sc[u]
dmags = gals.refmag - pivotmags
# And the starting values...
# Note that this returns the slope values (svals) at the nodes from the cvals
# but these might not be the same nodes, so we have to approximate
cvals_temp, svals_temp, _ = self._compute_startvals(self.pars._ndarray[self.ztag[j]],
gals.z[u], col[u],
xval=dmags[u],
fit=True, mincomp=5)
cvals[:] = cvals_temp
inds = np.searchsorted(self.pars._ndarray[self.ztag[j]],
self.pars._ndarray[self.zstag[j]])
svals[:] = svals_temp[inds]
# And do the luptitude correction if necessary.
if self.do_lupcorr:
lupcorr = self._compute_single_lupcorr(j, cvals, svals, gals, dmags, mags, lups, mind, sign)
else:
lupcorr = np.zeros(gals.size)
# We fit in stages: first the mean, then the slope, then the scatter,
# and finally all three
rsfitter = RedSequenceFitter(self.pars._ndarray[self.ztag[j]],
gals.z[u], col[u], col_err[u],
dmags=dmags[u],
trunc=trunc,
slope_nodes=self.pars._ndarray[self.zstag[j]],
scatter_nodes=self.pars.covmat_z,
lupcorrs=lupcorr[u],
probs=probs[u],
bkgs=self.bkg.lookup_diagonal(j, col[u], gals.refmag[u], doRaise=doRaise),
scatter_max=scatter_max, use_scatter_prior=True)
# fit the mean
cvals, = rsfitter.fit(cvals, svals, scvals, fit_mean=True)
# Update the lupcorr...
if self.do_lupcorr:
rsfitter._lupcorrs[:] = self._compute_single_lupcorr(j, cvals, svals, gals, dmags, mags, lups, mind, sign)[u]
# fit the slope
svals, = rsfitter.fit(cvals, svals, scvals, fit_slope=True)
# fit the scatter
scvals, = rsfitter.fit(cvals, svals, scvals, fit_scatter=True)
# fit combined
cvals, svals, scvals = rsfitter.fit(cvals, svals, scvals,
fit_mean=True, fit_slope=True, fit_scatter=True)
# Re-fit...
#cvals, svals, scvals = rsfitter.fit(cvals, svals, scvals,
# fit_mean=True, fit_slope=True, fit_scatter=True)
# And record in the parameters
self.pars._ndarray[self.ctag[j]] = cvals
self.pars._ndarray[self.stag[j]] = svals
self.pars.sigma[j, j, :] = scvals
self.pars.covmat_amp[j, j, :] = scvals ** 2.
# And print the time taken
self.config.logger.info('Done in %.2f seconds.' % (time.time() - starttime))
def _calc_offdiagonal_pars(self, gals, doRaise=True):
"""
Calculate the off-diagonal elements of the covariance matrix.
Sets self.pars.sigma, self.pars.covmat_amp (off-diagonal).
Parameters
----------
gals: `redmapper.GalaxyCatalog`
Galaxy catalog with fields required for fit.
doRaise: `bool`, optional
Raise if there's a problem with the background? Default is True.
"""
# The routine to compute the off-diagonal elements
ncol = self.config.nmag - 1
galcolor = gals.galcol
galcolor_err = gals.galcol_err
# compute the pivot mags
spl = CubicSpline(self.pars.pivotmag_z, self.pars.pivotmag)
pivotmags = spl(gals.z)
# And set the right probabilities
if self.config.calib_use_pcol:
probs = gals.pcol
else:
probs = gals.p
# Compute c, slope, and median and width for all galaxies/colors
ci = np.zeros((gals.size, ncol))
si = np.zeros_like(ci)
medci = np.zeros_like(ci)
medwidthi = np.zeros_like(ci)
gsig = np.zeros_like(ci)
        for j in range(ncol):
spl = CubicSpline(self.pars._ndarray[self.ztag[j]],
self.pars._ndarray[self.ctag[j]])
ci[:, j] = spl(gals.z)
spl = CubicSpline(self.pars._ndarray[self.zstag[j]],
self.pars._ndarray[self.stag[j]])
si[:, j] = spl(gals.z)
spl = CubicSpline(self.pars.pivotmag_z, self.pars.medcol[:, j])
medci[:, j] = spl(gals.z)
spl = CubicSpline(self.pars.pivotmag_z, self.pars.medcol_width[:, j])
medwidthi[:, j] = spl(gals.z)
spl = CubicSpline(self.pars.covmat_z, self.pars.sigma[j, j, :])
gsig[:, j] = spl(gals.z)
if self.do_lupcorr:
mags = np.zeros((gals.size, self.config.nmag))
lups = np.zeros_like(mags)
mags[:, self.config.ref_ind] = gals.refmag
            for j in range(self.config.ref_ind + 1, self.config.nmag):
                mags[:, j] = mags[:, j - 1] - (ci[:, j - 1] + si[:, j - 1] * (gals.refmag - pivotmags))
            for j in range(self.config.ref_ind - 1, -1, -1):
                mags[:, j] = mags[:, j + 1] + (ci[:, j] + si[:, j] * (gals.refmag - pivotmags))
            for j in range(self.config.nmag):
                flux = 10.**((mags[:, j] - self.lupzp) / (-2.5))
                lups[:, j] = 2.5 * np.log10(1.0 / self.config.b[j]) - np.arcsinh(0.5 * flux / self.bnmgy[j]) / (0.4 * np.log(10.0))
magcol = mags[:, :-1] - mags[:, 1:]
lupcol = lups[:, :-1] - lups[:, 1:]
lupcorr = lupcol - magcol
else:
lupcorr = np.zeros((gals.size, ncol))
        template_col = np.zeros((gals.size, ncol))
import warnings
import numpy as np
from scipy.ndimage import map_coordinates
from dipy.reconst.recspeed import le_to_odf, sum_on_blocks_1d
from dipy.reconst.dsi import project_hemisph_bvecs
from scipy.ndimage.filters import laplace,gaussian_laplace
from scipy.ndimage import zoom,generic_laplace,correlate1d
from dipy.core.geometry import sphere2cart,cart2sphere,vec2vec_rotmat
from dipy.tracking.propspeed import map_coordinates_trilinear_iso
from dipy.reconst.odf import OdfModel
###############################################
# MODULE TEMPORARILY DISABLED FOR REFACTORING #
###############################################
import nose
class UnderConstruction(nose.SkipTest):
pass
raise UnderConstruction()
###############################################
class DiffusionNablaModel(OdfModel):
def __init__(self, bvals, gradients, odf_sphere='symmetric362',
half_sphere_grads=False, fast=True):
''' Reconstruct the signal using Diffusion Nabla Imaging
        As described in E. Garyfallidis, "Towards an accurate brain
        tractography", PhD thesis, 2011.
Parameters
-----------
bvals : array, shape (N,)
gradients : array, shape (N,3) also known as bvecs
odf_sphere : str or tuple, optional
If str, then load sphere of given name using ``get_sphere``.
If tuple, gives (vertices, faces) for sphere.
filter : array, shape(len(vertices),)
default is None (using standard hanning filter for DSI)
        half_sphere_grads : boolean Default(False)
            in order to create the q-space we use the bvals and gradients.
            If the gradients cover only one hemisphere, they are mirrored
            to the antipodal hemisphere before building the q-space.
See also
----------
dipy.reconst.eit.EquatorialInversionModel, dipy.reconst.dti.TensorModel, dipy.reconst.dsi.DiffusionSpectrumModel
'''
#check if bvectors are provided only on a hemisphere
        if half_sphere_grads:
pass
#bvals=np.append(bvals.copy(),bvals[1:].copy())
#gradients=np.append(gradients.copy(),-gradients[1:].copy(),axis=0)
#data=np.append(data.copy(),data[...,1:].copy(),axis=-1)
#load bvals and bvecs
self.bvals=bvals
gradients[np.isnan(gradients)] = 0.
self.gradients=gradients
#save number of total diffusion volumes
self.dn=self.gradients.shape[0] #data.shape[-1]
odf_vertices, odf_faces = sphere_vf_from(odf_sphere)
self.set_odf_vertices(odf_vertices,None,odf_faces)
self.odfn=odf_vertices.shape[0]
#odf sampling radius
self.radius=np.arange(0,5,.2)
#self.radiusn=len(self.radius)
#self.create_qspace(bvals,gradients,16,8)
#peak threshold
#self.peak_thr=.7
#equatorial zone
self.zone=5.
self.gaussian_weight=0.05
self.fast=fast
        if fast:
            self.evaluate_odf = self.fast_odf
        else:
            self.evaluate_odf = self.slow_odf
self.precompute()
def precompute(self):
self.radiusn=len(self.radius)
self.create_qspace(self.bvals,self.gradients,17,8)
        if not self.fast:
            self.radon_params()
            self.precompute_interp_coords()
        else:
            self.precompute_fast_coords()
            self.precompute_equator_indices(self.zone)
            self.precompute_angular(self.gaussian_weight)
    def precompute_botox(self, smooth=.05, level=.3):
        self.botox_smooth = smooth
        self.botox_level = level
def precompute_angular(self,smooth):
        if smooth is None:
self.E=None
return
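        # E is a row-normalized exponential kernel over the pairwise vertex
        # cosines W; applying it to an ODF smooths it angularly, with
        # `smooth` (gaussian_weight) setting the kernel width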
self.W=np.dot(self.odf_vertices,self.odf_vertices.T)
self.W=self.W.astype('f8')
E=np.exp(self.W/smooth)
self.E=E/np.sum(E,axis=1)[:,None]
def create_qspace(self,bvals,gradients,size,origin):
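        # Build an integer q-space grid: radii scale as sqrt(b/b_min) along
        # each gradient direction, rounded to the nearest voxel and shifted
        # by the grid origin so indices are non-negative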
bv=bvals
bmin=np.sort(bv)[1]
bv=np.sqrt(bv/bmin)
qtable=np.vstack((bv,bv,bv)).T*gradients
qtable=np.floor(qtable+.5)
self.qtable=qtable
self.q=qtable+origin
self.q=self.q.astype('i8')
self.origin=origin
self.sz=size
def radon_params(self,ang_res=64):
#calculate radon integration parameters
phis=np.linspace(0,2*np.pi,ang_res)[:-1]
planars=[]
for phi in phis:
planars.append(sphere2cart(1,np.pi/2,phi))
planars=np.array(planars)
planarsR=[]
for v in self.odf_vertices:
R=vec2vec_rotmat(np.array([0,0,1]),v)
planarsR.append(np.dot(R,planars.T).T)
self.equators=planarsR
self.equatorn=len(phis)
def slow_odf(self,s):
""" Calculate the orientation distribution function
"""
odf = np.zeros(self.odfn)
Eq=np.zeros((self.sz,self.sz,self.sz))
for i in range(self.dn):
            Eq[self.q[i][0], self.q[i][1], self.q[i][2]] = s[i] / float(s[0])
LEq=laplace(Eq)
self.Eq=Eq
self.LEq=LEq
LEs=map_coordinates(LEq,self.Xs,order=1)
le_to_odf(odf,LEs,self.radius,self.odfn,self.radiusn,self.equatorn)
return odf
def odfs(self):
return self.ODF
def fast_odf(self,s):
odf = np.zeros(self.odfn)
Eq=np.zeros((self.sz,self.sz,self.sz))
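        # accumulate the normalized signal on the integer q-space grid;
        # several gradient directions can map to the same voxel, hence '+='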
        for i in range(self.dn):
Eq[self.q[i][0],self.q[i][1],self.q[i][2]]+=s[i]/s[0]
LEq=laplace(Eq)
LEs=map_coordinates(LEq,self.Ys.T,order=1)
LEs=LEs.reshape(self.odfn,self.radiusn)
LEs=LEs*self.radius
LEsum=np.sum(LEs,axis=1)
        for i in range(self.odfn):
odf[i]=np.sum(LEsum[self.eqinds[i]])/self.eqinds_len[i]
return -odf
def precompute_equator_indices(self,thr=5):
eq_inds=[]
eq_inds_complete=[]
eq_inds_len=np.zeros(self.odfn)
for (i,v) in enumerate(self.odf_vertices):
eq_inds.append([])
for (j,k) in enumerate(self.odf_vertices):
vk=np.clip(np.dot(v,k),-1,1)
angle=np.rad2deg(np.arccos(vk))
if angle < 90 + thr and angle > 90 - thr:
eq_inds[i].append(j)
eq_inds_complete.append(j)
eq_inds_len[i]=len(eq_inds[i])
self.eqinds=eq_inds
self.eqinds_com=np.array(eq_inds_complete)
        self.eqinds_len = np.array(eq_inds_len, dtype='i8')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 3 08:33:16 2021
@author: athulsun
"""
from mat4py import loadmat
from scipy.signal import filtfilt
import numpy as np
from scipy.interpolate import interp1d,PchipInterpolator
import matplotlib.pyplot as plt
import os
import sys
from dtqpy.src.classes.DTQPy_CLASS_OPTS import *
from dtqpy.src.classes.DTQPy_CLASS_SETUP import *
from dtqpy.src.DTQPy_solve import DTQPy_solve
def f_dtqp_fowt(LinearModels,disturbance):
# load linear models
Chan = LinearModels['Chan']
#breakpoint()
# obtain the size of the arrays
nl = len(Chan)
nx,nx = np.shape(Chan[0]['A'])
nx,nu = np.shape(Chan[0]['B'])
ny = len(LinearModels['OutName'])
OutputName = LinearModels['OutName']
# initialize
Aw = np.zeros((nl,nx,nx))
Bw = np.zeros((nl,nx,nu))
Cw = np.zeros((nl,ny,nx))
Dw = np.zeros((nl,ny,nu))
xw = np.zeros((nx,nl))
uw = np.zeros((nu,nl))
yw = np.zeros((nl,ny))
ws = np.zeros((nl))
# collect
for i in range(nl):
Aw[i,:,:] = np.array(Chan[i]['A'])
        Bw[i,:,:] = np.array(Chan[i]['B'])
# Auxiliary functions to create the path for the
# results files and help handling the results formats.
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import locale
from sklearn.metrics import mean_squared_error
from sklearn.metrics.pairwise import cosine_similarity
# Standard file paths
def create_global_file_path_variables(results_path):
global local_path_train, local_path_test, \
global_path_train, global_path_test, regression_path, \
feynman_path, analyzed_path
# Local explanation files
local_path_train = f'{results_path}/3.explanation/3.1.local/3.1.1.traindata/'
local_path_test = f'{results_path}/3.explanation/3.1.local/3.1.2.testdata/'
global_path_train = f'{results_path}/3.explanation/3.2.global/3.2.1.traindata/'
global_path_test = f'{results_path}/3.explanation/3.2.global/3.2.2.testdata/'
regression_path = f'{results_path}/2.regression/'
def set_mpl_sns_params(abnt=False):
sns.set_theme(style='white')
if abnt:
plt.rcParams['font.family'] = ['serif']
plt.rcParams['font.serif'] = ['Times New Roman']
# comma decimal separator
locale.setlocale(locale.LC_NUMERIC, "pt_BR.UTF-8")
# Tell matplotlib to use the locale we set above
plt.rcParams['axes.formatter.use_locale'] = True
SMALL_SIZE = 8
MEDIUM_SIZE = 10
BIGGER_SIZE = 12
plt.rc('font', size=BIGGER_SIZE) # controls default text sizes
plt.rc('axes', titlesize=BIGGER_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=BIGGER_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=BIGGER_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=BIGGER_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=BIGGER_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
plt.ion()
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
def convert_to_array_of_floats(x):
"""All results are saved as floats or strings representing
an array of floats. This function will convert the strings to
actual arrays, and single values will also be saved as arrays
as a convention. This should be used when loading a result csv
file to properly calculate the metrics.
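
    Examples
    --------
    >>> convert_to_array_of_floats('[1.0 2.0]')
    array([1., 2.])
    >>> convert_to_array_of_floats(3)
    array([3.])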
"""
# Strings will also enter here, but will fail in the all() check
if hasattr(x, '__len__'):
if all(isfloat(xprime) for xprime in x):
return x
if isfloat(x):
return np.array([float(x)])
if isinstance(x, str):
        # if it is not an array representation like '[1.0 2.0]'
        if not (x.startswith('[') and x.endswith(']')):
            return np.nan
return np.fromstring(x[1:-1], dtype=np.float64, sep=' ')
return np.nan
# Some quality metrics
def Cosine_similarity(yhat, y):
return cosine_similarity(y, yhat)[0, 0]
def RMSE(yhat, y):
return mean_squared_error(y, yhat, squared=False)
def NMSE(yhat, y):
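    # Normalized MSE: MSE divided by the variance of the target, so a value
    # of 1.0 corresponds to always predicting the mean of y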
    return mean_squared_error(yhat, y) / np.var(y)
'''Module for additional computations required by the model'''
from numpy import (
arange, array, atleast_2d, concatenate, copy, cumprod, diag, isnan, ix_,
ones, shape, sum, where, zeros)
from numpy import int64 as my_int
import pdb
from scipy.sparse import csc_matrix as sparse
from model.imports import NoImportModel
from model.subsystems import subsystem_key
def state_recursor(
states,
no_compartments,
age_class,
b_size,
n_blocks,
con_reps,
c,
x,
depth,
k):
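    # Depth-first enumeration of every way to distribute c individuals of
    # one age class over no_compartments compartments; each completed
    # pattern is copied into its repeated block positions in `states`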
if depth < no_compartments-1:
for x_i in arange(c + 1 - x.sum()):
x[0, depth] = x_i
x[0, depth+1:] = zeros(
(1, no_compartments-depth-1),
dtype=my_int)
states, k = state_recursor(
states,
no_compartments,
age_class,
b_size,
n_blocks,
con_reps,
c,
x,
depth+1,
k)
else:
x[0, -1] = c - sum(x[0, :depth])
for block in arange(n_blocks):
repeat_range = arange(
block * b_size
+ k * con_reps,
block * b_size +
(k + 1) * con_reps)
states[repeat_range, no_compartments*age_class:no_compartments*(age_class+1)] = \
ones(
(con_reps, 1),
dtype=my_int) \
* array(
x,
ndmin=2, dtype=my_int)
k += 1
return states, k
return states, k
def build_states_recursively(
total_size,
no_compartments,
classes_present,
block_size,
num_blocks,
consecutive_repeats,
composition):
states = zeros(
(total_size, no_compartments*len(classes_present)),
dtype=my_int)
for age_class in range(len(classes_present)):
k = 0
states, k = state_recursor(
states,
no_compartments,
age_class,
block_size[age_class],
num_blocks[age_class],
consecutive_repeats[age_class],
composition[classes_present[age_class]],
zeros([1, no_compartments], dtype=my_int),
0,
k)
return states, k
def build_state_matrix(household_spec):
# Number of times you repeat states for each configuration
consecutive_repeats = concatenate((
ones(1, dtype=my_int), cumprod(household_spec.system_sizes[:-1])))
block_size = consecutive_repeats * household_spec.system_sizes
num_blocks = household_spec.total_size // block_size
states, k = build_states_recursively(
household_spec.total_size,
household_spec.no_compartments,
household_spec.class_indexes,
block_size,
num_blocks,
consecutive_repeats,
household_spec.composition)
# Now construct a sparse vector which tells you which row a state appears
# from in the state array
# This loop tells us how many values each column of the state array can
# take
state_sizes = concatenate([
(household_spec.composition[i] + 1)
* ones(household_spec.no_compartments, dtype=my_int)
for i in household_spec.class_indexes]).ravel()
# This vector stores the number of combinations you can get of all
# subsequent elements in the state array, i.e. reverse_prod(i) tells you
# how many arrangements you can get in states(:,i+1:end)
reverse_prod = array([0, *cumprod(state_sizes[:0:-1])])[::-1]
# We can then define index_vector look up the location of a state by
# weighting its elements using reverse_prod - this gives a unique mapping
# from the set of states to the integers. Because lots of combinations
# don't actually appear in the states array, we use a sparse array which
# will be much bigger than we actually require
rows = [
states[k, :].dot(reverse_prod) + states[k, -1]
for k in range(household_spec.total_size)]
if min(rows) < 0:
print(
'Negative row indices found, proportional total',
sum(array(rows) < 0),
'/',
len(rows),
'=',
sum(array(rows) < 0) / len(rows))
index_vector = sparse((
arange(household_spec.total_size),
(rows, [0]*household_spec.total_size)))
return states, reverse_prod, index_vector, rows
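# Worked example of the row mapping (hypothetical system with
# state_sizes = [3, 2]): reverse_prod = [2, 0], so a state (s0, s1) maps to
# row states[k, :].dot(reverse_prod) + states[k, -1] = 2*s0 + s1, a unique
# integer key per state used to populate the sparse index_vector.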
def within_household_spread(
composition, model_input):
'''Assuming frequency-dependent homogeneous within-household mixing
composition[i] is the number of individuals in age-class i inside the
household'''
sus = model_input.sus
det = model_input.det
tau = model_input.tau
K_home = model_input.k_home
alpha = model_input.alpha
gamma = model_input.gamma
# Set of individuals actually present here
classes_present = where(composition.ravel() > 0)[0]
K_home = K_home[ix_(classes_present, classes_present)]
sus = sus[classes_present]
det = det[classes_present]
tau = tau[classes_present]
r_home = atleast_2d(diag(sus).dot(K_home))
states, total_size, reverse_prod, index_vector, rows = build_state_matrix(composition, classes_present, 5)
d_pos = 2 + 5 * arange(len(classes_present))
u_pos = 3 + 5 * arange(len(classes_present))
Q_int = sparse((total_size, total_size))
inf_event_row = array([], dtype=my_int)
inf_event_col = array([], dtype=my_int)
inf_event_class = array([], dtype=my_int)
# Add events for each age class
for i in range(len(classes_present)):
s_present = where(states[:, 5*i] > 0)[0]
e_present = where(states[:, 5*i+1] > 0)[0]
d_present = where(states[:, 5*i+2] > 0)[0]
u_present = where(states[:, 5*i+3] > 0)[0]
# First do infection events
inf_to = zeros(len(s_present), dtype=my_int)
inf_rate = zeros(len(s_present))
for k in range(len(s_present)):
old_state = copy(states[s_present[k], :])
inf_rate[k] = old_state[5*i] * (
r_home[i, :].dot(
(old_state[d_pos] / composition[classes_present])
+ (old_state[u_pos] / composition[classes_present]) * tau))
new_state = old_state.copy()
new_state[5*i] -= 1
new_state[5*i + 1] += 1
inf_to[k] = index_vector[
new_state.dot(reverse_prod) + new_state[-1], 0]
Q_int += sparse(
(inf_rate, (s_present, inf_to)),
shape=(total_size, total_size))
inf_event_row = concatenate((inf_event_row, s_present))
inf_event_col = concatenate((inf_event_col, inf_to))
inf_event_class = concatenate(
(inf_event_class, classes_present[i]*ones((len(s_present)))))
# input('Press enter to continue')
# # disp('Infection events done')
# # Now do exposure to detected or undetected
det_to = zeros(len(e_present), dtype=my_int)
det_rate = zeros(len(e_present))
undet_to = zeros(len(e_present), dtype=my_int)
undet_rate = zeros(len(e_present))
for k in range(len(e_present)):
# First do detected
old_state = copy(states[e_present[k], :])
det_rate[k] = det[i] * alpha * old_state[5*i+1]
new_state = copy(old_state)
new_state[5*i + 1] -= 1
new_state[5*i + 2] += 1
det_to[k] = index_vector[
new_state.dot(reverse_prod) + new_state[-1], 0]
            # Then do undetected
undet_rate[k] = (1.0 - det[i]) * alpha * old_state[5*i+1]
new_state = copy(old_state)
new_state[5*i + 1] -= 1
new_state[5*i + 3] += 1
undet_to[k] = index_vector[
new_state.dot(reverse_prod) + new_state[-1], 0]
Q_int += sparse(
(det_rate, (e_present, det_to)),
shape=(total_size, total_size))
Q_int += sparse(
(undet_rate, (e_present, undet_to)),
shape=(total_size, total_size))
        # # disp('Incubation events done')
# Now do recovery of detected cases
rec_to = zeros(len(d_present), dtype=my_int)
rec_rate = zeros(len(d_present))
for k in range(len(d_present)):
old_state = copy(states[d_present[k], :])
rec_rate[k] = gamma * old_state[5*i+2]
new_state = copy(old_state)
new_state[5*i+2] -= 1
new_state[5*i+4] += 1
rec_to[k] = index_vector[
new_state.dot(reverse_prod) + new_state[-1], 0]
Q_int += sparse(
(rec_rate, (d_present, rec_to)),
shape=(total_size, total_size))
# disp('Recovery events from detecteds done')
# Now do recovery of undetected cases
rec_to = zeros(len(u_present), dtype=my_int)
rec_rate = zeros(len(u_present))
for k in range(len(u_present)):
old_state = copy(states[u_present[k], :])
rec_rate[k] = gamma*old_state[5*i+3]
new_state = copy(old_state)
new_state[5*i+3] -= 1
new_state[5*i+4] += 1
rec_to[k] = index_vector[
new_state.dot(reverse_prod) +new_state[-1], 0]
Q_int = Q_int + sparse(
(rec_rate, (u_present, rec_to)),
shape=(total_size, total_size))
# disp('Recovery events from undetecteds done')
S = Q_int.sum(axis=1).getA().squeeze()
Q_int += sparse((
-S, (arange(total_size), arange(total_size))))
return \
Q_int, states, \
array(inf_event_row, dtype=my_int, ndmin=1), \
array(inf_event_col, dtype=my_int, ndmin=1), \
array(inf_event_class, dtype=my_int, ndmin=1)
def within_household_SEDURQ(
composition, model_input):
'''Assuming frequency-dependent homogeneous within-household mixing
composition[i] is the number of individuals in age-class i inside the
household'''
sus = model_input.sigma
det = model_input.det
tau = model_input.tau
K_home = model_input.k_home
alpha = model_input.alpha
gamma = model_input.gamma
D_iso_rate = model_input.D_iso_rate
U_iso_rate = model_input.U_iso_rate
discharge_rate = model_input.discharge_rate
adult_bd = model_input.adult_bd
class_is_isolating = model_input.class_is_isolating
# Set of individuals actually present here
classes_present = where(composition.ravel() > 0)[0]
# Check number of adults and whether children_present
no_adults = sum(composition[adult_bd:])
children_present = sum(composition[:adult_bd])>0
K_home = K_home[ix_(classes_present, classes_present)]
sus = sus[classes_present]
det = det[classes_present]
tau = tau[classes_present]
r_home = atleast_2d(diag(sus).dot(K_home))
states, total_size, reverse_prod, index_vector, rows = build_state_matrix(composition, classes_present, 6)
d_pos = 2 + 6 * arange(len(classes_present))
u_pos = 3 + 6 * arange(len(classes_present))
iso_pos = 5 + 6 * arange(len(classes_present))
iso_adjusted_comp = composition[classes_present] - states[:,iso_pos] # This is number of people of each age class present in the household given some may isolate
iso_adjusted_comp[iso_adjusted_comp==0] = 1 # Replace zeros with ones - we only ever use this as a denominator whose numerator will be zero anyway if it should be zero
if (iso_adjusted_comp<1).any():
pdb.set_trace()
adults_isolating = states[:,6*adult_bd+5::6].sum(axis=1) # Number of adults isolating by state
Q_int = sparse((total_size, total_size))
inf_event_row = array([], dtype=my_int)
inf_event_col = array([], dtype=my_int)
inf_event_class = array([], dtype=my_int)
# Add events for each age class
for i in range(len(classes_present)):
s_present = where(states[:, 6*i] > 0)[0]
e_present = where(states[:, 6*i+1] > 0)[0]
d_present = where(states[:, 6*i+2] > 0)[0]
u_present = where(states[:, 6*i+3] > 0)[0]
# First do infection events
inf_to = zeros(len(s_present), dtype=my_int)
inf_rate = zeros(len(s_present))
for k in range(len(s_present)):
old_state = copy(states[s_present[k], :])
inf_rate[k] = old_state[6*i] * (
r_home[i, :].dot(
                    (old_state[d_pos] / iso_adjusted_comp[s_present[k]])
                    + (old_state[u_pos] / iso_adjusted_comp[s_present[k]]) * tau))
new_state = old_state.copy()
new_state[6*i] -= 1
new_state[6*i + 1] += 1
inf_to[k] = index_vector[
new_state.dot(reverse_prod) + new_state[-1], 0]
Q_int += sparse(
(inf_rate, (s_present, inf_to)),
shape=(total_size, total_size))
inf_event_row = concatenate((inf_event_row, s_present))
inf_event_col = concatenate((inf_event_col, inf_to))
inf_event_class = concatenate(
(inf_event_class, classes_present[i]*ones((len(s_present)))))
# input('Press enter to continue')
# # disp('Infection events done')
# # Now do exposure to detected or undetected
det_to = zeros(len(e_present), dtype=my_int)
det_rate = zeros(len(e_present))
undet_to = zeros(len(e_present), dtype=my_int)
undet_rate = zeros(len(e_present))
for k in range(len(e_present)):
# First do detected
old_state = copy(states[e_present[k], :])
det_rate[k] = det[i] * alpha * old_state[6*i+1]
new_state = copy(old_state)
new_state[6*i + 1] -= 1
new_state[6*i + 2] += 1
det_to[k] = index_vector[
new_state.dot(reverse_prod) + new_state[-1], 0]
            # Then do undetected
undet_rate[k] = (1.0 - det[i]) * alpha * old_state[6*i+1]
new_state = copy(old_state)
new_state[6*i + 1] -= 1
new_state[6*i + 3] += 1
undet_to[k] = index_vector[
new_state.dot(reverse_prod) + new_state[-1], 0]
Q_int += sparse(
(det_rate, (e_present, det_to)),
shape=(total_size, total_size))
Q_int += sparse(
(undet_rate, (e_present, undet_to)),
shape=(total_size, total_size))
        # # disp('Incubation events done')
# Now do recovery of detected cases
rec_to = zeros(len(d_present), dtype=my_int)
rec_rate = zeros(len(d_present))
for k in range(len(d_present)):
old_state = copy(states[d_present[k], :])
rec_rate[k] = gamma * old_state[6*i+2]
new_state = copy(old_state)
new_state[6*i+2] -= 1
new_state[6*i+4] += 1
rec_to[k] = index_vector[
new_state.dot(reverse_prod) + new_state[-1], 0]
Q_int += sparse(
(rec_rate, (d_present, rec_to)),
shape=(total_size, total_size))
# disp('Recovery events from detecteds done')
# Now do recovery of undetected cases
rec_to = zeros(len(u_present), dtype=my_int)
rec_rate = zeros(len(u_present))
for k in range(len(u_present)):
old_state = copy(states[u_present[k], :])
rec_rate[k] = gamma*old_state[6*i+3]
new_state = copy(old_state)
new_state[6*i+3] -= 1
new_state[6*i+4] += 1
rec_to[k] = index_vector[
new_state.dot(reverse_prod) +new_state[-1], 0]
Q_int = Q_int + sparse(
(rec_rate, (u_present, rec_to)),
shape=(total_size, total_size))
# disp('Recovery events from undetecteds done')
#Now do isolation
if (class_is_isolating[i,classes_present]).any():
if (i<adult_bd) or not children_present: # If i is a child class or there are no children around, anyone can isolate
d_can_isolate = d_present
u_can_isolate = u_present
else: # If children are present adults_isolating must stay below no_adults-1 so the children still have a guardian
d_can_isolate = where((states[:, 6*i+2] > 0)*(adults_isolating<no_adults-1))[0]
u_can_isolate = where((states[:, 6*i+3] > 0)*(adults_isolating<no_adults-1))[0]
iso_present = where(states[:, 6*i+5] > 0)[0]
# Isolation of detected cases
iso_to = zeros(len(d_can_isolate), dtype=my_int)
iso_rate = zeros(len(d_can_isolate))
for k in range(len(d_can_isolate)):
old_state = copy(states[d_can_isolate[k], :])
iso_rate[k] = D_iso_rate * old_state[6*i+2]
new_state = copy(old_state)
new_state[6*i+2] -= 1
new_state[6*i+5] += 1
iso_to[k] = index_vector[
new_state.dot(reverse_prod) + new_state[-1], 0]
Q_int += sparse(
(iso_rate, (d_can_isolate, iso_to)),
shape=(total_size, total_size))
# Isolation of undetected cases
iso_to = zeros(len(u_can_isolate), dtype=my_int)
iso_rate = zeros(len(u_can_isolate))
for k in range(len(u_can_isolate)):
old_state = copy(states[u_can_isolate[k], :])
iso_rate[k] = U_iso_rate * old_state[6*i+3]
new_state = copy(old_state)
new_state[6*i+3] -= 1
new_state[6*i+5] += 1
iso_to[k] = index_vector[
new_state.dot(reverse_prod) + new_state[-1], 0]
Q_int += sparse(
(iso_rate, (u_can_isolate, iso_to)),
shape=(total_size, total_size))
# Return home of isolated cases
return_to = zeros(len(iso_present), dtype=my_int)
return_rate = zeros(len(iso_present))
for k in range(len(iso_present)):
old_state = copy(states[iso_present[k], :])
return_rate[k] = discharge_rate * old_state[6*i+5]
new_state = copy(old_state)
new_state[6*i+5] -= 1
new_state[6*i+4] += 1
return_to[k] = index_vector[
new_state.dot(reverse_prod) + new_state[-1], 0]
Q_int += sparse(
(return_rate, (iso_present, return_to)),
shape = (total_size,total_size))
S = Q_int.sum(axis=1).getA().squeeze()
Q_int += sparse((
-S, (arange(total_size), arange(total_size))))
return \
Q_int, states, \
array(inf_event_row, dtype=my_int, ndmin=1), \
array(inf_event_col, dtype=my_int, ndmin=1), \
array(inf_event_class, dtype=my_int, ndmin=1)
def within_household_SEPIRQ(
composition, model_input):
'''Assuming frequency-dependent homogeneous within-household mixing
composition[i] is the number of individuals in age-class i inside the
household'''
sus = model_input.sus
tau = model_input.tau
K_home = model_input.k_home
alpha_1 = model_input.alpha_1
alpha_2 = model_input.alpha_2
gamma = model_input.gamma
E_iso_rate = model_input.E_iso_rate
P_iso_rate = model_input.P_iso_rate
I_iso_rate = model_input.I_iso_rate
discharge_rate = model_input.discharge_rate
adult_bd = model_input.adult_bd
class_is_isolating = model_input.class_is_isolating
iso_method = model_input.iso_method # Set to 0 if isolating externaly, 1 if isolating internally
tau_Q = (tau/alpha_2 + 1/gamma)/(1/alpha_1+1/alpha_2+1/gamma) # Scaling for infection from quarantined cases
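    # tau_Q is the duration-weighted mean infectivity of an internally
    # isolating case: 0 while latent (mean 1/alpha_1), tau while prodromal
    # (mean 1/alpha_2) and 1 while symptomatic (mean 1/gamma)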
# Set of individuals actually present here
classes_present = where(composition.ravel() > 0)[0]
# Check number of adults and whether children_present
no_adults = sum(composition[adult_bd:])
children_present = sum(composition[:adult_bd])>0
K_home = K_home[ix_(classes_present, classes_present)]
sus = sus[classes_present]
tau = tau[classes_present]
tau_Q = tau_Q[classes_present]
r_home = atleast_2d(diag(sus).dot(K_home))
states, total_size, reverse_prod, index_vector, rows = build_state_matrix(composition, classes_present, 6)
p_pos = 2 + 6 * arange(len(classes_present))
i_pos = 3 + 6 * arange(len(classes_present))
iso_pos = 5 + 6 * arange(len(classes_present))
iso_adjusted_comp = composition[classes_present] - (1-iso_method)*states[:,iso_pos] # This is number of people of each age class present in the household given some may isolate
iso_adjusted_comp[iso_adjusted_comp==0] = 1 # Replace zeros with ones - we only ever use this as a denominator whose numerator will be zero anyway if it should be zero
if (iso_adjusted_comp<1).any():
pdb.set_trace()
adults_isolating = states[:,6*adult_bd+5::6].sum(axis=1) # Number of adults isolating by state
Q_int = sparse((total_size, total_size))
inf_event_row = array([], dtype=my_int)
inf_event_col = array([], dtype=my_int)
inf_event_class = array([], dtype=my_int)
# Add events for each age class
for i in range(len(classes_present)):
s_present = where(states[:, 6*i] > 0)[0]
e_present = where(states[:, 6*i+1] > 0)[0]
p_present = where(states[:, 6*i+2] > 0)[0]
i_present = where(states[:, 6*i+3] > 0)[0]
# First do infection events
inf_to = zeros(len(s_present), dtype=my_int)
inf_rate = zeros(len(s_present))
for k in range(len(s_present)):
old_state = copy(states[s_present[k], :])
inf_rate[k] = old_state[6*i] * (
r_home[i, :].dot(
                    (old_state[i_pos] / iso_adjusted_comp[s_present[k]])
                    + (old_state[p_pos] / iso_adjusted_comp[s_present[k]]) * tau # tau is prodromal reduction
                    + iso_method*(old_state[iso_pos] / iso_adjusted_comp[s_present[k]]) * tau_Q)) # if we are doing internal isolation we scale down by tau_Q
new_state = old_state.copy()
new_state[6*i] -= 1
new_state[6*i + 1] += 1
inf_to[k] = index_vector[
new_state.dot(reverse_prod) + new_state[-1], 0]
Q_int += sparse(
(inf_rate, (s_present, inf_to)),
shape=(total_size, total_size))
        inf_event_row = concatenate((inf_event_row, s_present))
"""smp_base.models_actinf
..moduleauthor:: <NAME>, 2016-2017
Active inference models based on :mod:`smp.actinf` project code.
This file contains the models_learners which can be used as adaptive models
of sensorimotor contexts designed for an active inference
approach. Currently implemented models are
- k nearest neighbours (knn)
- sparse online gaussian process models powered by Harold Soh's OTL library (soesgp, storkgp)
- gaussian mixture model based on pypr's gmm (gmm)
- hebbian connected SOM via bruno lara, guido schillaci (hebbsom)
- incremental gaussian mixtures (igmm via juan acevedo-valle)
- SOMs connected with hebbian associative links
TODO:
- consolidate calling convention / api for all model types
-- init with single argument config dictionary
-- predict, fit, sample, conditionals, visualize
-- common test code
- implement missing models
- missing: single hidden layer networks: linear/elm/res with RLS/FORCE/MDN/EH, merge with otl
- missing: imol/models.py
- missing: im/models.py
- missing: smp/models_seq.py
- missing: smp/models_karpmdn.py
- MDN model: florens, karpathy, hardmaru, amjad, cbonnett, edward
- including 'predict_naive' and 'predict_full' methods that would capture returning confidences about the current prediction
- other variables that might be used by the context to modulate exploration, learning and behaviour
- disambiguate static and dynamic (conditional inference types) idim/odim
- consistent sampling from probabilistic models (gmm, hebbsom, ...): sample from prior, stick with last sample's vicinity
- model visualization
- def visualize for all models
- plot current / final som configuration
- plot densities
- hebbsom
- som track residual error from map training
- som use residual for adjusting rbf width
- som extend sampling to sample actual prediction from gaussian with unit's mu and sigma
"""
import pickle
from functools import partial
import numpy as np
import scipy.sparse as sparse
import scipy.stats as stats
import pylab as pl
import matplotlib.gridspec as gridspec
import pandas as pd
from pandas.plotting import scatter_matrix
from smp_base.models import smpModelInit, smpModel
from smp_base.plot_utils import savefig
from smp_base.plot_models import plot_nodes_over_data_1d_components_fig, plot_nodes_over_data_1d_components
# KNN
from sklearn.neighbors import KNeighborsRegressor
# Online Gaussian Processes
try:
from otl_oesgp import OESGP
from otl_storkgp import STORKGP
HAVE_SOESGP = True
except ImportError as e:
print("couldn't import online GP models:", e)
HAVE_SOESGP = False
# Gaussian mixtures PyPR
try:
import pypr.clustering.gmm as gmm
except ImportError as e:
print("Couldn't import pypr.clustering.gmm", e)
# hebbsom
try:
from kohonen.kohonen import Map, Parameters, ExponentialTimeseries, ConstantTimeseries
from kohonen.kohonen import Gas, GrowingGas, GrowingGasParameters, Filter
from kohonen.kohonen import argsample
except ImportError as e:
print("Couldn't import lmjohns3's kohonon SOM lib", e)
# IGMM
try:
from igmm_cond import IGMM_COND
except ImportError as e:
print("Couldn't import IGMM lib", e)
# requirements: otl, kohonen, pypr, igmm
from smp_base.models_reservoirs import LearningRules
import logging
from smp_base.common import get_module_logger
logger = get_module_logger(modulename = 'models_actinf', loglevel = logging.DEBUG)
saveplot = False # True
model_classes = ["KNN", "SOESGP", "STORKGP", "GMM", "HebbSOM", "IGMM", "all"]
class smpKNN(smpModel):
"""smpKNN
k-NN function approximator smpmodel originally used for the active
inference developmental model but generally reusable.
"""
defaults = {
'idim': 1,
'odim': 1,
'n_neighbors': 5,
'prior': 'random', # ['random', 'linear']
'prior_width': 0.01,
}
@smpModelInit()
def __init__(self, conf):
"""smpKNN.__init__
init
"""
smpModel.__init__(self, conf)
# comply
if not hasattr(self, 'modelsize'):
self.modelsize = 1000 # self.n_neighbors
# the scikit base model
self.fwd = KNeighborsRegressor(n_neighbors = self.n_neighbors)
# the data store
self.X_ = []
self.y_ = []
self.hidden_dist = np.zeros((1, self.n_neighbors))
self.hidden_dist_sum = np.zeros((1, 1))
self.hidden_dist_sum_avg = np.zeros((1, 1))
self.hidden_idx = np.zeros((1, self.n_neighbors))
# bootstrap the model with prior
self.bootstrap()
def get_params(self, *args, **kwargs):
if 'param' in kwargs:
if 'w_norm' in kwargs['param']:
# return np.tile(np.array([(len(self.X_) + len(self.y_))/2.0]), (self.odim, 1))
return np.tile(np.array([len(self.y_)]), (self.odim, 1))
return self.fwd.get_params()
def visualize(self):
pass
def bootstrap(self):
"""smpKNN.bootstrap
Bootstrap the model with some initial dummy samples to prepare it for inference after init
"""
# bootstrap model
self.n_samples_bootstrap = max(10, self.n_neighbors)
logger.info("%s.bootstrapping with %s prior" % (self.__class__.__name__, self.prior))
if self.prior == 'random':
for i in range(self.n_samples_bootstrap):
if self.idim == self.odim:
self.X_.append(np.ones((self.idim, )) * i * 0.1)
self.y_.append(np.ones((self.odim, )) * i * 0.1)
else:
noise_amp = self.prior_width
self.X_.append(np.random.uniform(
-noise_amp, noise_amp, (self.idim,)))
self.y_.append(np.random.uniform(
-noise_amp, noise_amp, (self.odim,)))
elif self.prior == 'linear':
for i in range(self.n_samples_bootstrap):
p_ = -self.prior_width/2.0 + float(i)/self.n_samples_bootstrap
X = np.ones((self.idim, )) * p_ + np.random.uniform(-0.01, 0.01)
y = np.ones((self.odim, )) * p_ + np.random.uniform(-0.01, 0.01)
self.X_.append(X)
self.y_.append(y)
# print(self.X_, self.y_)
self.fwd.fit(self.X_, self.y_)
def predict(self, X):
"""smpKNN.predict
Predict Y using X on the current model state
"""
# FIXME: change scikit to store intermediate query results
# or: fully local predict def
self.hidden_dist, self.hidden_idx = self.fwd.kneighbors(X)
self.hidden_dist_sum = np.mean(self.hidden_dist)
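        # exponentially smoothed mean neighbour distance (EMA, decay 0.9),
        # a slowly varying summary of how densely the query is supported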
self.hidden_dist_sum_avg = 0.1 * self.hidden_dist_sum + 0.9 * self.hidden_dist_sum_avg
# self.hidden_idx_norm = self.hidden_idx.astype(np.float) * self.hidden_dist_sum_avg/1000.0
        self.hidden_idx_norm = self.hidden_idx.astype(float) * 1e-3
# logger.debug('hidden dist = %s, idx = %s', self.hidden_dist, self.hidden_idx)
return self.fwd.predict(X)
def fit(self, X, y):
"""smpKNN.fit
Single fit Y to X step. If the input is a batch of data, fit
that entire batch and forgetting existing data in X' and
Y'. If the input is a single data point, append to X' and Y'
and refit the model to that new data.
"""
if X.shape[0] > 1: # batch of data
# self.modelsize = X.shape[0]
return self.fit_batch(X, y)
# logger.debug("%s.fit[%d] len(X_) = %d, len(y_) = %d, modelsize = %d", self.__class__.__name__, self.cnt, len(self.X_), len(self.y_), self.modelsize)
self.cnt += 1
# if len(self.X_) > self.modelsize: return
self.X_.append(X[0,:])
# self.y_.append(self.m[0,:])
# self.y_.append(self.goal[0,:])
self.y_.append(y[0,:])
self.fwd.fit(self.X_, self.y_)
def fit_batch(self, X, y):
"""smpKNN.fit
Batch fit Y to X
"""
self.X_ = X.tolist()
self.y_ = y.tolist()
self.fwd.fit(self.X_, self.y_)
################################################################################
# ActiveInference OTL library based model, base class implementing predict,
# predict_step (otl can't handle batches), fit, save and load methods
class smpOTLModel(smpModel):
"""smpOTLModel
Sparse online echo state gaussian process function approximator
for active inference
"""
defaults = {
'idim': 1,
'odim': 1,
'otlmodel_type': 'soesgp',
'otlmodel': None,
'memory': 1,
'lag_off': 1,
}
@smpModelInit()
def __init__(self, conf):
# if conf is None: conf = self.defaults
smpModel.__init__(self, conf)
# self.otlmodel_type = "soesgp"
# self.otlmodel = None
# introspection
self.cnt = 0
# explicit short term memory needed for tapping across lag gaps
self.r_l = []
print( "otlmodel.memory", self.memory)
self.r_ = np.zeros((self.modelsize, self.memory))
# self.r_ = np.random.uniform(-1, 1, (self.modelsize, self.memory)) * 1.0
# output variables arrays
self.pred = np.zeros((self.odim, 1))
self.var = np.zeros((self.odim, 1))
# output variables lists
self.pred_l = []
self.var_l = []
def update(self, X_):
# update state
self.otlmodel.update(X_)
# store state
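        # r_ is a FIFO of recent reservoir states (one per column): roll
        # left, then overwrite the newest column so fit() can tap the
        # lagged state at r_[..., -lag_off]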
self.r_ = np.roll(self.r_, shift = -1, axis = -1)
self.otlmodel.getState(self.r_l)
tmp = np.array([self.r_l]).T
# print("%s r_ = %s, r[...,[-1] = %s, tmp = %s" % (self.__class__.__name__, self.r_.shape, self.r_[...,[-1]].shape, tmp.shape))
self.r_[...,[-1]] = tmp.copy()
def predict(self, X,rollback = False):
# row vector input
if X.shape[0] > 1: # batch input
ret = np.zeros((X.shape[0], self.odim))
for i in range(X.shape[0]):
ret[i] = self.predict_step(X[i].flatten().tolist(), rollback = rollback)
return ret
else:
X_ = X.flatten().tolist()
return self.predict_step(X_, rollback = rollback)
def predict_step(self, X_, rollback = False):
# update state and store it
self.update(X_)
# predict output variables from state
self.otlmodel.predict(self.pred_l, self.var_l)
# return np.zeros((1, self.odim))
# set prediction variables
self.pred = np.array(self.pred_l)
self.var = np.abs(np.array(self.var_l))
# roll back the reservoir state if rollback on
if rollback:
self.r_ = np.roll(self.r_, shift = 1, axis = -1)
self.otlmodel.setState(self.r_[...,[-1]].copy().flatten().tolist())
self.cnt += 1
return self.pred.reshape((1, self.odim))
def fit(self, X, y, update = True):
"""smpOTLModel.fit
Fit model to data X, y
"""
if self.cnt < self.memory: return
if X.shape[0] > 1: # batch of data
return self.fit_batch(X, y)
if update:
X_ = X.flatten().tolist()
self.update(X_)
# print("X.shape", X.shape, len(X_), X_)
# self.otlmodel.update(X_)
# copy state into predefined structure
# self.otlmodel.getState(self.r)
# consider lag and restore respective state
# print("otlmodel.fit lag_off", self.lag_off)
r_lagged = self.r_[...,[-self.lag_off]]
# print ("r_lagged", r_lagged.shape)
self.otlmodel.setState(r_lagged.flatten().tolist())
# prepare target and fit
# print("soesgp.fit y", type(y))
y_ = y.flatten().tolist()
self.otlmodel.train(y_)
# restore chronologically most recent state
r_lagged = self.r_[...,[-1]]
self.otlmodel.setState(r_lagged.flatten().tolist())
def fit_batch(self, X, y):
for i in range(X.shape[0]):
self.fit(X[[i]], y[[i]])
def save(self, filename):
otlmodel_ = self.otlmodel
self.otlmodel.save(filename + "_%s_model" % self.otlmodel_type)
print("otlmodel", otlmodel_)
self.otlmodel = None
print("otlmodel", otlmodel_)
pickle.dump(self, open(filename, "wb"))
self.otlmodel = otlmodel_
print("otlmodel", self.otlmodel)
@classmethod
def load(cls, filename):
# otlmodel_ = cls.otlmodel
otlmodel_wrap = pickle.load(open(filename, "rb"))
print("%s.load cls.otlmodel filename = %s, otlmodel_wrap.otlmodel_type = %s" % (cls.__name__, filename, otlmodel_wrap.otlmodel_type))
if otlmodel_wrap.otlmodel_type == "soesgp":
otlmodel_cls = OESGP
elif otlmodel_wrap.otlmodel_type == "storkgp":
otlmodel_cls = STORKGP
else:
otlmodel_cls = OESGP
otlmodel_wrap.otlmodel = otlmodel_cls()
print("otlmodel_wrap.otlmodel", otlmodel_wrap.otlmodel)
otlmodel_wrap.otlmodel.load(filename + "_%s_model" % otlmodel_wrap.otlmodel_type)
# print("otlmodel_wrap.otlmodel", dir(otlmodel_wrap.otlmodel))
# cls.bootstrap(otlmodel_wrap)
# otlmodel_wrap.otlmodel = otlmodel_
return otlmodel_wrap
################################################################################
# Sparse Online Echo State Gaussian Process (SOESGP) OTL library model
class smpSOESGP(smpOTLModel):
"""smpSOESGP
Sparse online echo state gaussian process function approximator
for active inference
"""
# # for input modulation style
# defaults = {
# 'idim': 1,
# 'odim': 1,
# 'otlmodel_type': 'soesgp',
# 'otlmodel': None,
# 'modelsize': 300,
# 'input_weight': 2.0,
# 'output_feedback_weight': 0.0,
# 'activation_function': 1,
# 'leak_rate': 0.8, # 0.9,
# 'connectivity': 0.1,
# 'spectral_radius': 0.99, # 0.999,
# # 'kernel_params': [10.0, 10.0], # [2.0, 2.0],
# # 'noise': 0.01,
# # 'kernel_params': [10.0, 10.0], # [2.0, 2.0],
# # 'noise': 1.0, # 0.01,
# 'kernel_params': [2.0, 2.0], # [2.0, 2.0],
# 'noise': 5e-2, # 0.01,
# 'epsilon': 1e-3,
# 'capacity': 100, # 10
# 'random_seed': 101,
# 'visualize': False,
# }
# for self-sampling style
defaults = {
'idim': 1,
'odim': 1,
'otlmodel_type': 'soesgp',
'otlmodel': None,
'memory': 1,
'lag_off': 1,
'modelsize': 200,
'output_feedback_weight': 0.0,
'use_inputs_in_state': False,
'activation_function': 0,
'connectivity': 0.1,
# 'kernel_params': [10.0, 10.0], # [2.0, 2.0],
# 'noise': 0.01,
# 'kernel_params': [10.0, 10.0], # [2.0, 2.0],
# 'noise': 1.0, # 0.01,
# pointmass
'input_weight': 1.0,
'kernel_params': [10.0, 1.5],
'noise': 5e-3, #8e-2, # 0.01,
'leak_rate': 0.1, # 0.9,
'spectral_radius': 0.9,
# # barrel
# 'input_weight': 1.0,
# 'kernel_params': [1.2, 1.2], # [2.0, 2.0],
# 'noise': 1e-2,
# 'leak_rate': 0.9, # 0.9,
# 'spectral_radius': 0.99, # 0.999,
'epsilon': 1e-4,
'capacity': 200, # 10
'random_seed': 106,
'visualize': False,
}
@smpModelInit()
def __init__(self, conf):
smpOTLModel.__init__(self, conf = conf)
# self.otlmodel_type = "soesgp"
self.otlmodel = OESGP()
# self.res_size = 100 # 20
# self.input_weight = 1.0 # 1.0
# self.output_feedback_weight = 0.0
# self.activation_function = 1
# # leak_rate: x <= (1-lr) * input + lr * x
# self.leak_rate = 0.96 # 0.05 # 0.0 # 0.1 # 0.3
# self.connectivity = 0.1
# self.spectral_radius = 0.99
# # covariances
# self.kernel_params = [2.0, 2.0]
# # self.kernel_params = [1.0, 1.0]
# # self.kernel_params = [0.1, 0.1]
# self.noise = 0.05
# self.epsilon = 1e-3
# self.capacity = 100
# self.random_seed = 100 # FIXME: constant?
# self.X_ = []
# self.y_ = []
self.bootstrap()
def bootstrap(self):
from .models_reservoirs import res_input_matrix_random_sparse
self.otlmodel.init(self.idim, self.odim, self.modelsize, self.input_weight,
self.output_feedback_weight, self.activation_function,
self.leak_rate, self.connectivity, self.spectral_radius,
False, self.kernel_params, self.noise, self.epsilon,
self.capacity, self.random_seed)
im = res_input_matrix_random_sparse(self.idim, self.modelsize, 0.2) * self.input_weight
# print("im", type(im))
self.otlmodel.setInputWeights(im.tolist())
################################################################################
# StorkGP OTL based model
class smpSTORKGP(smpOTLModel):
"""smpSTORKGP
Sparse online echo state gaussian process function approximator
for active inference
"""
defaults = {
'idim': 1,
'odim': 1,
'otlmodel_type': 'storkgp',
'otlmodel': None,
'modelsize': 50,
'memory': 1,
'lag_off': 1,
'input_weight': 1.0,
'output_feedback_weight': 0.0,
'activation_function': 1,
'leak_rate': 0.96,
'connectivity': 0.1,
'spectral_radius': 0.99,
'kernel_params': [2.0, 2.0],
'noise': 0.05,
'epsilon': 1e-3,
'capacity': 100,
'random_seed': 100,
'visualize': False,
}
@smpModelInit()
def __init__(self, conf):
smpOTLModel.__init__(self, conf = conf)
# self.otlmodel_type = "storkgp"
self.otlmodel = STORKGP()
# self.res_size = self.modelsize # 100 # 20
self.bootstrap()
def bootstrap(self):
self.otlmodel.init(
self.idim, self.odim,
self.modelsize, # window size
0, # kernel type
[0.5, 0.99, 1.0, self.idim],
1e-4,
1e-4,
100 # seed
)
self.otlmodel.getState(self.r_l)
# print("|self.r_l| = ", len(self.r_l))
self.r_ = np.zeros((len(self.r_l), self.memory))
################################################################################
# inference type multivalued models: GMM, SOMHebb, MDN
# these are somewhat different in operation than the models above
# - fit vs. fit_batch
# - can create conditional submodels
# GMM - gaussian mixture model
class smpGMM(smpModel):
"""smpGMM
Gaussian mixture model based on PyPR's gmm
"""
defaults = {
'idim': 1, 'odim': 1, 'K': 10, 'fit_interval': 100,
'numepisodes': 10, 'visualize': False, 'em_max_iter': 1000}
@smpModelInit()
def __init__(self, conf):
"""smpGMM.__init__
"""
smpModel.__init__(self, conf)
self.cdim = self.idim + self.odim
# data
self.Xy_ = []
self.X_ = []
self.y_ = []
self.Xy = np.zeros((1, self.cdim))
# fitting configuration
# self.fit_interval = 100
self.fitted = False
# number of mixture components
# self.K = K
# list of K component idim x 1 centroid vectors
# self.cen_lst = []
self.cen_lst = [] # np.random.uniform(-1, 1, (self.K,)).tolist()
# list of K component idim x idim covariances
self.cov_lst = [] # [np.eye(self.cdim) * 0.1 for _ in range(self.K)]
# K mixture coeffs
# self.p_k = None
self.p_k = None # [1.0/self.K for _ in range(self.K)]
# log loss after training
self.logL = 0
print("%s.__init__, idim = %d, odim = %d" % (self.__class__.__name__, self.idim, self.odim))
def fit(self, X, y):
"""smpGMM.fit
Single step fit: X, y are single patterns
"""
# print("%s.fit" % (self.__class__.__name__), X.shape, y.shape)
if X.shape[0] == 1:
# single step update, add to internal data and refit if length matches update intervale
self.Xy_.append(np.hstack((X[0], y[0])))
self.X_.append(X[0])
self.y_.append(y[0])
if len(self.Xy_) % self.fit_interval == 0:
# print("len(Xy_)", len(self.Xy_), self.Xy_[99])
# pl.plot(self.Xy_)
# pl.show()
# self.fit_batch(self.Xy)
self.fit_batch(self.X_, self.y_)
else:
# batch fit, just fit model to the input data batch
self.Xy_ += np.hstack((X, y)).tolist()
# self.X_ += X.tolist()
# self.y_ += y.tolist()
# self.Xy = np.hstack((X, y))
# self.Xy = np.asarray(self.Xy_)
# print("X_, y_", self.X_, self.y_)
self.fit_batch(X, y)
def fit_batch(self, X, y):
"""smpGMM.fit_batch
Fit the GMM model with batch data
"""
# print("%s.fit X.shape = %s, y.shape = %s" % (self.__class__.__name__, X.shape, y.shape))
# self.Xy = np.hstack((X[:,3:], y[:,:]))
# self.Xy = np.hstack((X, y))
# self.Xy = np.asarray(self.Xy_)
# self.Xy = Xy
# X = np.asarray(X_)
# y = np.asarray(y_)
self.Xy = np.hstack((X, y))
# self.Xy = np.asarray(self.Xy_)
print("%s.fit_batch self.Xy.shape = %s" % (self.__class__.__name__, self.Xy.shape))
# fit gmm
# max_iter = 10
try:
self.cen_lst, self.cov_lst, self.p_k, self.logL = gmm.em_gm(
self.Xy, K = self.K, max_iter = self.em_max_iter,
verbose = False, iter_call = None)
self.fitted = True
except Exception as e:
print( "%s.fit_batch fit failed with %s" % (self.__class__.__name__, e.args ,))
# sys.exit()
print("%s.fit_batch Log likelihood (how well the data fits the model) = %f" % (self.__class__.__name__, self.logL))
def predict(self, X, rollback = False):
"""smpGMM.predict
Predict Y from X by forwarding to default sample call
"""
return self.sample(X, rollback = rollback)
def sample(self, X, rollback = False):
"""smpGMM.sample
Default sample function
Assumes the input is X with dims = idim located in
the first part of the conditional inference combined input vector
This method constructs the corresponding conditioning input from the reduced input
"""
print("%s.sample: X.shape = %s, idim = %d" % (self.__class__.__name__, X.shape, self.idim))
assert X.shape[1] == self.idim
# cond = np.zeros((, self.cdim))
uncond = np.empty((X.shape[0], self.odim))
uncond[:] = np.nan
# print("%s.sample: uncond.shape = %s" % (self.__class__.__name__, uncond.shape))
# np.array([np.nan for i in range(self.odim)])
cond = np.hstack((X, uncond))
# cond[:self.idim] = X.copy()
# cond[self.idim:] = np.nan
# print("%s.sample: cond.shape = %s" % (self.__class__.__name__, cond.shape))
if X.shape[0] > 1: # batch
return self.sample_batch(cond)
return self.sample_cond(cond)
def sample_cond(self, X):
"""smpGMM.sample_cond
Single sample from the GMM model with conditioning on single input pattern X
TODO: function conditional_dist, make predict/sample comply with sklearn and use the lowlevel
cond_dist for advanced uses like dynamic conditioning
"""
        # gmm.cond_dist wants a (n, ) shape, not (1, n)
if len(X.shape) > 1:
cond = X[0]
else:
cond = X
# print("%s.sample_cond: cond.shape = %s" % (self.__class__.__name__, cond.shape))
if not self.fitted:
# return np.zeros((3,1))
# model has not been bootstrapped, return random goal
cond_sample = np.random.uniform(-1.0, 1.0, (1, self.odim)) # FIXME hardcoded shape
# cen_con = self.cen_lst
# cov_con = self.cov_lst
# new_p_k = self.p_k
else:
(cen_con, cov_con, new_p_k) = gmm.cond_dist(cond, self.cen_lst, self.cov_lst, self.p_k)
# print( "cen_con", cen_con, "cov_con", cov_con, "p_k", new_p_k)
cond_sample = gmm.sample_gaussian_mixture(cen_con, cov_con, new_p_k, samples = 1)
# print("%s.sample_cond: cond_sample.shape = %s" % (self.__class__.__name__, cond_sample.shape))
return cond_sample
def sample_batch(self, X):
"""smpGMM.sample_batch
        If X has more than one row, return a batch of samples for
every condition row in X
"""
samples = np.zeros((X.shape[0], self.odim))
for i in range(X.shape[0]):
samples[i] = self.sample_cond(X[i])
return samples
# def sample_batch_legacy(self, X, cond_dims = [0], out_dims = [1], resample_interval = 1):
# """smpGMM.sample_batch_legacy
# Sample from gmm model with conditioning batch input X legacy function
# """
# # compute conditional
# sampmax = 20
# numsamplesteps = X.shape[0]
# odim = len(out_dims) # self.idim - X.shape[1]
# self.y_sample_ = np.zeros((odim,))
# self.y_sample = np.zeros((odim,))
# self.y_samples_ = np.zeros((sampmax, numsamplesteps, odim))
# self.y_samples = np.zeros((numsamplesteps, odim))
# self.cond = np.zeros_like(X[0])
# print("%s.sample_batch: y_samples_.shape = %s" % (self.__class__.__name__, self.y_samples_.shape))
# for i in range(numsamplesteps):
# # if i % 100 == 0:
# if i % resample_interval == 0:
# # print("%s.sample_batch: sampling gmm cond prob at step %d" % (self.__class__.__name__, i))
# ref_interval = 1
# # self.cond = self.logs["EP"][(i+ref_interval) % self.logs["EP"].shape[0]] # self.X__[i,:3]
# self.cond = X[(i+ref_interval) % numsamplesteps] # self.X__[i,:3]
# # self.cond = np.array()
# # self.cond[:2] = X_
# # print(self.cond, out_dims, X.shape)
# self.cond[out_dims] = np.nan
# (self.cen_con, self.cov_con, self.new_p_k) = gmm.cond_dist(self.cond, self.cen_lst, self.cov_lst, self.p_k)
# # print "run_hook_e2p_sample gmm.cond_dist:", np.array(self.cen_con).shape, np.array(self.cov_con).shape, self.new_p_k.shape
# samperr = 1e6
# j = 0
# while samperr > 0.1 and j < sampmax:
# self.y_sample = gmm.sample_gaussian_mixture(self.cen_con, self.cov_con, self.new_p_k, samples = 1)
# self.y_samples_[j,i] = self.y_sample
# samperr_ = np.linalg.norm(self.y_sample - X[(i+1) % numsamplesteps,:odim], 2)
# if samperr_ < samperr:
# samperr = samperr_
# self.y_sample_ = self.y_sample
# j += 1
# # print "sample/real err", samperr
# print("sampled", j, "times")
# else:
# # retain samples from last sampling interval boundary
# self.y_samples_[:,i] = self.y_samples_[:,i-1]
# # return sample array
# self.y_samples[i] = self.y_sample_
# return self.y_samples, self.y_samples_
# IGMM - incremental gaussian mixture model, from juan
class smpIGMM(smpModel):
"""smpIGMM
Gaussian mixture model based on PyPR's gmm
"""
defaults = {'idim': 1, 'odim': 1, 'K': 10, 'numepisodes': 10, 'visualize': False}
@smpModelInit()
def __init__(self, conf):
"""smpIGMM.__init__
"""
smpModel.__init__(self, conf)
self.cdim = self.idim + self.odim
# number of mixture components
# self.K = K
        # list of K component idim x 1 centroid vectors
        self.cen_lst = np.random.uniform(-1, 1, (self.K,)).tolist()
        # list of K component idim x idim covariances
        self.cov_lst = [np.eye(self.cdim) * 0.1 for _ in range(self.K)]
        # K mixture coeffs
        self.p_k = [1.0/self.K for _ in range(self.K)]
# log loss after training
self.logL = 0
# data
self.Xy_ = []
self.X_ = []
self.y_ = []
self.Xy = np.zeros((1, self.cdim))
# fitting configuration
self.fit_interval = 100
self.fitted = False
self.model = IGMM_COND(min_components=3, forgetting_factor=0.5)
# print("%s.__init__, idim = %d, odim = %d" % (self.__class__.__name__, self.idim, self.odim))
def fit(self, X, y):
"""smpIGMM.fit
Single step fit: X, y are single patterns
"""
# print("%s.fit" % (self.__class__.__name__), X.shape, y.shape)
if X.shape[0] == 1:
            # single step update, add to internal data and refit if length matches update interval
self.Xy_.append(np.hstack((X[0], y[0])))
self.X_.append(X[0])
self.y_.append(y[0])
if len(self.Xy_) % self.fit_interval == 0:
# print("len(Xy_)", len(self.Xy_), self.Xy_[99])
# pl.plot(self.Xy_)
# pl.show()
# self.fit_batch(self.Xy)
self.fit_batch(self.X_, self.y_)
self.Xy_ = []
self.X_ = []
self.y_ = []
else:
# batch fit, just fit model to the input data batch
self.Xy_ += np.hstack((X, y)).tolist()
# self.X_ += X.tolist()
# self.y_ += y.tolist()
# self.Xy = np.hstack((X, y))
# self.Xy = np.asarray(self.Xy_)
# print("X_, y_", self.X_, self.y_)
self.fit_batch(X, y)
def fit_batch(self, X, y):
"""smpIGMM.fit_batch
Fit the IGMM model with batch data
"""
# print("%s.fit X.shape = %s, y.shape = %s" % (self.__class__.__name__, X.shape, y.shape))
# self.Xy = np.hstack((X[:,3:], y[:,:]))
# self.Xy = np.hstack((X, y))
# self.Xy = np.asarray(self.Xy_)
# self.Xy = Xy
# X = np.asarray(X_)
# y = np.asarray(y_)
self.Xy = np.hstack((X, y))
# self.Xy = np.asarray(self.Xy_)
print("%s.fit_batch self.Xy.shape = %s" % (self.__class__.__name__, self.Xy.shape))
# fit gmm
# self.cen_lst, self.cov_lst, self.p_k, self.logL = gmm.em_gm(self.Xy, K = self.K, max_iter = 1000,
# verbose = False, iter_call = None)
self.model.train(self.Xy)
self.fitted = True
# print("%s.fit_batch Log likelihood (how well the data fits the model) = %f" % (self.__class__.__name__, self.logL))
def predict(self, X):
"""smpIGMM.predict
Predict Y from X by forwarding to default sample call
"""
# print("IGMM.predict X.shape", X.shape, X)
return self.sample(X)
def sample(self, X):
"""smpIGMM.sample
Default sample function
Assumes the input is X with dims = idim located in
the first part of the conditional inference combined input vector
This method constructs the corresponding conditioning input from the reduced input
"""
# print("%s.sample: X.shape = %s, idim = %d" % (self.__class__.__name__, X.shape, self.idim))
assert X.shape[1] == self.idim
# cond = np.zeros((, self.cdim))
uncond = np.empty((X.shape[0], self.odim))
uncond[:] = np.nan
# print("%s.sample: uncond.shape = %s, %s" % (self.__class__.__name__, uncond.shape, uncond))
cond = np.hstack((X, uncond))
# cond[:self.idim] = X.copy()
# cond[self.idim:] = np.nan
# print("%s.sample: cond.shape = %s, %s" % (self.__class__.__name__, cond.shape, cond))
if X.shape[0] > 1: # batch
return self.sample_batch(cond)
sample = self.sample_cond(cond)
# print("%s.sample sample = %s, X = %s" % (self.__class__.__name__, sample.shape, X.shape))
# FIXME: fix that inference configuration
if sample.shape[1] == self.odim:
return sample
else:
return sample[...,X.shape[1]:]
def sample_cond(self, X):
"""smpIGMM.sample_cond
Single sample from the IGMM model with conditioning on single input pattern X
TODO: function conditional_dist, make predict/sample comply with sklearn and use the lowlevel
cond_dist for advanced uses like dynamic conditioning
"""
if not self.fitted:
# return np.zeros((3,1))
# model has not been bootstrapped, return random prediction
return np.random.uniform(-0.1, 0.1, (1, self.odim)) # FIXME hardcoded shape
        # gmm.cond_dist wants a (n, ) shape, not (1, n)
if len(X.shape) > 1:
cond = X[0]
else:
cond = X
# print("%s.sample_cond: cond.shape = %s" % (self.__class__.__name__, cond.shape))
# (cen_con, cov_con, new_p_k) = gmm.cond_dist(cond, self.cen_lst, self.cov_lst, self.p_k)
# cond_sample = gmm.sample_gaussian_mixture(cen_con, cov_con, new_p_k, samples = 1)
cond_sample = self.model.sample_cond_dist(cond, 1)
# print("%s.sample_cond: cond_sample.shape = %s, %s" % (self.__class__.__name__, cond_sample.shape, cond_sample))
return cond_sample
def sample_batch(self, X):
"""smpIGMM.sample_batch
        If X has more than one row, return a batch of samples for
every condition row in X
"""
samples = np.zeros((X.shape[0], self.odim))
for i in range(X.shape[0]):
samples[i] = self.sample_cond(X[i])
return samples
################################################################################
# Hebbian SOM model: connect to SOMs with hebbian links
class smpHebbianSOM(smpModel):
"""smpHebbianSOM class
Hebbian SOM model
FIXME: conf: kohonen/map.Map init distribution and scaling
FIXME: conf: fit_hebb onset delay
    FIXME: conf: sampling mode (weights, gaussian(wgts, sigmas), ...)
"""
defaults = {
'idim': 1, 'odim': 1, 'numepisodes': 100, 'visualize': False, 'mapsize_e': 10, 'mapsize_p': 10, 'som_lr': 1e-0,
'som_nhs': 3, 'init_range': (-1.0, 1.0)}
@smpModelInit()
def __init__(self, conf):
"""smpHebbianSOM
Two SOM's coding the input and output space connected by associative Hebbian links
"""
smpModel.__init__(self, conf)
# SOMs training self assessment
self.cnt_fit = 0
self.cnt_predict = 0
self.fitted = False
self.soms_cnt_fit = 0
self.soms_cnt_predict = 0
self.soms_fitted = False
self.hebb_cnt_fit = 0
self.hebb_cnt_predict = 0
self.hebb_fitted = False
self.decay_const = -1e-5
# learning rate proxy
self.ET = ExponentialTimeseries
self.CT = ConstantTimeseries
self.mapsize = 10 ** 2 # 100
# self.mapsize_e = mapsize_e # 100 # int(np.sqrt(self.mapsize)) # max(10, self.idim * 3)
# self.mapsize_p = mapsize_p # 150 # int(np.sqrt(self.mapsize)) # max(10, self.odim * 3)
self.numepisodes_som = self.numepisodes
self.numepisodes_hebb = self.numepisodes
# FIXME: make neighborhood_size decrease with time
# som_lr = som_lr # 1e0
# som_lr = 1e-1 # Haykin, p475
# som_lr = 5e-1
# som_lr = 5e-4
# self.som_nhs = 3 # 1.5
maptype = "som"
# maptype = "gas"
# SOM exteroceptive stimuli 2D input
if maptype == "som":
if self.idim == 1:
mapshape_e = (self.mapsize_e, )
else:
mapshape_e = (self.mapsize_e, self.mapsize_e)
# 1D better?
# mapshape_e = (self.mapsize_e, )
self.kw_e = self.kwargs(
shape = mapshape_e, dimension = self.idim, lr_init = self.som_lr,
neighborhood_size = self.som_nhs, init_variance = 1.0) #, z = 0.001)
# self.kw_e = self.kwargs(shape = (self.mapsize_e, self.mapsize_e), dimension = self.idim, lr_init = 0.5, neighborhood_size = 0.6)
self.som_e = Map(Parameters(**self.kw_e))
elif maptype == "gas":
self.kw_e = self.kwargs_gas(shape = (self.mapsize_e ** 2, ), dimension = self.idim, lr_init = self.som_lr, neighborhood_size = 0.5)
self.som_e = Gas(Parameters(**self.kw_e))
# SOM proprioceptive stimuli 3D input
if maptype == "som":
if self.idim == 1:
mapshape_p = (self.mapsize_p, )
else:
mapshape_p = (int(self.mapsize_p), int(self.mapsize_p))
# 1D better?
mapshape_p = (self.mapsize_p, )
self.kw_p = self.kwargs(shape = mapshape_p, dimension = self.odim, lr_init = self.som_lr,
neighborhood_size = self.som_nhs, init_variance = 0.2) #, z = 0.001)
# self.kw_p = self.kwargs(shape = (int(self.mapsize_p * 1.5), int(self.mapsize_p * 1.5)), dimension = self.odim, lr_init = 0.5, neighborhood_size = 0.7)
self.som_p = Map(Parameters(**self.kw_p))
elif maptype == "gas":
self.kw_p = self.kwargs_gas(shape = (self.mapsize_p ** 2, ), dimension = self.odim, lr_init = self.som_lr, neighborhood_size = 0.5)
self.som_p = Gas(Parameters(**self.kw_p))
print("HebbianSOM mapsize_e,p", self.mapsize_e, self.mapsize_p)
# FIXME: there was a nice trick for node distribution init in _some_ recently added paper
# create "filter" using existing SOM_e, filter computes activation on distance
self.filter_e = Filter(self.som_e, history=lambda: 0.0)
# print("neurons_e", self.filter_e.map.neurons)
self.filter_e.reset()
# print("neurons_e", self.filter_e.map.neurons)
self.filter_e_lr = self.filter_e.map._learning_rate
# kw_f_p = kwargs(shape = (mapsize * 3, mapsize * 3), dimension = 3, neighborhood_size = 0.5, lr_init = 0.1)
# filter_p = Filter(Map(Parameters(**kw_f_p)), history=lambda: 0.01)
# create "filter" using existing SOM_p, filter computes activation on distance
self.filter_p = Filter(self.som_p, history=lambda: 0.0)
self.filter_p.reset()
self.filter_p_lr = self.filter_p.map._learning_rate
# Hebbian links
# hebblink_som = np.random.uniform(-1e-4, 1e-4, (np.prod(som_e._shape), np.prod(som_p._shape)))
# hebblink_filter = np.random.uniform(-1e-4, 1e-4, (np.prod(filter_e.map._shape), np.prod(filter_p.map._shape)))
self.hebblink_som = np.zeros((np.prod(self.som_e._shape), np.prod(self.som_p._shape)))
# self.hebblink_filter = np.zeros((np.prod(self.filter_e.map._shape), np.prod(self.filter_p.map._shape)))
self.hebblink_filter = np.random.normal(0, 1e-6, (np.prod(self.filter_e.map._shape), np.prod(self.filter_p.map._shape)))
# # sparse hebblink
# self.hebblink_filter = sparse.rand(m = np.prod(self.filter_e.map._shape),
# n = np.prod(self.filter_p.map._shape)) * 1e-3
self.hebblink_use_activity = True # use activation or distance
# Hebbian learning rate
if self.hebblink_use_activity:
# self.hebblink_et = ExponentialTimeseries(self.decay_const, 1e-0, 0)
self.hebblink_et = ConstantTimeseries(1e-0)
# self.hebblink_et = ConstantTimeseries(0.0)
else:
self.hebblink_et = ConstantTimeseries(1e-12)
# visualization
if self.visualize:
self.figs.append(plot_nodes_over_data_1d_components_fig(title = self.__class__.__name__, numplots = self.idim + self.odim))
# SOM argument dict
def kwargs(self, shape=(10, 10), z=0.001, dimension=2, lr_init = 1.0, neighborhood_size = 1, init_variance = 1.0):
"""smpHebbianSOM params function for Map"""
return dict(
dimension = dimension,
shape = shape,
neighborhood_size = self.ET(self.decay_const, neighborhood_size, 0.1), # 1.0),
learning_rate=self.ET(self.decay_const, lr_init, 0.0),
# learning_rate=self.CT(lr_init),
noise_variance=z,
init_variance = init_variance)
def kwargs_gas(self, shape=(100,), z=0.001, dimension=3, lr_init = 1.0, neighborhood_size = 1):
"""smpHebbianSOM params function for Gas"""
return dict(
dimension=dimension,
shape=shape,
neighborhood_size = self.ET(self.decay_const, neighborhood_size, 1.0),
learning_rate=self.ET(self.decay_const, lr_init, 0.0),
noise_variance=z)
def visualize_model(self):
"""smpHebbianSOM.visualize_model
Plot the model state visualization
"""
e_nodes, p_nodes = hebbsom_get_map_nodes(self, self.idim, self.odim)
e_nodes_cov = np.tile(np.eye(self.idim) * 0.05, e_nodes.shape[0]).T.reshape((e_nodes.shape[0], self.idim, self.idim))
p_nodes_cov = np.tile(np.eye(self.odim) * 0.05, p_nodes.shape[0]).T.reshape((p_nodes.shape[0], self.odim, self.odim))
X = np.vstack(self.Xhist)
Y = np.vstack(self.Yhist)
# print(X.shape)
plot_nodes_over_data_1d_components(
fig = self.figs[0], X = X, Y = Y, mdl = self,
e_nodes = e_nodes, p_nodes = p_nodes, e_nodes_cov = e_nodes_cov, p_nodes_cov = p_nodes_cov,
saveplot = False
)
def set_learning_rate_constant(self, c = 0.0):
# print("fit_hebb", self.filter_e.map._learning_rate)
self.filter_e.map._learning_rate = self.CT(c)
self.filter_p.map._learning_rate = self.CT(c)
# fix the SOMs with learning rate constant 0
self.filter_e_lr = self.filter_e.map._learning_rate
self.filter_p_lr = self.filter_p.map._learning_rate
def fit_soms(self, X, y):
"""smpHebbianSOM"""
# print("%s.fit_soms fitting X = %s, y = %s" % (self.__class__.__name__, X.shape, y.shape))
# if X.shape[0] != 1, r
# e = EP[i,:dim_e]
# p = EP[i,dim_e:]
self.filter_e.map._learning_rate = self.filter_e_lr
self.filter_p.map._learning_rate = self.filter_p_lr
# don't learn twice
# som_e.learn(e)
# som_p.learn(p)
# TODO for j in numepisodes
if X.shape[0] > 1:
numepisodes = self.numepisodes_som
else:
numepisodes = 1
if X.shape[0] > 100:
print("%s.fit_soms batch fitting of size %d" % (self.__class__.__name__, X.shape[0]))
i = 0
j = 0
eps_convergence = 0.01
# eps_convergence = 0.005
dWnorm_e_ = 1 # short horizon
dWnorm_p_ = 1
dWnorm_e__ = dWnorm_e_ + 2 * eps_convergence # long horizon
dWnorm_p__ = dWnorm_p_ + 2 * eps_convergence
idx_shuffle = np.arange(X.shape[0])
# for j in range(numepisodes):
# (dWnorm_e_ == 0 and dWnorm_p_ == 0) or
# while (dWnorm_e_ > 0.05 and dWnorm_p_ > 0.05):
do_convergence = True
while (do_convergence) and (np.abs(dWnorm_e__ - dWnorm_e_) > eps_convergence and np.abs(dWnorm_p__ - dWnorm_p_) > eps_convergence): # and j < 10:
if j > 0 and j % 10 == 0:
print("%s.fit_soms episode %d / %d" % (self.__class__.__name__, j, numepisodes))
if X.shape[0] == 1:
# print("no convergence")
do_convergence = False
dWnorm_e = 0
dWnorm_p = 0
np.random.shuffle(idx_shuffle)
# print("neurons_e 1", self.filter_e.map.neurons.flatten())
for i in range(X.shape[0]):
# lidx = idx_shuffle[i]
lidx = i
self.filter_e.learn(X[lidx])
dWnorm_e += np.linalg.norm(self.filter_e.map.delta)
self.filter_p.learn(y[lidx])
dWnorm_p += np.linalg.norm(self.filter_p.map.delta)
# print("neurons_e 2", self.filter_e.map.neurons.flatten(), X, X[lidx])
dWnorm_e /= X.shape[0]
dWnorm_e /= self.filter_e.map.numunits
dWnorm_p /= X.shape[0]
dWnorm_p /= self.filter_p.map.numunits
# short
dWnorm_e_ = 0.8 * dWnorm_e_ + 0.2 * dWnorm_e
dWnorm_p_ = 0.8 * dWnorm_p_ + 0.2 * dWnorm_p
# long
dWnorm_e__ = 0.83 * dWnorm_e__ + 0.17 * dWnorm_e_
dWnorm_p__ = 0.83 * dWnorm_p__ + 0.17 * dWnorm_p_
# print("%s.fit_soms batch e |dW| = %f, %f, %f" % (self.__class__.__name__, dWnorm_e, dWnorm_e_, dWnorm_e__))
# print("%s.fit_soms batch p |dW| = %f, %f, %f" % (self.__class__.__name__, dWnorm_p, dWnorm_p_, dWnorm_p__))
j += 1
if True and self.soms_cnt_fit % 100 == 0:
print("%s.fit_soms batch e mean error = %f, min = %f, max = %f" % (
self.__class__.__name__,
np.asarray(self.filter_e.distances_).mean(),
np.asarray(self.filter_e.distances_[-1]).min(),
np.asarray(self.filter_e.distances_).max() ))
print("%s.fit_soms batch p mean error = %f, min = %f, max = %f" % (
self.__class__.__name__,
np.asarray(self.filter_p.distances_).mean(),
np.asarray(self.filter_p.distances_[-1]).min(),
np.asarray(self.filter_p.distances_).max() ))
# print np.argmin(som_e.distances(e)) # , som_e.distances(e)
self.soms_cnt_fit += 1
def fit_hebb(self, X, y):
"""smpHebbianSOM"""
# print("%s.fit_hebb fitting X = %s, y = %s" % (self.__class__.__name__, X.shape, y.shape))
if X.shape[0] == 1 and self.soms_cnt_fit < 200: # 200: # 1500:
return
# numepisodes_hebb = 1
if X.shape[0] > 100:
print("%s.fit_hebb batch fitting of size %d" % (self.__class__.__name__, X.shape[0]))
numsteps = X.shape[0]
################################################################################
# fix the SOMs with learning rate constant 0
self.filter_e_lr = self.filter_e.map._learning_rate
self.filter_p_lr = self.filter_p.map._learning_rate
# print("fit_hebb", self.filter_e.map._learning_rate)
self.filter_e.map._learning_rate = self.CT(0.0)
self.filter_p.map._learning_rate = self.CT(0.0)
e_shape = (np.prod(self.filter_e.map._shape), 1)
p_shape = (np.prod(self.filter_p.map._shape), 1)
eps_convergence = 0.05
z_err_coef_1 = 0.8
z_err_coef_2 = 0.83
z_err_norm_ = 1 # fast
z_err_norm__ = z_err_norm_ + 2 * eps_convergence # slow
Z_err_norm = np.zeros((self.numepisodes_hebb*numsteps,1))
Z_err_norm_ = np.zeros((self.numepisodes_hebb*numsteps,1))
W_norm = np.zeros((self.numepisodes_hebb*numsteps,1))
# # plotting
# pl.ion()
# fig = pl.figure()
# fig2 = pl.figure()
# TODO for j in numepisodes
# j = 0
if X.shape[0] > 1:
numepisodes = self.numepisodes_hebb
else:
numepisodes = 1
i = 0
dWnorm_ = 10.0
j = 0
# for j in range(numepisodes):
do_convergence = True
while do_convergence and z_err_norm_ > eps_convergence and np.abs(z_err_norm__ - z_err_norm_) > eps_convergence: # and j < 20:
if j > 0 and j % 10 == 0:
print("%s.fit_hebb episode %d / %d" % (self.__class__.__name__, j, numepisodes))
if X.shape[0] == 1:
# print("no convergence")
do_convergence = False
for i in range(X.shape[0]):
# just activate
self.filter_e.learn(X[i])
self.filter_p.learn(y[i])
# fetch data induced activity
if self.hebblink_use_activity:
p_ = self.filter_p.activity.reshape(p_shape)
# print(p_.shape)
else:
                    p_ = self.filter_p.distances(y[i]).flatten().reshape(p_shape)
p__ = p_.copy()
# p_ = p_ ** 2
p_ = (p_ == np.max(p_)) * 1.0
e_ = self.filter_e.activity.reshape(e_shape) # flatten()
e__ = e_.copy()
# e_ = e_ ** 2
e_ = (e_ == np.max(e_)) * 1.0
# compute prediction for p using e activation and hebbian weights
if self.hebblink_use_activity:
# print(self.hebblink_filter.T.shape, self.filter_e.activity.reshape(e_shape).shape)
# p_bar = np.dot(self.hebblink_filter.T, self.filter_e.activity.reshape(e_shape))
# e_act = e_.reshape(e_shape)
# e_act
p_bar = np.dot(self.hebblink_filter.T, e_.reshape(e_shape))
# # sparse
# p_bar = self.hebblink_filter.T.dot(e_.reshape(e_shape))
# print("p_bar", type(p_bar))
else:
                    p_bar = np.dot(self.hebblink_filter.T, self.filter_e.distances(X[i]).flatten().reshape(e_shape))
p_bar_ = p_bar.copy()
p_bar = (p_bar == np.max(p_bar)) * 1.0
# print("p_bar", type(p_bar), type(p_bar_))
# # plotting
# ax1 = fig.add_subplot(411)
# ax1.cla()
# ax1.plot(e_ * np.max(e__))
# ax1.plot(e__)
# ax2 = fig.add_subplot(412)
# ax2.cla()
# ax2.plot(p_ * np.max(p_bar_))
# ax2.plot(p__)
# ax2.plot(p_bar * np.max(p_bar_))
# ax2.plot(p_bar_)
# ax3 = fig.add_subplot(413)
# ax3.cla()
# ax3.plot(self.filter_e.distances_[-1])
# ax4 = fig.add_subplot(414)
# ax4.cla()
# ax4.plot(self.filter_p.distances_[-1])
# pl.pause(0.001)
# pl.draw()
# inject activity prediction
p_bar_sum = p_bar.sum()
if p_bar_sum > 0:
p_bar_normed = p_bar / p_bar_sum
else:
p_bar_normed = np.zeros(p_bar.shape)
# compute prediction error: data induced activity - prediction
# print("p_", np.linalg.norm(p_))
# print("p_bar", np.linalg.norm(p_bar))
z_err = p_ - p_bar
idx = np.argmax(p_bar_)
# print("sum E", np.sum(z_err))
# print("idx", p_bar_, idx, z_err[idx])
# z_err = (p_[idx] - p_bar[idx]) * np.ones_like(p_)
# z_err = np.ones_like(p_) *
# print("z_err", z_err)
# z_err = p_bar - p_
# z_err_norm = np.linalg.norm(z_err, 2)
z_err_norm = np.sum(np.abs(z_err))
# if j == 0 and i == 0:
# z_err_norm_ = z_err_norm
# else:
z_err_norm_ = z_err_coef_1 * z_err_norm_ + (1 - z_err_coef_1) * z_err_norm
z_err_norm__ = z_err_coef_2 * z_err_norm__ + (1 - z_err_coef_2) * z_err_norm
w_norm = np.linalg.norm(self.hebblink_filter)
# logidx = (j*numsteps) + i
# Z_err_norm [logidx] = z_err_norm
# Z_err_norm_[logidx] = z_err_norm_
# W_norm [logidx] = w_norm
# z_err = p_bar - self.filter_p.activity.reshape(p_bar.shape)
# print "p_bar.shape", p_bar.shape
# print "self.filter_p.activity.flatten().shape", self.filter_p.activity.flatten().shape
# if i % 100 == 0:
# print("%s.fit_hebb: iter %d/%d: z_err.shape = %s, |z_err| = %f, |W| = %f, |p_bar_normed| = %f" % (self.__class__.__name__, logidx, (self.numepisodes_hebb*numsteps), z_err.shape, z_err_norm_, w_norm, np.linalg.norm(p_bar_normed)))
# d_hebblink_filter = et() * np.outer(self.filter_e.activity.flatten(), self.filter_p.activity.flatten())
eta = self.hebblink_et()
if eta > 0.0:
if False and self.hebblink_use_activity:
# eta = 5e-4
# outer = np.outer(self.filter_e.activity.flatten(), np.clip(z_err, 0, 1))
# outer = np.outer(e_, np.clip(z_err, 0, 1))
# outer = np.outer(e_, p_)
# outer = np.outer(e_, p__ * np.clip(z_err, 0, 1))
# FIXME: this can be optimized with sparsity
# print("e_", e_, e__, p_)
                        outer = np.outer(e_ * e__, p_)
################################################################################
# genetic algorithm networks: noise-perturbation sampling over fixed-weight modules
import gym
import random
import pybullet
import numpy as np
import pybullet_envs
import scipy.stats as ss
from copy import deepcopy
from multiprocessing import Pool
MAX_SEED = 2**16 - 1
def compute_weight_decay(weight_decay, model_param_list):
"""
Compute weight decay penalty
:param weight_decay: (float) weight decay coefficient
:param model_param_list: (ndarray) weight parameters
:return: (float) weight decay penalty
"""
return -weight_decay * np.mean(np.square(model_param_list))
class FixedWeightModule:
def __init__(self, input_dim, output_dim, bias=False, recurrent=False):
self.bias = bias
self.parameters = list()
self.input_dim = input_dim
self.output_dim = output_dim
if self.bias:
self.bias_param = np.zeros((1, self.output_dim))
self.parameters.append((self.bias_param, "bias_param"))
self.recurrent = recurrent
if self.recurrent:
self.r_weight = np.zeros((self.output_dim, self.output_dim))
self.parameters.append((self.r_weight, "r_weight"))
self.recurrent_trace = np.zeros((1, self.output_dim))
k = np.sqrt(1/self.input_dim)*0.1
self.weights = np.random.uniform(
low=-k, high=k, size=(self.input_dim, self.output_dim))
self.parameters.append((self.weights, "weights"))
self.param_ref_list = self.parameters
self.parameters = np.concatenate([_p[0].flatten() for _p in self.param_ref_list])
def forward(self, spikes, func=None):
weighted_spikes = np.matmul(spikes, self.weights) + (self.bias_param if self.bias else 0.0)
if self.recurrent:
weighted_spikes += np.matmul(self.recurrent_trace, self.r_weight)
post_synaptic = weighted_spikes
if func is not None:
post_synaptic = func(post_synaptic)
weighted_spikes = post_synaptic
if self.recurrent:
self.recurrent_trace = weighted_spikes
return weighted_spikes
def reset(self):
if self.recurrent:
self.recurrent_trace = np.zeros((1, self.output_dim))
def params(self):
return self.parameters
def update_params(self, eps, add_eps=True):
eps_index = 0
for _ref in range(len(self.param_ref_list)):
_val, _str_ref = self.param_ref_list[_ref]
pre_eps = eps_index
eps_index = eps_index + _val.size
if add_eps:
new_val = _val.flatten() + eps[pre_eps:eps_index]
else:
new_val = eps[pre_eps:eps_index]
new_val = new_val.reshape(self.param_ref_list[_ref][0].shape)
self.param_ref_list[_ref] = new_val, _str_ref
setattr(self, _str_ref, new_val)
self.parameters = np.concatenate([_p[0].flatten() for _p in self.param_ref_list])
class GANetworkDef:
def __init__(self, params, noise_std=0.01, num_eps_samples=64):
"""
        Genetic Algorithm Network Definition
        :param params: (list) parameter modules, e.g. FixedWeightModule instances
        :param noise_std: (float) noise perturbation standard deviation
:param num_eps_samples: (int) number of perturbation samples
"""
self.params = params
self.ga_optim = GAParamSampler(
noise_std=noise_std, num_eps_samples=num_eps_samples)
def parameters(self):
"""
Return list of network parameters
:return: (ndarray) network parameters
"""
params = list()
for _param in range(len(self.params)):
params.append(self.params[_param].params())
return np.concatenate(params, axis=0)
def generate_eps_samples(self, seed):
"""
Generate noise samples for list of parameters
:param seed: (int) random number seed
:return: (ndarray) parameter noise
"""
params = self.parameters()
sample = self.ga_optim.sample(params, seed)
return sample
def update_params(self, eps_sample, add_eps=True):
"""
Update internal network parameters
:param eps_sample: (ndarray) noise sample
        :param add_eps: (bool) if True, add eps_sample to the current parameters; else overwrite them
:return: None
"""
param_itr = 0
for _param in range(len(self.params)):
pre_param_itr = param_itr
param_itr += self.params[_param].parameters.size
param_sample = eps_sample[pre_param_itr:param_itr]
self.params[_param].update_params(param_sample, add_eps=add_eps)
class GANetwork(GANetworkDef):
def __init__(self, input_size, output_size, noise_std=0.01, num_eps_samples=64):
"""
Genetic Algorithm Network
:param input_size: (int) input dimensionality
:param output_size: (int) output/action dimensionality
:param noise_std: (float) noise perturbation standard deviation
:param num_eps_samples: (int) number of perturbation samples
"""
self.params = list()
self.recurrent = True
self.hidden_base_1 = 16
self.hidden_base_2 = 16
self.input_dim = input_size
self.output_dim = output_size
self.neuron_module_1 = FixedWeightModule(
self.input_dim, self.hidden_base_1
)
self.neuron_module_2 = FixedWeightModule(
self.hidden_base_1, self.hidden_base_2
)
self.action_module = FixedWeightModule(
self.hidden_base_2, self.output_dim
)
self.params.append(self.neuron_module_1)
self.params.append(self.neuron_module_2)
self.params.append(self.action_module)
super(GANetwork, self).__init__(noise_std=noise_std,
params=self.params, num_eps_samples=num_eps_samples)
def sigmoid(self, v):
        return 1 / (1 + np.exp(-v))