192 lines
5.7 KiB
Python

import numpy as np
from ctypes import c_float
import ctypes
import pathlib
import numpy as np
# Resolve the compiled helper library that sits next to this module and load
# it once at import time; the wrapper functions below all call into it.
C_LIB = (pathlib.Path(__file__).parent / "libprocessing.so").resolve()
LIBPROCESS = ctypes.CDLL(C_LIB)
def umeyama(src, dst, estimate_scale):
    """Estimate N-D similarity transformation with or without scaling.

    Parameters
    ----------
    src : (M, N) array
        Source coordinates.
    dst : (M, N) array
        Destination coordinates.
    estimate_scale : bool
        Whether to estimate scaling factor.

    Returns
    -------
    T : (N + 1, N + 1)
        The homogeneous similarity transformation matrix. The matrix contains
        NaN values only if the problem is not well-conditioned.

    References
    ----------
    .. [1] "Least-squares estimation of transformation parameters between two
           point patterns", Shinji Umeyama, PAMI 1991, :DOI:`10.1109/34.88573`
    """
    num = src.shape[0]
    dim = src.shape[1]

    # Compute mean of src and dst.
    src_mean = src.mean(axis=0)
    dst_mean = dst.mean(axis=0)

    # Subtract mean from src and dst.
    src_demean = src - src_mean
    dst_demean = dst - dst_mean

    # Eq. (38): cross-covariance of the demeaned point sets.
    A = dst_demean.T @ src_demean / num

    # Eq. (39): sign-correction vector so the recovered rotation has
    # determinant +1 (i.e. is not a reflection).
    # BUG FIX: `np.float` was removed in NumPy 1.24; use np.float64.
    d = np.ones((dim,), dtype=np.float64)
    if np.linalg.det(A) < 0:
        d[dim - 1] = -1

    T = np.eye(dim + 1, dtype=np.float64)

    U, S, V = np.linalg.svd(A)

    # Eq. (40) and (43): build the rotation, handling rank deficiency.
    rank = np.linalg.matrix_rank(A)
    if rank == 0:
        # Degenerate input: signal "not well-conditioned" with a NaN matrix.
        return np.nan * T
    elif rank == dim - 1:
        if np.linalg.det(U) * np.linalg.det(V) > 0:
            T[:dim, :dim] = U @ V
        else:
            # Temporarily flip the last sign, then restore d.
            s = d[dim - 1]
            d[dim - 1] = -1
            T[:dim, :dim] = U @ np.diag(d) @ V
            d[dim - 1] = s
    else:
        T[:dim, :dim] = U @ np.diag(d) @ V

    if estimate_scale:
        # Eq. (41) and (42).  The c_float round-trip narrows the scale to
        # float32 precision, presumably to match the C implementation used
        # elsewhere in this module -- TODO confirm.
        scale = c_float(1.0 / src_demean.var(axis=0).sum() * (S @ d)).value
    else:
        scale = 1.0

    T[:dim, dim] = dst_mean - scale * (T[:dim, :dim] @ src_mean.T)
    T[:dim, :dim] *= scale
    return T
def similarity_transform(landmarks, src_vec):
    """Invoke the C library's ``similarity_transform`` on the landmarks.

    Returns a ``(status, matrix)`` tuple where ``matrix`` is the resulting
    transform flattened into a list of six floats.
    """
    fn = LIBPROCESS.similarity_transform
    float_p = ctypes.POINTER(ctypes.c_float)
    fn.argtypes = [float_p, float_p, float_p]
    fn.restype = ctypes.c_int

    # Marshal the Python sequences into C float arrays.
    lm_arr = (ctypes.c_float * len(landmarks))(*landmarks)
    src_arr = (ctypes.c_float * len(src_vec))(*src_vec)
    out_arr = (ctypes.c_float * 6)()

    status = fn(src_arr, lm_arr, out_arr)

    # Read the six output floats back into a plain Python list.
    result = ctypes.cast(
        ctypes.byref(out_arr), ctypes.POINTER(ctypes.c_float * 6)).contents
    return status, list(result)
def warpAffine_so(image, Matrix, warp_size):
    '''
    Warp ``image`` with the 2x3 affine ``Matrix`` via the C library.

    # image_info: [rotate, height, width, channel, image_data_format, out_h, out_w]
    # image_data_format: (0:NIR888 (size of w*h*1), 1:RGB565 (size of w*h*2), 2:YUV422 (future usage), 3:RGB888 size of w*h*3)

    Returns (0, warped HxWx3 uint8 array) on success, (-1, 0) when the
    matrix is invalid.
    '''
    # Flatten the 2x3 affine matrix row-major for the C call.
    m = [Matrix[0, 0], Matrix[0, 1], Matrix[0, 2],
         Matrix[1, 0], Matrix[1, 1], Matrix[1, 2]]
    image_info = [0, image.shape[0], image.shape[1], 3, 3,
                  warp_size[1], warp_size[0]]

    # BUG FIX: the original guard `any((0, m, False)) == False` tested the
    # literal tuple, which is always truthy, so it could never fire.  Reject
    # a matrix with non-finite entries instead (umeyama() above returns a
    # NaN matrix for ill-conditioned input).
    if not all(np.isfinite(v) for v in m):
        print("m is NULL", m)
        return -1, 0

    c_function = LIBPROCESS.cv_warp_affine
    c_function.argtypes = [ctypes.POINTER(ctypes.c_int),
                           ctypes.POINTER(ctypes.c_float),
                           ctypes.c_char_p, ctypes.c_char_p]
    c_function.restype = ctypes.c_int

    # Flatten the image into a contiguous uint8 buffer for the C side.
    img_len = image.shape[0] * image.shape[1] * image.shape[2]
    frame_data = image.reshape(img_len).astype(np.uint8)
    data_p = frame_data.ctypes.data_as(ctypes.POINTER(ctypes.c_char))

    c_image_info = (ctypes.c_int * 7)(*image_info)
    c_m = (ctypes.c_float * 6)(*m)

    # Output buffer: RGBA, 4 bytes per output pixel.
    out_len = image_info[5] * image_info[6] * 4
    c_rgba_data = (ctypes.c_char * out_len)()

    c_function(c_image_info, c_m, data_p, c_rgba_data)

    m_res = ctypes.cast(ctypes.byref(c_rgba_data),
                        ctypes.POINTER(ctypes.c_char * out_len)).contents
    npdata = np.asarray([ord(byte) for byte in m_res])
    # The +128 re-bias (with uint8 wrap-around) suggests the .so emits
    # signed int8 pixel values -- TODO confirm against the C source.
    npdata = (npdata + 128).astype("uint8")
    npdata = npdata.reshape(warp_size[1], warp_size[0], 4)
    # Drop the alpha channel.
    return 0, npdata[:, :, 0:3]
def warpAffine_c(image, Matrix, warp_size):
    """Pure-Python reference of the C affine warp (nearest-neighbour).

    Parameters
    ----------
    image : np.ndarray
        (H, W, C) source image with at least 3 channels.
    Matrix : list | tuple | np.ndarray
        Flat sequence of 6 floats [a11, a12, b1, a21, a22, b2] -- the
        forward 2x3 affine transform; it is inverted here so each output
        pixel is sampled from the source image.
    warp_size : list | tuple
        (width, height) of the output.

    Returns
    -------
    np.ndarray
        (height, width, 4) uint8 array; channel 3 (alpha) is left 0, and
        pixels mapping outside the source image stay 0.
    """
    assert isinstance(image, np.ndarray)
    assert isinstance(Matrix, (list, tuple, np.ndarray))
    assert isinstance(warp_size, (list, tuple))

    # BUG FIX: the original did ``M = Matrix.reshape((-1, 1))``, which
    # crashes for list/tuple input (explicitly allowed by the assert above)
    # because it called .reshape on Matrix instead of the converted array.
    M = np.array(Matrix, dtype=np.float64).reshape(-1)

    # Round every coefficient through float32 to mirror the C code.
    for i in range(6):
        M[i] = c_float(M[i]).value

    # Invert the 2x3 affine transform in place
    # (same scheme as cv::invertAffineTransform).
    D = c_float(M[0] * M[4] - M[1] * M[3]).value
    if D != 0:
        D = c_float(1. / D).value
    A11 = c_float(M[4] * D).value
    A22 = c_float(M[0] * D).value
    M[0] = A11
    M[1] = c_float(M[1] * (-D)).value
    M[3] = c_float(M[3] * (-D)).value
    M[4] = A22
    b1 = c_float(-M[0] * M[2] - M[1] * M[5]).value
    b2 = c_float(-M[3] * M[2] - M[4] * M[5]).value
    M[2] = b1
    M[5] = b2

    dst = np.zeros((warp_size[1], warp_size[0], 4), dtype='uint8')
    for y in range(warp_size[1]):
        # 10-bit fixed-point source coordinates for this output row.
        X0 = int((M[1] * y + M[2]) * 1024)
        Y0 = int((M[4] * y + M[5]) * 1024)
        for x in range(warp_size[0]):
            adelta = int(M[0] * x * 1024)
            bdelta = int(M[3] * x * 1024)
            # +512 rounds to the nearest source pixel before the >>10.
            X = (X0 + adelta + 512) >> 10
            Y = (Y0 + bdelta + 512) >> 10
            # BUG FIX: also reject negative coordinates -- Python's negative
            # indexing silently wrapped to the opposite edge of the image.
            if 0 <= X < image.shape[1] and 0 <= Y < image.shape[0]:
                dst[y, x, 0] = image[Y, X, 0]
                dst[y, x, 1] = image[Y, X, 1]
                dst[y, x, 2] = image[Y, X, 2]
    return dst