# import required libraries
import cv2
import numpy as np
import matplotlib.cm
import matplotlib.colors
import math

MAX = 10  # should currently be 10
NormUpperBound = 2.0  # Highest Expected Number
GrayscaleThreshhold = 200
RotationAngle = 45
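
# Added note: match values above NormUpperBound normalize to > 1.0; in the color
# matrices drawn below they are therefore shown with the colormap's "over" color (black).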


### Read Images from Disk, Assumed Naming: 0.png 1.png ... n.png depending on MAX

# Turns images into contours based on the grayscale threshold
def GetContoursFromFiles():
    cnt = []
    for i in range(0, MAX, 1):
        # Read image as grayscale
        img = cv2.imread(str(i) + '.png', 0)
        # Apply thresholding to convert the image to a binary image
        ret, thresh1 = cv2.threshold(img, GrayscaleThreshhold, 255, cv2.THRESH_BINARY_INV)
        # Find the contours in the binary image
        contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        print("Number of Shapes detected in Image " + str(i) + ":", len(contours))
        cnt.append(contours[0])
    return cnt
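

# Added note: GetContoursFromFiles keeps only contours[0] for each image. If an image
# may contain several shapes, a common alternative (not used here) is to keep the
# largest contour by area, e.g.:
#   largest = max(contours, key=cv2.contourArea)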


# Does N to N matching, resulting in a 1D array that should be interpreted as a 2D array
# Could easily be modified for 1 to N matching
def MatchAll(cnt):
    mat = []
    for i in range(0, MAX, 1):
        for j in range(0, MAX, 1):
            mat.append(cv2.matchShapes(cnt[i], cnt[j], 1, 0.0))
    return mat
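

# Added illustration: the "1 to N matching" mentioned above could look roughly like the
# sketch below, which compares a single reference contour against every contour in cnt.
# MatchOneToAll is a hypothetical helper added for illustration; it is not called anywhere.
def MatchOneToAll(reference, cnt):
    row = []
    for j in range(0, MAX, 1):
        # cv2.matchShapes returns 0.0 for identical shapes; smaller values mean more similar
        row.append(cv2.matchShapes(reference, cnt[j], 1, 0.0))
    return row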


def PrintMatchingMatrix(mat):
    print("Similarity of Images: \n")
    for i in range(0, MAX, 1):
        for j in range(0, MAX, 1):
            print(f"{mat[i*MAX+j]:.2f}", end='\t')
        print("\n")
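

# Added note: in the printed matrix, the diagonal entries are 0.00 (each contour matched
# against itself), and smaller values generally indicate more similar shapes.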


def PrintMatchingMatrixNormalized(mat):
    print("Similarity of Images Normalized: \n")
    norm = matplotlib.colors.Normalize(vmin=0.0, vmax=NormUpperBound)
    for i in range(0, MAX, 1):
        for j in range(0, MAX, 1):
            print(f"{norm(mat[i*MAX+j]):.2f}", end='\t')
        print("\n")


# Does all of the heavy lifting when it comes to displaying it in a nice graphical way
# Compact but also quickly hacked together
# Builds the visual matrix using images, 1 image = 1 coordinate unit
def CreateMatchingColorMatrix(mat):

    im_res = cv2.imread(str(0) + '.png', cv2.IMREAD_COLOR)
    height, width, channels = im_res.shape
    # (Coordinate 0/0) Color (white)
    im_temp = np.full((height, width, 3), 255, np.uint8)

    norm = matplotlib.colors.Normalize(vmin=0.0, vmax=NormUpperBound)

    # Build topmost row (just iterate through all images and concat them sequentially)
    for i in range(0, MAX, 1):
        img = cv2.imread(str(i) + '.png', cv2.IMREAD_COLOR)
        im_temp = cv2.hconcat([im_temp, img])
    # This top row is now our first row
    im_res = im_temp
    # Build the matrix row by row
    for i in range(0, MAX, 1):
        im_temp = cv2.imread(str(i) + '.png', cv2.IMREAD_COLOR)
        img = np.full((height, width, 3), 255, np.uint8)
        img[:] = (0, 0, 255)
        # Individual row here, current sequential image gets chosen above, so here we can do square coloring
        for j in range(0, MAX, 1):
            cmap = matplotlib.cm.get_cmap('brg_r')
            cmap.set_over((0.0, 0.0, 0.0))
            # Gets current weight, normalises it, looks it up in the color map, converts it to full scale, colors
            img[:] = NtoF(cmap(norm(mat[i*MAX+j]))[:-1])
            # build up row
            im_temp = cv2.hconcat([im_temp, img])
        # build up matrix
        im_res = cv2.vconcat([im_res, im_temp])
    DebugDrawImage(im_res)
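

# Added note: the canvas built above is a (MAX+1) x (MAX+1) grid of image-sized cells:
# the top row shows the input images, the first column repeats them, and each remaining
# cell is a solid color encoding the match value between the row image and the column image.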


# Helper to convert Normalized color to Full-scale (255) color
def NtoF(rgb):
    return tuple([255 * x for x in rgb])
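

# Added example: NtoF((1.0, 0.5, 0.0)) returns (255.0, 127.5, 0.0).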


def DebugDrawImage(img):
    cv2.imshow("Image", img)
    cv2.waitKey(0)
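

# Added note: cv2.destroyAllWindows() can optionally be called after waitKey to close
# the preview window explicitly.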


### Rotates Image 0.png in RotationAngle steps (360/RotationAngle rotations) to test how rotations affect the outcome (they don't)

def RotationTest():
    global MAX
    MAX = math.ceil(360 / RotationAngle)

    cnt = []
    img = cv2.imread(str(0) + '.png', 0)
    for i in range(0, MAX, 1):
        ret, thresh1 = cv2.threshold(img, GrayscaleThreshhold, 255, cv2.THRESH_BINARY_INV)
        contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        cnt.append(contours[0])
        print("Number of Shapes detected in Rotation " + str(i) + ":", len(contours))
        img = rotate_image(img, RotationAngle)

    mat = MatchAll(cnt)
    PrintMatchingMatrix(mat)
    CreateMatchingColorMatrixRotation(mat)


# Quick hack of the above function to work with rotations instead of multiple images
def CreateMatchingColorMatrixRotation(mat):
    im_rot = cv2.imread(str(0) + '.png', cv2.IMREAD_COLOR)
    im_res = cv2.imread(str(0) + '.png', cv2.IMREAD_COLOR)
    height, width, channels = im_res.shape
    im_temp = np.full((height, width, 3), 255, np.uint8)
    norm = matplotlib.colors.Normalize(vmin=0.0, vmax=NormUpperBound)
    # Top row: the progressively rotated versions of 0.png
    for i in range(0, MAX, 1):
        img = im_rot
        im_temp = cv2.hconcat([im_temp, img])
        im_rot = rotate_image(img, RotationAngle)
    im_res = im_temp
    # reset
    im_rot = cv2.imread(str(0) + '.png', cv2.IMREAD_COLOR)
    for i in range(0, MAX, 1):
        im_temp = im_rot
        img = np.full((height, width, 3), 255, np.uint8)
        img[:] = (0, 0, 255)
        for j in range(0, MAX, 1):
            cmap = matplotlib.cm.get_cmap('brg_r')
            cmap.set_over((0.0, 0.0, 0.0))
            img[:] = NtoF(cmap(norm(mat[i*MAX+j]))[:-1])
            im_temp = cv2.hconcat([im_temp, img])
        im_res = cv2.vconcat([im_res, im_temp])
        im_rot = rotate_image(im_rot, RotationAngle)
    DebugDrawImage(im_res)


# Stolen from Stackoverflow and modified: https://stackoverflow.com/a/9042907
# NOTE: this function assumes the background is a lighter shade than the form to detect
# NOTE: INTER_NEAREST or other interpolation strategies might work better, depending on rotation and some other factors
def rotate_image(image, angle):
    image_center = tuple(np.array(image.shape[1::-1]) / 2)
    rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
    result = cv2.warpAffine(image, rot_mat, image.shape[1::-1],
                            flags=cv2.INTER_LINEAR,
                            borderMode=cv2.BORDER_CONSTANT,
                            borderValue=(255, 255, 255))
    return result
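

# Added example (commented out): a standalone use of rotate_image, assuming 0.png exists
# next to this script.
# rotated = rotate_image(cv2.imread('0.png', 0), RotationAngle)
# DebugDrawImage(rotated)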


if __name__ == "__main__":
    mat = MatchAll(GetContoursFromFiles())
    PrintMatchingMatrix(mat)
    PrintMatchingMatrixNormalized(mat)
    CreateMatchingColorMatrix(mat)
    #RotationTest()