-
Notifications
You must be signed in to change notification settings - Fork 15
/
PcbFunctions.py
executable file
·768 lines (619 loc) · 28.9 KB
/
PcbFunctions.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
"""
This file holds various functions to process data.
"""
import math
import numpy as np
import cv2
import sys
import io
from skidl import search,show
import os
import re
import logging
from point import *
# images of points, used for image matching (templates tried by Template_matching)
# NOTE(review): cv2.imread returns None when a file is missing; these results are
# not checked here, so a missing asset would only fail later inside
# Template_matching — confirm the asset paths exist relative to the CWD.
RectPointRight_img = cv2.imread(
    'assets/Example_images/Board_points/rectPoint.png', cv2.IMREAD_COLOR)
CircPoint_img = cv2.imread(
    'assets/Example_images/Board_points/CircPoint.png', cv2.IMREAD_COLOR)
pointImages = [RectPointRight_img, CircPoint_img]
# from: https://stackoverflow.com/questions/59345532/error-log-count-and-error-log-messages-as-list-from-logger
class CustomStreamHandler(logging.StreamHandler):
    """A StreamHandler that additionally collects emitted records.

    Every ERROR record is kept in ``self.error_logs`` and every WARNING
    record in ``self.warning_logs``; all records are still forwarded to the
    underlying stream handler.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.error_logs = []    # collected logging.LogRecord objects at ERROR level
        self.warning_logs = []  # collected logging.LogRecord objects at WARNING level

    def emit(self, record):
        """Record ERROR/WARNING records, then delegate to StreamHandler.emit."""
        bucket_by_level = {
            logging.ERROR: self.error_logs,
            logging.WARNING: self.warning_logs,
        }
        bucket = bucket_by_level.get(record.levelno)
        if bucket is not None:
            bucket.append(record)
        super().emit(record)
def get_ordered_list(points, x, y):
    """Sort *points* in place by squared distance to (x, y) and return the same list."""
    def _sq_dist(p):
        # squared distance is enough for ordering; avoids the sqrt
        return (p.x - x) ** 2 + (p.y - y) ** 2

    points.sort(key=_sq_dist)
    return points
# Points File functions
def GetPointsFromFile(File):
    '''
    Read a points file and return its full text plus a numpy array of all points.

    Expects lines of this format:
    Point: [x1,y1]
    Point: [x2,y2]

    Returns (text, points) where points is an int array shaped (n, 2):
    [[x1,y1],
     [x2,y2]]
    An empty (0, 2) array is returned when no points are present.
    '''
    EBP_String = File.read()
    # every "[number,number]" occurrence is a point (raw string: no bogus
    # escape; the old "\," escape was redundant)
    matches = re.findall(r"\[[0-9]+,[0-9]+\]", EBP_String)
    if matches:
        # strip the surrounding brackets and split on the comma; building the
        # array in one shot replaces the old O(n^2) np.append loop and the
        # deprecated np.fromstring call
        EntireBoardPoints = np.array(
            [[int(v) for v in m[1:-1].split(',')] for m in matches])
    else:
        # keep the original empty shape/dtype so callers can index columns
        EntireBoardPoints = np.empty((0, 2), dtype=int)
    return EBP_String, EntireBoardPoints
def formatize_EBP_string(EBP_String):
    """
    Align every "connected to" clause of the board-points string into a
    neat "=>"-separated column.

    :param EBP_String: entire board points string, read from file
    :return: the reformatted string
    By @ObaidAshraf
    """
    # Normalise the connector phrase's capitalisation.
    # Fix: the old pattern "[C|c]onnected [T|t]o" put '|' inside the char
    # classes, so it also matched e.g. "|onnected |o".
    EBP_String = re.sub("[Cc]onnected [Tt]o", "connected to", EBP_String)
    lines = EBP_String.split('\n')
    connector_indices = [line.find("connected to") for line in lines]
    plain_line_lengths = [len(line)
                          for line, idx in zip(lines, connector_indices)
                          if idx < 0]
    # Column where the "=>" markers line up: the rightmost connector start,
    # or the longest connector-free line if that reaches further right.
    # Fix: max(..., default=0) no longer raises ValueError when every line
    # contains "connected to".
    maxBreakPoint = max(max(connector_indices),
                        max(plain_line_lengths, default=0))
    finalStr = ""
    for line in lines:
        breakPoint = line.find("connected to")
        if breakPoint < 0:
            # no connector: copy the line through unchanged
            finalStr += line + '\n'
        else:
            # left part, padding out to the shared column (+3, as before),
            # "=>", two spaces, then the connector clause
            finalStr += line[0:breakPoint]
            finalStr += ' ' * (maxBreakPoint + 3 - breakPoint)
            finalStr += "=>"
            finalStr += '  '
            finalStr += line[breakPoint:]
            finalStr += '\n'
    return finalStr
# OpenCV functions
def GetContours(img):
    '''
    One of the core functions of this whole algorithm.
    Converts the BGR image to HSV, Otsu-thresholds the saturation channel,
    median-filters the binary mask, and returns the contours found in it.
    '''
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # only the saturation channel is used for segmentation
    saturation = cv2.split(hsv)[1]
    _, binary = cv2.threshold(
        saturation, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # median filter cleans up speckle noise in the mask
    smoothed = cv2.medianBlur(binary, 5)
    contours, _hierarchy = cv2.findContours(
        smoothed, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    return contours
def GetDominotColor(img):
    '''
    a function that returns the most dominot color in an image.\n
    returns a bgr format of the color: [B,G,R]
    '''
    # flatten to one row per pixel, keeping the channel axis
    pixels = img.reshape(-1, img.shape[-1])
    values, counts = np.unique(pixels, axis=0, return_counts=True)
    return values[np.argmax(counts)]
def PutOnTopBigBlack(image):
    """Paste *image* onto 'black.png' at offset (300, 200), display the
    result in a blocking window, and return the composed image."""
    overlay = image
    background = cv2.imread('black.png')
    x_offset, y_offset = 300, 200
    rows, cols = overlay.shape[0], overlay.shape[1]
    background[y_offset:y_offset + rows,
               x_offset:x_offset + cols] = overlay
    cv2.imshow("test", background)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return background
def DetectPointsV2(image, Debugging_Enabled = False, AlwaysUseTM = False, logger = None):
    '''
    ## version 2 of DetectingCircles.

    Detects board points (rectangular pads and circular pads) in *image*
    via contour analysis; when fewer than two points are found — or when
    AlwaysUseTM is set — falls back to template matching against the
    module-level pointImages templates.
    Contour approach adapted from => https://stackoverflow.com/questions/60637120/detect-circles-in-opencv second answer.

    @image an image that the algorithm should find the points in
    @Debugging_Enabled show intermediate images and verbose prints
    @AlwaysUseTM always run the template-matching fallback as well
    @logger optional logger; warned when fewer than two points were found
    Returns (BoardPointsArray, copy): a list of point objects and a copy of
    the input image with the detections drawn on it.
    '''
    copy = image.copy()
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blur = cv2.medianBlur(gray, 11)
    # This also captures the rectangles points
    thresh = cv2.threshold(
        blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    if Debugging_Enabled:
        cv2.imshow('blur', blur)
        cv2.imshow('thresh', thresh)
    # if this variable is too low, try to use other methods to detect points
    Num_Points_Found = 0
    BoardPointsArray = []
    # finding rectangles
    cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    # OpenCV 3 returns (image, contours, hierarchy); OpenCV 4 returns (contours, hierarchy)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.04 * peri, True)
        area = cv2.contourArea(c)
        if Debugging_Enabled:
            print("Approx: ", len(approx))
            print(area)
        # quadrilateral of plausible pad area -> treat as a rectangular point
        if len(approx) == 4 and area > 50 and area < 200:
            (x, y, w, h) = cv2.boundingRect(approx)
            #ar = w / float(h)
            #cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
            cv2.rectangle(copy, (x, y), (x+w, y+h), (0, 255, 0), 2)
            # store the rectangle's centre point
            BoardPointsArray.append(point(int(x + (w/2)), int(y + (h/2))))
            Num_Points_Found += 1
    # finding circles
    # Morph open removes small blobs so only round pads survive
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=3)
    # Find contours and filter using contour area and aspect ratio
    cnts = cv2.findContours(opening, cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.04 * peri, True)
        area = cv2.contourArea(c)
        if Debugging_Enabled:
            print("Approx: ", len(approx))
            print(area)
        # many-sided polygon of plausible area -> treat as a circular point
        if len(approx) > 5 and area > 100 and area < 500000:
            ((x, y), r) = cv2.minEnclosingCircle(c)
            cv2.circle(copy, (int(x), int(y)), int(r), (36, 255, 12), 2)
            BoardPointsArray.append(point(int(x), int(y)))
            Num_Points_Found += 1
    if Debugging_Enabled:
        cv2.imshow('Rec&Circs DetectedByV2 contours', copy)
        print("Num_Points_Found before image matching: ", Num_Points_Found)
        print(BoardPointsArray)
    # if we found only one point or less, try to find others using image matching
    # OR if flag AlwaysUseTM is True, useful for making sure we got everything
    if Num_Points_Found < 2 or AlwaysUseTM:
        if not AlwaysUseTM: print("[ii] on this run, I found less than two points -> trying to find others using image matching!")
        # Because were using image matching, we need to try each of our images of how the pcb points looks like
        # Template_matching returns x,y,w,h of the image of the point inside the contour
        # It also returns an image with the found point removed so we could try to find other points
        for pointImage in pointImages:
            foundMatch = Template_matching(image, pointImage, 0.81, Debugging_Enabled, BoardPointsArray)
            while foundMatch is not None:
                # -1,-1,-1,-1 code for duplicate, still need to remove it and retry
                # NOTE(review): foundMatch[0:4] is a tuple, so comparing it to a
                # list can never be True; Template_matching currently signals
                # duplicates by returning None instead — this branch looks dead.
                if foundMatch[0:4] == [-1,-1,-1,-1]:
                    ReturnedImage = foundMatch[4]
                    foundMatch = Template_matching(ReturnedImage, pointImage, 0.81, Debugging_Enabled, BoardPointsArray)
                    continue
                # 5th element is the image with the found match painted over
                ReturnedImage = foundMatch[4]
                foundMatch = foundMatch[:4]
                if Debugging_Enabled:
                    print("found a point at: x1,y1: {},{}; x2,y2: {},{}".format(
                        foundMatch[0], foundMatch[1], foundMatch[0] + foundMatch[2], foundMatch[1]+foundMatch[3]))
                    cv2.imshow("ReturnedImage", ReturnedImage)
                    cv2.waitKey(0)
                cv2.rectangle(copy, (foundMatch[0], foundMatch[1]), (foundMatch[0] +
                              foundMatch[2], foundMatch[1]+foundMatch[3]), (0, 0, 255), 2)
                # entering the middle point of the foundMatch - for best accuarcy
                # [ [ w / 2 + x, h / 2 + y ] ]
                BoardPointsArray.append(point(foundMatch[0] + (foundMatch[2]/2), foundMatch[1] + (foundMatch[3]/2)) )
                Num_Points_Found += 1
                #elif Debugging_Enabled: print("NOTE: Image matching returned None.")
                # search the painted-over image again for the next match
                foundMatch = Template_matching(ReturnedImage, pointImage, 0.81, Debugging_Enabled, BoardPointsArray)
        if Num_Points_Found < 2 and logger:
            logger.warning("[WW] Image matching Cannot find all points.")
        if Debugging_Enabled: print("Num_Points_Found after image matching: ", Num_Points_Found)
    return BoardPointsArray, copy
def Template_matching(img, Img_Point, DesValue = 0.81, Debug_Enable = False, AlreadyFoundPoints = None):
    '''
    A function that looks for an image inside of another img.\n
    If the best match score is below DesValue, the function returns None.
    Input: the image that The point should be inside it, The image of how the point should look like,
    A value used to filter false positives.
    Output: (x, y, w, h, img) where x,y,w,h locate Img_Point inside img and
    img is the input with the found match painted over (so a repeated call
    can find the next occurrence) — OR None when the threshold is not met
    or the match duplicates a point in AlreadyFoundPoints.
    @img the image that the algorthim should find Img_Point inside
    @Img_Point the image that should be found inside img
    @DesValue a value used to filter false positives
    @Debug_Enable show verbose printing and image processing
    @AlreadyFoundPoints optinal parameter to avoid getting points that are already detected
    '''
    fill_color = GetDominotColor(img)
    # normalized cross-correlation: higher score = better match
    score_map = cv2.matchTemplate(Img_Point, img, cv2.TM_CCOEFF_NORMED)
    if Debug_Enable:
        cv2.imshow('O_TM_Template', score_map)
    _, maxVal, _, maxLoc = cv2.minMaxLoc(score_map)
    if Debug_Enable:
        print("maxVal: ", maxVal)
        print("conf: ", score_map.max())
    # senstivity check — bail out early when the best score is too weak
    if maxVal <= DesValue:
        if Debug_Enable:
            print("Threshold not meeted!")
        return None
    x, y = maxLoc
    # template size == size of the matched region
    h, w = Img_Point.shape[:2]
    # Compare the match's centre against every already-known point so we do
    # not report the same pad twice.
    if AlreadyFoundPoints is not None:
        for known in AlreadyFoundPoints:
            if isinstance(known, point):
                if known.IsCloseToOtherPoint(point(int(x + w/2), int(y + h/2))):
                    return None
            elif isThosePointsTheSame((w / 2 + x), (h / 2 + y), known[0], known[1]):
                return None
    # paint the found region with the dominant colour so a follow-up call
    # finds the next occurrence instead of this one again
    cv2.rectangle(img, (x, y), (x+w, y+h),
                  (int(fill_color[0]), int(fill_color[1]), int(fill_color[2])), -1)
    if Debug_Enable:
        print(f"Threshold meeted: boxInTemplateMatching: x1,y1: {x},{y}; x2,y2: {x+w},{y+h}")
        # Display the original image with the rectangle around the match.
        cv2.imshow('O_TM', img)
        cv2.waitKey(0)
    return x, y, w, h, img
# Math functions
def calculateDistance(x1,y1,x2,y2):
    """
    Return the Euclidean distance between (x1, y1) and (x2, y2).

    Uses math.hypot, which avoids intermediate overflow/underflow that the
    naive sqrt-of-sum-of-squares can hit for extreme coordinates.
    """
    return math.hypot(x2 - x1, y2 - y1)
"""
def sortPointsByDistToRefPoint(refPoint, Points):
This function sorts an array of points based on their distance to a reference point.
return np.array(sorted(Points,key=lambda point:calculateDistance(refPoint[0],refPoint[1],*point)))
"""
def sortPointsByDistToRefPoint2(refPoint, Points):
    """
    Return *Points* sorted by each point's Euclidean distance to *refPoint*.
    Both refPoint and the elements of Points expose .x and .y attributes.
    """
    def _dist_to_ref(p):
        # same math as calculateDistance, inlined
        return math.sqrt((p.x - refPoint.x) ** 2 + (p.y - refPoint.y) ** 2)

    return sorted(Points, key=_dist_to_ref)
def isThosePointsTheSame(x1: int, y1: int, x2: int, y2: int, rel_tol: float = 0.15, abs_tol: float = 10.0) -> bool :
    """
    Return True when (x1, y1) and (x2, y2) are close enough to be treated
    as the same point — roughly a 10-pixel absolute margin per axis
    (or 15% relative, whichever is larger).
    """
    x_close = math.isclose(x1, x2, rel_tol=rel_tol, abs_tol=abs_tol)
    y_close = math.isclose(y1, y2, rel_tol=rel_tol, abs_tol=abs_tol)
    return x_close and y_close
# IC info string functions
def GetEstimatedPins(IC_image):
    """Return how many pins are there in an IC image, using standard silver color of pins"""
    # TODO: not implemented yet — this stub returns None for every input.
    pass
def GetAmountOfPins(IcPinInfo):
    """
    This function takes skidl's format of ic information and returns the total
    pins of that ic (entries containing "Pin"). Expected input shape:
    ['ATtiny841-SS ():', 'Pin None/1/VCC/POWER-IN', 'Pin None/10/PA3/BIDIRECTIONAL', ...]
    """
    return sum(1 for entry in IcPinInfo if "Pin" in entry)
def ICimageToSkidl(IC_image, reader, MinICchars: int = 4, Debugging_Enable = False):
    """
    OCR an IC image and resolve the printed text to a part via skidl's search().

    The image is OCRed as-is; if no usable text is found it is retried
    rotated 90 degrees CCW, then 180 degrees. Each candidate string is
    searched with skidl, and the first hit that show() accepts (no
    ERROR/WARNING marker) is returned.

    @IC_image image of an IC
    @reader EasyOCR reader object
    @MinICchars minimum text length for an OCR result to be considered
    @Debugging_Enable show intermediate images and verbose prints
    Returns (IC name, IC description, rotation angle in degrees), or
    ("Unknown IC name", "Unknown IC desc", None) when nothing matched.
    """
    TextResults = reader.readtext(IC_image)
    # fix: forward MinICchars — it was previously accepted but never used,
    # so FilterResults always ran with its own default
    candidates = FilterResults(TextResults, MinICchars)
    chipAngle = 0
    if Debugging_Enable:
        cv2.imshow("IC_image",IC_image)
        print(f"TextResults: {TextResults}")
        print(f"candidates: {candidates}")
        cv2.waitKey(0)
    # if 0, then rotate 90 ccw
    if len(candidates) == 0:
        IC_image_ROT90ccw = cv2.rotate(IC_image, cv2.ROTATE_90_COUNTERCLOCKWISE)
        chipAngle = 90
        TextResults = reader.readtext(IC_image_ROT90ccw)
        candidates = FilterResults(TextResults, MinICchars)
        if Debugging_Enable:
            cv2.imshow("IC_image",IC_image_ROT90ccw)
            print(f"TextResults: {TextResults}")
            print(f"candidates: {candidates}")
            cv2.waitKey(0)
    # if still 0, rotate 180
    if len(candidates) == 0:
        IC_image_ROT180 = cv2.rotate(IC_image, cv2.ROTATE_180)
        chipAngle = 180
        TextResults = reader.readtext(IC_image_ROT180)
        candidates = FilterResults(TextResults, MinICchars)
        if Debugging_Enable:
            cv2.imshow("IC_image",IC_image_ROT180)
            print(f"TextResults: {TextResults}")
            print(f"candidates: {candidates}")
    if len(candidates) == 0: return "Unknown IC name", "Unknown IC desc", None
    # loop over candidates, search every one with skidl search(); if there is a match just return that
    # because skidl search function actually outputs to stdout, i have to redirect it
    old_stdout = sys.stdout
    new_stdout = io.StringIO()
    sys.stdout = new_stdout
    output = []
    try:
        for candidate in candidates:
            search(str(candidate[1]))
            # collapse all whitespace; results are '...'-separated entries
            # like '74xx.kicad_sym:74HC595()'
            results = "".join(str(new_stdout.getvalue()).split()).split('...')
            # reversing list to detect junk output faster
            for result in reversed(results):
                if "Searching" in result: break
                else:
                    # example: '74xx.kicad_sym:74HC595()' => library name, then part name
                    output.append(result[:result.find(':')])
                    output.append(result[(result.find(':') + 1):-2])
    finally:
        # fix: restore stdout even when search() raises, so an OCR/search
        # failure no longer leaves the program printing into the buffer
        sys.stdout = old_stdout
    if Debugging_Enable: print(output)
    # validating output: (library, part) pairs were appended in order
    i = 0
    while i < len(output):
        show_Message = show(output[i], output[i+1])
        # fix: the original `if "ERROR" and "WARNING" not in show_Message`
        # evaluated as `("ERROR") and (...)`, i.e. it only ever checked for
        # WARNING; check both markers explicitly
        if "ERROR" not in show_Message and "WARNING" not in show_Message:
            print(f"[ii] Found IC: ICname: {output[i+1]} ICdesc: {output[i]}")
            # return Chip name, Chip description, angle
            return output[i+1], output[i], chipAngle
        i += 2
    return "Unknown IC name", "Unknown IC desc", None
def FilterResults(Results, MinICchars = 4, minThreshold = 0.3):
    """
    Keep only OCR results whose text is longer than MinICchars characters
    and whose confidence score exceeds minThreshold.

    NOTE(review): the length test is strict (>), so a text of exactly
    MinICchars characters is dropped — confirm that is intended.
    """
    return [r for r in Results if len(r[1]) > MinICchars and r[2] > minThreshold]
###############################################################################
# All of those methods uses SIFT OR SUFT and dont work as of 13/12 in opencv-contrib 4.3.x
###############################################################################
def Feature_matching2(img, Img_Point):
    '''
    A function that looks for an image inside of another img.\n
    Detects SURF keypoints in both images, FLANN-matches them, filters with
    Lowe's ratio test and displays the match visualisation in a window.
    Relies on cv2.xfeatures2d_SURF, which (per the section header above)
    is unavailable in opencv-contrib 4.3.x.
    @img the image that the algorthim should find Img_Point inside
    @Img_Point the image that should be found inside img
    '''
    cv2.imshow("test", img)
    cv2.imshow("imgpoint", Img_Point)
    # empty (0, 4) rect store, kept only for the debug print below
    matched_rects = np.empty((0, 4), dtype=int)
    # -- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
    hessian_threshold = 400
    surf = cv2.xfeatures2d_SURF.create(hessianThreshold=hessian_threshold)
    kp_img, desc_img = surf.detectAndCompute(img, None)
    kp_tmpl, desc_tmpl = surf.detectAndCompute(Img_Point, None)
    # -- Step 2: Matching descriptor vectors with a FLANN based matcher
    # Since SURF is a floating-point descriptor NORM_L2 is used
    flann = cv2.DescriptorMatcher_create(cv2.DescriptorMatcher_FLANNBASED)
    knn_matches = flann.knnMatch(desc_img, desc_tmpl, 2)
    # -- Filter matches using the Lowe's ratio test
    lowe_ratio = 0.7
    good_matches = [m for m, n in knn_matches
                    if m.distance < lowe_ratio * n.distance]
    # -- Draw matches side by side on one canvas
    canvas = np.empty((max(img.shape[0], Img_Point.shape[0]),
                       img.shape[1] + Img_Point.shape[1], 3), dtype=np.uint8)
    cv2.drawMatches(img, kp_img, Img_Point, kp_tmpl, good_matches,
                    canvas, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
    # Display the original image with the rectangle around the match.
    cv2.imshow('outputImageMatching', canvas)
    print(matched_rects)
    cv2.waitKey(0)
def match_images(img1, img2, img1_features=None, img2_features=None):
    """Given two images, returns the matches.

    Detects SURF keypoints/descriptors for both images (unless precomputed
    (keypoints, descriptors) tuples are supplied), k-NN matches them (k=2)
    with a brute-force L2 matcher, and returns the ratio-test survivors
    from filter_matches.

    NOTE(review): cv2.SURF is the removed OpenCV 2.x API — per the section
    header above, this function does not run on opencv-contrib 4.3.x.
    """
    detector = cv2.SURF(3200)
    matcher = cv2.BFMatcher(cv2.NORM_L2)
    if img1_features is None:
        kp1, desc1 = detector.detectAndCompute(img1, None)
    else:
        # caller supplied precomputed (keypoints, descriptors)
        kp1, desc1 = img1_features
    if img2_features is None:
        kp2, desc2 = detector.detectAndCompute(img2, None)
    else:
        kp2, desc2 = img2_features
    # print 'img1 - %d features, img2 - %d features' % (len(kp1), len(kp2))
    raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2, k=2)
    kp_pairs = filter_matches(kp1, kp2, raw_matches)
    return kp_pairs
def filter_matches(kp1, kp2, matches, ratio=0.75):
    """Filters features that are common to both images.

    Applies Lowe's ratio test to the k-NN *matches* (each entry a pair of
    candidate matches) and returns a list of
    (keypoint_from_img1, keypoint_from_img2) tuples for the survivors.
    """
    mkp1, mkp2 = [], []
    for m in matches:
        # ratio test: best candidate must clearly beat the runner-up
        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            best = m[0]
            mkp1.append(kp1[best.queryIdx])
            mkp2.append(kp2[best.trainIdx])
    # fix: materialise as a list — the caller (draw_matches) calls len()
    # on the result and iterates it twice, which the lazy zip iterator
    # returned before (Python 3) cannot support
    return list(zip(mkp1, mkp2))
# Match Diplaying
def draw_matches(window_name, kp_pairs, img1, img2):
    """Draws the matches.

    Estimates a RANSAC homography between the paired keypoints (when at
    least four pairs are available) and hands everything to explore_match
    for display.

    NOTE(review): kp_pairs must be a sequence (len() and re-iteration are
    used), not a one-shot iterator.
    """
    mkp1, mkp2 = zip(*kp_pairs)
    H = None
    status = None
    if len(kp_pairs) >= 4:
        p1 = np.float32([kp.pt for kp in mkp1])
        p2 = np.float32([kp.pt for kp in mkp2])
        # status flags which pairs RANSAC kept as inliers
        H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
    if len(kp_pairs):
        explore_match(window_name, img1, img2, kp_pairs, status, H)
def explore_match(win, img1, img2, kp_pairs, status=None, H=None):
    """Draws lines between the matched features.

    Renders img1 and img2 side by side in window *win*, circles each
    matched keypoint (green = inlier, red X = outlier per *status*), draws
    green lines between inlier pairs, and outlines img1's
    homography-projected corners when *H* is given.

    NOTE(review): the images are converted with COLOR_GRAY2BGR, so both
    are expected to be single-channel grayscale — confirm at call sites.
    """
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    # side-by-side canvas: img1 on the left, img2 on the right
    vis = np.zeros((max(h1, h2), w1 + w2), np.uint8)
    vis[:h1, :w1] = img1
    vis[:h2, w1:w1 + w2] = img2
    vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
    if H is not None:
        # project img1's corner rectangle into img2's frame and outline it
        corners = np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])
        reshaped = cv2.perspectiveTransform(corners.reshape(1, -1, 2), H)
        reshaped = reshaped.reshape(-1, 2)
        corners = np.int32(reshaped + (w1, 0))
        cv2.polylines(vis, [corners], True, (255, 255, 255))
    if status is None:
        # no inlier mask given: treat every pair as an inlier
        status = np.ones(len(kp_pairs), np.bool_)
    p1 = np.int32([kpp[0].pt for kpp in kp_pairs])
    # shift img2's points right by img1's width so they land on the canvas
    p2 = np.int32([kpp[1].pt for kpp in kp_pairs]) + (w1, 0)
    green = (0, 255, 0)
    red = (0, 0, 255)
    # first pass: mark each keypoint (dot for inliers, X for outliers)
    for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
        if inlier:
            col = green
            cv2.circle(vis, (x1, y1), 2, col, -1)
            cv2.circle(vis, (x2, y2), 2, col, -1)
        else:
            col = red
            r = 2
            thickness = 3
            cv2.line(vis, (x1 - r, y1 - r), (x1 + r, y1 + r), col, thickness)
            cv2.line(vis, (x1 - r, y1 + r), (x1 + r, y1 - r), col, thickness)
            cv2.line(vis, (x2 - r, y2 - r), (x2 + r, y2 + r), col, thickness)
            cv2.line(vis, (x2 - r, y2 + r), (x2 + r, y2 - r), col, thickness)
    # snapshot before the connecting lines (kept as in the original; unused here)
    vis0 = vis.copy()
    # second pass: connect the inlier pairs
    for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
        if inlier:
            cv2.line(vis, (x1, y1), (x2, y2), green)
    cv2.imshow(win, vis)
def Feature_matching3(img, Img_Point):
    '''
    A function that looks for an image inside of another img.\n
    Note: Super dumb algorithm that just sweeps through the image trying to find the perfect match.\n
    Should not be used for anything sensitive.
    Detects ORB keypoints in grayscale versions of both images, brute-force
    matches them with Hamming distance, and displays the ten closest
    matches in a window.
    @img the image that the algorthim should find Img_Point inside
    @Img_Point the image that should be found inside img
    '''
    cv2.imshow("test", img)
    cv2.imshow("imgpoint", Img_Point)
    # empty (0, 4) rect store, kept only for the debug print below
    matched_rects = np.empty((0, 4), dtype=int)
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray_tmpl = cv2.cvtColor(Img_Point, cv2.COLOR_BGR2GRAY)
    # Initiate ORB detector and compute keypoints/descriptors for both images
    orb = cv2.ORB_create()
    kp_img, desc_img = orb.detectAndCompute(gray_img, None)
    kp_tmpl, desc_tmpl = orb.detectAndCompute(gray_tmpl, None)
    # brute-force Hamming matcher with cross-checking
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    # rank matches by distance, best first
    ranked = sorted(bf.match(desc_img, desc_tmpl), key=lambda m: m.distance)
    # Draw first 10 matches.
    vis = cv2.drawMatches(gray_img, kp_img, gray_tmpl, kp_tmpl, ranked[:10],
                          None, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
    # Display the original image with the rectangle around the match.
    cv2.imshow('outputImageMatching', vis)
    print(matched_rects)
    cv2.waitKey(0)
def create_blank(width, height, bgr_color=(0, 0, 0)):
    """Create a new height x width image (numpy uint8 array) filled with
    *bgr_color* — note OpenCV's BGR channel order."""
    canvas = np.zeros((height, width, 3), np.uint8)
    # broadcast the colour across every pixel
    canvas[:] = bgr_color
    return canvas