Skip to content

Commit 5dd0b6d

Browse files
committed
lower case peaks
1 parent c6fca3e commit 5dd0b6d

File tree

1 file changed

+96
-96
lines changed

1 file changed

+96
-96
lines changed

MTM/__init__.py

Lines changed: 96 additions & 96 deletions
Original file line numberDiff line numberDiff line change
@@ -15,28 +15,28 @@ def _findLocalMax_(corrMap, score_threshold=0.6):
1515
"""Get coordinates of the local maximas with values above a threshold in the image of the correlation map."""
1616
# IF depending on the shape of the correlation map
1717
if corrMap.shape == (1,1): ## Template size = Image size -> Correlation map is a single digit')
18-
18+
1919
if corrMap[0,0]>=score_threshold:
20-
Peaks = np.array([[0,0]])
20+
peaks = np.array([[0,0]])
2121
else:
22-
Peaks = []
22+
peaks = []
2323

2424
# use scipy findpeaks for the 1D cases (would allow to specify the relative threshold for the score directly here rather than in the NMS
2525
elif corrMap.shape[0] == 1: ## Template is as high as the image, the correlation map is a 1D-array
26-
Peaks = find_peaks(corrMap[0], height=score_threshold) # corrMap[0] to have a proper 1D-array
27-
Peaks = [[0,i] for i in Peaks[0]] # 0,i since one coordinate is fixed (the one for which Template = Image)
28-
26+
peaks = find_peaks(corrMap[0], height=score_threshold) # corrMap[0] to have a proper 1D-array
27+
peaks = [[0,i] for i in peaks[0]] # 0,i since one coordinate is fixed (the one for which Template = Image)
28+
2929

3030
elif corrMap.shape[1] == 1: ## Template is as wide as the image, the correlation map is a 1D-array
31-
#Peaks = argrelmax(corrMap, mode="wrap")
32-
Peaks = find_peaks(corrMap[:,0], height=score_threshold)
33-
Peaks = [[i,0] for i in Peaks[0]]
31+
#peaks = argrelmax(corrMap, mode="wrap")
32+
peaks = find_peaks(corrMap[:,0], height=score_threshold)
33+
peaks = [[i,0] for i in peaks[0]]
3434

3535

3636
else: # Correlatin map is 2D
37-
Peaks = peak_local_max(corrMap, threshold_abs=score_threshold, exclude_border=False).tolist()
37+
peaks = peak_local_max(corrMap, threshold_abs=score_threshold, exclude_border=False).tolist()
3838

39-
return Peaks
39+
return peaks
4040

4141

4242

@@ -50,132 +50,132 @@ def computeScoreMap(template, image, method=cv2.TM_CCOEFF_NORMED, mask=None):
5050
Compute score map provided numpy array for template and image (automatically converts images if necessary).
5151
A mask can be provided to limit the comparison of the pixel values to a fraction of the template region.
5252
The mask should have the same dimensions and image type than the template.
53-
54-
Return
53+
54+
Return
5555
------
5656
score map as numpy array
5757
"""
58-
if template.dtype == "float64" or image.dtype == "float64":
58+
if template.dtype == "float64" or image.dtype == "float64":
5959
raise ValueError("64-bit images not supported, max 32-bit")
60-
60+
6161
# Convert images if not both 8-bit (OpenCV matchTemplate is only defined for 8-bit OR 32-bit)
6262
if not (template.dtype == "uint8" and image.dtype == "uint8"):
6363
template = np.float32(template)
6464
image = np.float32(image)
6565
if mask is not None: mask = np.float32(mask)
66-
67-
if mask is not None:
68-
66+
67+
if mask is not None:
68+
6969
if method not in (0,3):
7070
mask = None
7171
warnings.warn("Template matching method not compatible with use of mask (only 0/TM_SQDIFF or 3/TM_CCORR_NORMED).\n-> Ignoring mask.")
72-
72+
7373
else: # correct method
7474
# Check that mask has the same dimensions and type than template
7575
sameDimension = mask.shape == template.shape
7676
sameType = mask.dtype == template.dtype
77-
if not (sameDimension and sameType):
78-
mask = None
77+
if not (sameDimension and sameType):
78+
mask = None
7979
warnings.warn("Mask does not have the same dimension or bit depth than the template.\n-> Ignoring mask.")
8080

81-
81+
8282
# Compute correlation map
8383
return cv2.matchTemplate(image, template, method, mask=mask)
8484

8585

8686
def findMatches(listTemplates, image, method=cv2.TM_CCOEFF_NORMED, N_object=float("inf"), score_threshold=0.5, searchBox=None):
8787
"""
8888
Find all possible templates locations provided a list of templates to search and an image.
89-
89+
9090
Parameters
9191
----------
9292
- listTemplates : list of tuples (LabelString, template, mask (optional))
9393
templates to search in each image, associated to a label
9494
labelstring : string
9595
template : numpy array (grayscale or RGB)
96-
mask (optional): numpy array, should have the same dimensions and type than the template
97-
96+
mask (optional): numpy array, should have the same dimensions and type than the template
97+
9898
- image : Grayscale or RGB numpy array
9999
image in which to perform the search, it should be the same bitDepth and number of channels than the templates
100-
101-
- method : int
100+
101+
- method : int
102102
one of OpenCV template matching method (0 to 5), default 5=0-mean cross-correlation
103-
103+
104104
- N_object: int or float("inf")
105105
expected number of objects in the image, default to infinity if unknown
106-
106+
107107
- score_threshold: float in range [0,1]
108108
if N_object>1, returns local minima/maxima respectively below/above the score_threshold
109-
109+
110110
- searchBox : tuple (X, Y, Width, Height) in pixel unit
111111
optional rectangular search region as a tuple
112-
112+
113113
Returns
114114
-------
115-
- Pandas DataFrame with 1 row per hit and column "TemplateName"(string), "BBox":(X, Y, Width, Height), "Score":float
115+
- Pandas DataFrame with 1 row per hit and column "TemplateName"(string), "BBox":(X, Y, Width, Height), "Score":float
116116
"""
117117
if N_object!=float("inf") and type(N_object)!=int:
118118
raise TypeError("N_object must be an integer")
119-
119+
120120
## Crop image to search region if provided
121-
if searchBox != None:
121+
if searchBox != None:
122122
xOffset, yOffset, searchWidth, searchHeight = searchBox
123123
image = image[yOffset:yOffset+searchHeight, xOffset:xOffset+searchWidth]
124124
else:
125125
xOffset=yOffset=0
126-
126+
127127
listHit = []
128128
for tempTuple in listTemplates:
129-
129+
130130
if not isinstance(tempTuple, tuple) or len(tempTuple)==1:
131131
raise ValueError("listTemplates should be a list of tuples as ('name','array') or ('name', 'array', 'mask')")
132-
132+
133133
templateName, template = tempTuple[:2]
134134
mask = None
135-
135+
136136
if len(tempTuple)>=3:
137-
if method in (0,3):
137+
if method in (0,3):
138138
mask = tempTuple[2]
139139
else:
140140
warnings.warn("Template matching method not supporting the use of Mask. Use 0/TM_SQDIFF or 3/TM_CCORR_NORMED.")
141-
141+
142142
#print('\nSearch with template : ',templateName)
143143
corrMap = computeScoreMap(template, image, method, mask=mask)
144144

145-
## Find possible location of the object
145+
## Find possible location of the object
146146
if N_object==1: # Detect global Min/Max
147147
minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(corrMap)
148-
148+
149149
if method in (0,1):
150-
Peaks = [minLoc[::-1]] # opposite sorting than in the multiple detection
151-
150+
peaks = [minLoc[::-1]] # opposite sorting than in the multiple detection
151+
152152
else:
153-
Peaks = [maxLoc[::-1]]
154-
155-
153+
peaks = [maxLoc[::-1]]
154+
155+
156156
else:# Detect local max or min
157157
if method in (0,1): # Difference => look for local minima
158-
Peaks = _findLocalMin_(corrMap, score_threshold)
159-
158+
peaks = _findLocalMin_(corrMap, score_threshold)
159+
160160
else:
161-
Peaks = _findLocalMax_(corrMap, score_threshold)
162-
163-
164-
#print('Initially found',len(Peaks),'hit with this template')
165-
166-
161+
peaks = _findLocalMax_(corrMap, score_threshold)
162+
163+
164+
#print('Initially found',len(peaks),'hit with this template')
165+
166+
167167
# Once every peak was detected for this given template
168168
## Create a dictionnary for each hit with {'TemplateName':, 'BBox': (x,y,Width, Height), 'Score':coeff}
169-
169+
170170
height, width = template.shape[0:2] # slicing make sure it works for RGB too
171-
172-
for peak in Peaks :
171+
172+
for peak in peaks :
173173
coeff = corrMap[tuple(peak)]
174174
newHit = {'TemplateName':templateName, 'BBox': ( int(peak[1])+xOffset, int(peak[0])+yOffset, width, height ) , 'Score':coeff}
175175

176176
# append to list of potential hit before Non maxima suppression
177177
listHit.append(newHit)
178-
178+
179179
if listHit:
180180
return pd.DataFrame(listHit) # All possible hits before Non-Maxima Supression
181181
else:
@@ -185,115 +185,115 @@ def findMatches(listTemplates, image, method=cv2.TM_CCOEFF_NORMED, N_object=floa
185185
def matchTemplates(listTemplates, image, method=cv2.TM_CCOEFF_NORMED, N_object=float("inf"), score_threshold=0.5, maxOverlap=0.25, searchBox=None):
    """
    Search each template in the image, and return the best N_object locations which offer the best score and which do not overlap above the maxOverlap threshold.

    Parameters
    ----------
    - listTemplates : list of tuples as (LabelString, template, mask (optional))
        templates to search in each image, associated to a label
        labelstring : string
        template : numpy array (grayscale or RGB)
        mask (optional): numpy array, should have the same dimensions and type than the template

    - image : Grayscale or RGB numpy array
        image in which to perform the search, it should be the same bitDepth and number of channels than the templates

    - method : int
        one of OpenCV template matching method (1 to 5), default 5=0-mean cross-correlation
        method 0 is not supported (no NMS implemented for non-bound difference score), use 1 instead

    - N_object: int or float("inf")
        expected number of objects in the image, default to infinity if unknown

    - score_threshold: float in range [0,1]
        if N>1, returns local minima/maxima respectively below/above the score_threshold

    - maxOverlap: float in range [0,1]
        This is the maximal value for the ratio of the Intersection Over Union (IoU) area between a pair of bounding boxes.
        If the ratio is over the maxOverlap, the lower score bounding box is discarded.

    - searchBox : tuple (X, Y, Width, Height) in pixel unit
        optional rectangular search region as a tuple

    Returns
    -------
    Pandas DataFrame with 1 row per hit and column "TemplateName"(string), "BBox":(X, Y, Width, Height), "Score":float
        if N=1, return the best matches independently of the score_threshold
        if N<inf, returns up to N best matches that passed the NMS
        if N=inf, returns all matches that passed the NMS
    """
    if maxOverlap<0 or maxOverlap>1:
        raise ValueError("Maximal overlap between bounding box is in range [0-1]")

    # Validate the matching method BEFORE running the search: findMatches computes a
    # full correlation map per template, so failing fast avoids all that wasted work.
    if method == 0: raise ValueError("The method TM_SQDIFF is not supported. Use TM_SQDIFF_NORMED instead.")

    tableHit = findMatches(listTemplates, image, method, N_object, score_threshold, searchBox)

    # Method 1 (TM_SQDIFF_NORMED) is a difference score: lower is better, so sort ascending
    sortAscending = (method == 1)

    return NMS(tableHit, score_threshold, sortAscending, N_object, maxOverlap)
233-
233+
234234

235235
def drawBoxesOnRGB(image, tableHit, boxThickness=2, boxColor=(255, 255, 00), showLabel=False, labelColor=(255, 255, 0), labelScale=0.5 ):
    """
    Overlay the predicted template locations as bounding boxes on a copy of the image and return it.

    A grayscale input is first converted to RGB so that the colored rectangles
    (and optional labels) are visible. With showLabel=True, each hit's
    TemplateName is printed at the top-left corner of its bounding box.

    Parameters
    ----------
    - image : image in which the search was performed

    - tableHit: list of hit as returned by matchTemplates or findMatches

    - boxThickness: int
        thickness of bounding box contour in pixels
    - boxColor: (int, int, int)
        RGB color for the bounding box

    - showLabel: Boolean
        Display label of the bounding box (field TemplateName)

    - labelColor: (int, int, int)
        RGB color for the label

    Returns
    -------
    outImage: RGB image
        original image with predicted template locations depicted as bounding boxes
    """
    # Draw on an RGB copy: grayscale inputs are promoted so the color boxes show up
    outImage = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB) if image.ndim == 2 else image.copy()

    for _, hit in tableHit.iterrows():
        x, y, w, h = hit['BBox']
        topLeft = (x, y)
        bottomRight = (x + w, y + h)
        cv2.rectangle(outImage, topLeft, bottomRight, color=boxColor, thickness=boxThickness)
        if showLabel:
            cv2.putText(outImage, text=hit['TemplateName'], org=topLeft, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=labelScale, color=labelColor, lineType=cv2.LINE_AA)

    return outImage
272272

273273

274274
def drawBoxesOnGray(image, tableHit, boxThickness=2, boxColor=255, showLabel=False, labelColor=255, labelScale=0.5):
275275
"""
276276
Same as drawBoxesOnRGB but with Graylevel.
277277
If a RGB image is provided, the output image will be a grayscale image
278-
278+
279279
Parameters
280280
----------
281281
- image : image in which the search was performed
282-
282+
283283
- tableHit: list of hit as returned by matchTemplates or findMatches
284-
284+
285285
- boxThickness: int
286286
thickness of bounding box contour in pixels
287-
287+
288288
- boxColor: int
289289
Gray level for the bounding box
290-
290+
291291
- showLabel: Boolean
292292
Display label of the bounding box (field TemplateName)
293-
293+
294294
- labelColor: int
295295
Gray level for the label
296-
296+
297297
Returns
298298
-------
299299
outImage: Single channel grayscale image
@@ -302,10 +302,10 @@ def drawBoxesOnGray(image, tableHit, boxThickness=2, boxColor=255, showLabel=Fal
302302
# Convert RGB to grayscale
303303
if image.ndim == 3: outImage = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) # convert to RGB to be able to show detections as color box on grayscale image
304304
else: outImage = image.copy()
305-
305+
306306
for index, row in tableHit.iterrows():
307307
x,y,w,h = row['BBox']
308308
cv2.rectangle(outImage, (x, y), (x+w, y+h), color=boxColor, thickness=boxThickness)
309-
if showLabel: cv2.putText(outImage, text=row['TemplateName'], org=(x, y), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=labelScale, color=labelColor, lineType=cv2.LINE_AA)
310-
309+
if showLabel: cv2.putText(outImage, text=row['TemplateName'], org=(x, y), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=labelScale, color=labelColor, lineType=cv2.LINE_AA)
310+
311311
return outImage

0 commit comments

Comments
 (0)