Commit 1da28f10 authored by Robert Ohmacht's avatar Robert Ohmacht

Merge remote-tracking branch 'origin/SegmentationRefactoring' into Tiling2Develop

parents 5a37c671 d9b6effc
......@@ -176,12 +176,14 @@ class ImageView(QtWidgets.QLabel):
else:
self.drag = "add"
p0 = event.pos()
if self.drag=="add":
self.seedpoints.append([p0.x(),p0.y(),self.seedradius])
elif self.drag=="delete":
self.seeddeletepoints.append([p0.x(),p0.y(),self.seedradius])
else:
self.removeSeeds([p0.x(),p0.y()])
self.appendSeedPoints(p0)
self.update()
super().mousePressEvent(event)
......@@ -189,12 +191,13 @@ class ImageView(QtWidgets.QLabel):
def mouseMoveEvent(self, event):
if self.drag:
p0 = event.pos()
if self.drag=="add":
self.seedpoints.append([p0.x(),p0.y(),self.seedradius])
elif self.drag=="delete":
self.seeddeletepoints.append([p0.x(),p0.y(),self.seedradius])
else:
self.removeSeeds([p0.x(),p0.y()])
self.appendSeedPoints(p0)
# if self.drag == "add":
# self.seedpoints.append([p0.x(),p0.y(),self.seedradius])
# elif self.drag == "delete":
# self.seeddeletepoints.append([p0.x(),p0.y(),self.seedradius])
# else:
# self.removeSeeds([p0.x(),p0.y()])
self.update()
super().mouseMoveEvent(event)
......@@ -203,6 +206,16 @@ class ImageView(QtWidgets.QLabel):
self.seedChanged.emit()
self.drag = False
super().mouseReleaseEvent(event)
def appendSeedPoints(self, pos):
# only register points that lie within the displayed image area (Nscreen x Nscreen)
if 0 <= pos.x() < Nscreen and 0 <= pos.y() < Nscreen:
print(pos)
if self.drag == "add":
self.seedpoints.append([pos.x(), pos.y(), self.seedradius])
elif self.drag == "delete":
self.seeddeletepoints.append([pos.x(), pos.y(), self.seedradius])
elif self.drag == "remove":
self.removeSeeds([pos.x(), pos.y()])
def clearData(self):
self.contours = []
......@@ -301,6 +314,7 @@ class ParticleDetectionView(QtWidgets.QWidget):
self.showseedpoints.stateChanged.connect(self.imglabel.changeSeedDisplay)
self.showseedpoints.setChecked(True)
self.setImageCenter()
group = QtWidgets.QGroupBox("Detection settings", self)
grid = QtWidgets.QGridLayout()
self.parameters = []
......@@ -316,6 +330,7 @@ class ParticleDetectionView(QtWidgets.QWidget):
colstretch = 2
if p.dtype == np.bool:
paramui = QtWidgets.QCheckBox(p.helptext, self)
paramui.stateChanged.connect(self.autoUpdateIfDesired)
paramui.setChecked(p.value)
valuefunc = makeValueLambda(paramui.isChecked)
colstretch = 2
......@@ -335,6 +350,7 @@ class ParticleDetectionView(QtWidgets.QWidget):
paramui.setSingleStep(p.stepsize)
paramui.setValue(p.value)
paramui.setMinimumWidth(70)
paramui.valueChanged.connect(self.autoUpdateIfDesired)
valuefunc = makeValueLambda(paramui.value)
elif p.dtype is None:
label = QtWidgets.QLabel(p.helptext, self)
......@@ -358,10 +374,7 @@ class ParticleDetectionView(QtWidgets.QWidget):
if paramui is not None:
self.parameters[-1][3] = pshow
#link checkboxes to other parameters:
def makeEnableLambda(checkbox, parameter):
return lambda: parameter.setEnabled(checkbox.isChecked())
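# (a factory function is used here so that each lambda captures its own checkbox/parameter
# pair instead of the final values of the loop variables)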
......@@ -401,6 +414,12 @@ class ParticleDetectionView(QtWidgets.QWidget):
self.slider.setOrientation(QtCore.Qt.Horizontal)
self.slider.sliderMoved.connect(self.imglabel.resetAlpha)
vbox.addWidget(self.slider)
self.autoUpdateCheckBox = QtWidgets.QCheckBox('Auto-update detection')
self.autoUpdateCheckBox.setMaximumWidth(200)
self.autoUpdateCheckBox.setChecked(True)
vbox.addWidget(self.autoUpdateCheckBox)
hbox2 = QtWidgets.QHBoxLayout()
self.pdetectsub = QtWidgets.QPushButton("Detect", self)
self.pdetectall = QtWidgets.QPushButton("Detect all", self)
......@@ -490,11 +509,11 @@ class ParticleDetectionView(QtWidgets.QWidget):
arr1 = self.dataset.seedpoints
# what seeds are actually in image view?
for point in arr1: #Josef says: I replaced the commented logic with the one right here below, as the old one somehow did not work.... The for-loop might become slow at some point??
for point in arr1:
if point[0] > (m1-point[2]) and point[0] <= (m2+point[2]) and point[1] > (n1-point[2]) and point[1] <= (n2+point[2]):
seedpoints.append([point[0] - p0[0][0], point[1] - p0[0][1], point[2]])
arr2 = self.dataset.seeddeletepoints
for point in arr2: #Josef says: I replaced the commented logic with the one right here below, as the old one somehow did not work.... The for-loop might become slow at some point??
for point in arr2:
if point[0] > (m1-point[2]) and point[0] <= (m2+point[2]) and point[1] > (n1-point[2]) and point[1] <= (n2+point[2]):
seeddeletepoints.append([point[0] - p0[0][0], point[1] - p0[0][1], point[2]])
......@@ -524,8 +543,15 @@ class ParticleDetectionView(QtWidgets.QWidget):
if self.drag:
self.lastcenter = self.lastmove
self.drag = False
self.detectShow(None)
self.autoUpdateIfDesired()
def autoUpdateIfDesired(self):
try:
if self.autoUpdateCheckBox.isChecked():
self.detectShow(None)
except Exception: # the method is also called while default parameters are being set automatically, which raises errors because initialization is not yet finished
pass
def setDataSet(self, ds):
self.dataset = ds
self.updateImageSeeds()
......
......@@ -26,6 +26,7 @@ from scipy.interpolate import InterpolatedUnivariateSpline
from scipy import ndimage as ndi
from skimage.feature import peak_local_max
from skimage.morphology import watershed
import skfuzzy as fuzz
import random
def closeHolesOfSubImage(subimg):
......@@ -39,6 +40,11 @@ def closeHolesOfSubImage(subimg):
return im_out[1:-1, 1:-1]
class Parameter(object):
"""
A Parameter drives one aspect of the image segmentation. All Parameters are initialized in the Segmentation class.
The DetectionView widget reads these parameters and creates and connects the corresponding UI elements.
:return:
"""
def __init__(self, name, dtype, value=None, minval=None, maxval=None,
decimals=0, stepsize=1, helptext=None, show=False, linkedParameter=None):
self.name = name
......@@ -77,6 +83,7 @@ class Segmentation(object):
'maxparticlearea': 100000,
'minparticledistance': 20,
'closeBackground': False,
'fuzzycluster': False,
'measurefrac': 1,
'compactness': 0.0,
'seedRad': 3}
......@@ -99,7 +106,7 @@ class Segmentation(object):
Parameter("activateLowThresh", np.bool, self.detectParams['activateLowThresh'], helptext="activate lower threshold", show=False, linkedParameter='lowThresh'),
Parameter("lowThresh", float, self.detectParams['lowThresh'], .01, .9, 2, .02, helptext="Lower threshold", show=True),
Parameter("activateUpThresh", np.bool, self.detectParams['activateUpThresh'], helptext="activate upper threshold", show=False, linkedParameter='upThresh'),
Parameter("upThresh", float, self.detectParams['upThresh'], .01, 1.0, 2, .02, helptext="Upper threshold", show=True),
Parameter("upThresh", float, self.detectParams['upThresh'], .01, 1.0, 2, .02, helptext="Upper threshold", show=False),
Parameter("maxholebrightness", float, self.detectParams['maxholebrightness'], 0, 1, 2, 0.02, helptext="Close holes brighter than..", show = True),
Parameter("minparticlearea", int, self.detectParams['minparticlearea'], 1, 1000, 0, 50, helptext="Min. particle pixel area", show=False),
Parameter("enableMaxArea", np.bool, self.detectParams['enableMaxArea'], helptext="enable filtering for maximal pixel area", show=False, linkedParameter='maxparticlearea'),
......@@ -107,13 +114,14 @@ class Segmentation(object):
Parameter("minparticledistance", int, self.detectParams['minparticledistance'], 5, 1000, 0, 5, helptext="Min. distance between particles", show=False),
Parameter("measurefrac", float, self.detectParams['measurefrac'], 0, 1, 2, stepsize = 0.05, helptext="measure fraction of particles", show=False),
Parameter("closeBackground", np.bool, self.detectParams['closeBackground'], helptext="close holes in sure background", show=False),
Parameter("fuzzycluster", np.bool, self.detectParams['fuzzycluster'], helptext='Enable Fuzzy Clustering', show=False),
Parameter("sure_fg", None, helptext="Show sure foreground", show=True),
Parameter("compactness", float, self.detectParams['compactness'], 0, 1, 2, 0.05, helptext="watershed compactness", show=False),
Parameter("watershed", None, helptext="Show watershed markers", show=True),
]
# make each parameter accessible via self.name
# the variables are defined as properties and because of how the local context
# in for loops works the actural setter and getter functions are defined inside
# in for loops works, the actual setter and getter functions are defined inside
# a separate context in a local function
def makeGetter(p):
return lambda : p.value
......@@ -126,164 +134,13 @@ class Segmentation(object):
self.__dict__[p.name] = property(makeGetter(p), makeSetter(p))
self.parlist = parlist
def setParameters(self, **kwargs):
for key in kwargs:
self.__dict__[key] = kwargs[key]
def convert2Gray(self, img):
gray = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
return gray
def calculateHist(self, gray):
hist = cv2.calcHist([gray],[0],None,[256],[0,256])
return hist
def calculateHistFunction(self, points):
t = np.linspace(0,1,800)
x0 = np.concatenate(([-1.],points[:,0],[256.]))
y0 = np.concatenate(([0.],points[:,1],[255.]))
t0 = np.concatenate(([0.],np.cumsum(np.sqrt(np.diff(x0)**2+np.diff(y0)**2))))
t0 /= t0[-1]
fx = InterpolatedUnivariateSpline(t0, x0, k=3)
fy = InterpolatedUnivariateSpline(t0, y0, k=3)
x = fx(t)
y = fy(t)
arr = np.zeros(256, dtype=np.uint8)
xi = np.arange(256)
ind = np.searchsorted(xi, x)
arr[ind[ind<256]] = y[ind<256]
arr[xi>points[:,0].max()] = 255
arr[xi<points[:,0].min()] = 0.
arr[arr>255] = 255.
arr[arr<0] = 0.
return xi, arr
def closeHoles(self, thresh):
n, labels, stats, centroids = cv2.connectedComponentsWithStats(thresh, 8, cv2.CV_32S)
newthresh = np.zeros_like(thresh)
for label in range(1, n):
up = stats[label, cv2.CC_STAT_TOP]
left = stats[label, cv2.CC_STAT_LEFT]
width = stats[label, cv2.CC_STAT_WIDTH]
height = stats[label, cv2.CC_STAT_HEIGHT]
subimg = np.uint8(255 * (labels[up:(up+height), left:(left+width)] == label))
newthresh[up:(up+height), left:(left+width)] += closeHolesOfSubImage(subimg)
return newthresh
def closeBrightHoles(self, thresh, grayimage, maxbrightness):
n, labels, stats, centroids = cv2.connectedComponentsWithStats(thresh, 8, cv2.CV_32S)
maxbrightness = np.uint8(maxbrightness * 255)
print('num comps in brightHoles:', n)
for label in range(1, n):
up = stats[label, cv2.CC_STAT_TOP]
left = stats[label, cv2.CC_STAT_LEFT]
width = stats[label, cv2.CC_STAT_WIDTH]
height = stats[label, cv2.CC_STAT_HEIGHT]
subimg = np.uint8(255 * (labels[up:(up+height), left:(left+width)] == label))
# Add padding to the thresholded image
subimg = cv2.copyMakeBorder(subimg, 1, 1, 1, 1, 0)
# Copy the thresholded image.
im_floodfill = subimg.copy()
# Mask used for flood filling.
# Note that the mask needs to be 2 pixels larger than the image.
h, w = subimg.shape[:2]
mask = np.zeros((h+2, w+2), np.uint8)
# Floodfill from point (0, 0)
cv2.floodFill(im_floodfill, mask, (0,0), 255)
indices = np.where(im_floodfill == 0)[0]
if len(indices) > 0:
if np.mean(grayimage[indices[0]]) > maxbrightness:
# close hole and add closed image to thresh:
im_floodfill_inv = cv2.bitwise_not(im_floodfill)
# Combine the two images to get the foreground.
im_out = subimg | im_floodfill_inv
thresh[up:(up+height), left:(left+width)] += im_out[1:-1, 1:-1]
return thresh
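# The hole-closing idiom used above, as a minimal standalone sketch (toy values, names illustrative):
#   toy = np.zeros((5, 5), np.uint8); toy[1:4, 1:4] = 255; toy[2, 2] = 0   # blob with one hole
#   flood = cv2.copyMakeBorder(toy, 1, 1, 1, 1, 0)
#   cv2.floodFill(flood, np.zeros((9, 9), np.uint8), (0, 0), 255)          # fills everything outside the blob
#   closed = toy | cv2.bitwise_not(flood)[1:-1, 1:-1]                      # only the hole survives the inversion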
def getEdgeBorders(self, image):
edges = abs(cv2.Laplacian(image, cv2.CV_64F))
edges = cv2.blur(edges, (5, 5))
edges = edges**0.6
edges = edges/edges.max()
return edges
def filterThresholdByAreas(self, thresh, minarea, maxarea):
newthresh = np.zeros_like(thresh)
n, labels, stats, centroids = cv2.connectedComponentsWithStats(thresh, 8, cv2.CV_32S)
print('num comps:', n)
for label in range(1, n):
area = stats[label, cv2.CC_STAT_AREA]
if minarea < area < maxarea:
up = stats[label, cv2.CC_STAT_TOP]
left = stats[label, cv2.CC_STAT_LEFT]
width = stats[label, cv2.CC_STAT_WIDTH]
height = stats[label, cv2.CC_STAT_HEIGHT]
subthresh = np.uint8(255 * (labels[up:(up+height), left:(left+width)] == label))
newthresh[up:(up+height), left:(left+width)] += subthresh
return newthresh
def getSureForeground(self, thresh, mindistance):
sure_fg = np.zeros_like(thresh)
n, labels, stats, centroids = cv2.connectedComponentsWithStats(thresh, 8, cv2.CV_32S)
for label in range(1, n):
up = stats[label, cv2.CC_STAT_TOP]
left = stats[label, cv2.CC_STAT_LEFT]
width = stats[label, cv2.CC_STAT_WIDTH]
height = stats[label, cv2.CC_STAT_HEIGHT]
subimg = np.uint8(255 * (labels[up:(up+height), left:(left+width)] == label))
subdist = cv2.distanceTransform(subimg, cv2.DIST_L2,3)
subfg = np.uint8(peak_local_max(subdist, mindistance, indices = False))
if subfg.max() > 0: #i.e., at least one maximum value was added
sure_fg[up:(up+height), left:(left+width)] += subfg
else:
#simply get maximum of subdist
submax = np.where(subdist == subdist.max())
sure_fg[up+submax[0][0], left+submax[1][0]] = 1
sure_fg = cv2.dilate(sure_fg, np.ones((3, 3)))
return sure_fg
def getMeasurementPoints(self, binParticle, numPoints=1):
# pad by one pixel so the distance transform sees a closed particle boundary
binParticle = cv2.copyMakeBorder(binParticle, 1, 1, 1, 1, 0)
dist = cv2.distanceTransform(np.uint8(binParticle), cv2.DIST_L2,3)
# the first measurement point is the pixel farthest from the particle boundary
# (the -1 compensates for the one-pixel padding)
ind = np.argmax(dist)
y = [ind//dist.shape[1]-1]
x = [ind%dist.shape[1]-1]
for i in range(numPoints-1):
# zero the previous maximum and recompute the distance transform to obtain further interior points
binParticle.flat[ind] = 0
dist = cv2.distanceTransform(np.uint8(binParticle), cv2.DIST_L2,3)
ind = np.argmax(dist)
y.append(ind//dist.shape[1]-1)
x.append(ind%dist.shape[1]-1)
return y, x
def apply2Image(self, img, seedpoints, deletepoints, seedradius, dataset, return_step=None):
"""
Takes an image with seedpoints and seeddeletepoints and runs segmentation on it.
:return:
"""
t0 = time()
# convert to gray image and do histogram normalization
gray = self.convert2Gray(img)
print("gray")
......@@ -356,6 +213,12 @@ class Segmentation(object):
thresh = self.closeBrightHoles(thresh, blur, self.maxholebrightness)
print("closed holes")
# modify thresh with seedpoints and deletepoints
for p in np.int32(seedpoints):
cv2.circle(thresh, tuple([p[0], p[1]]), int(p[2]), 255, -1)
for p in np.int32(deletepoints):
cv2.circle(thresh, tuple([p[0], p[1]]), int(p[2]), 0, -1)
if return_step=='maxholebrightness': return thresh, 0
if self.cancelcomputation:
return None, None, None
......@@ -364,110 +227,119 @@ class Segmentation(object):
maxArea = self.maxparticlearea
else:
maxArea = np.inf
thresh = self.filterThresholdByAreas(thresh, self.minparticlearea, maxArea)
print('filter threshold by areas')
if return_step == 'maxparticlearea': return thresh, 0
if self.cancelcomputation:
return None, None, None
####get sure_fg
##get sure_fg
'''the peak_local_max function takes the minimum distance between peaks. Unfortunately, that means individual
particles smaller than that distance are disregarded. Hence, we need a connected-components approach'''
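# each labelled component is therefore processed on its own below; getSureForeground guarantees
# at least one sure-foreground pixel per component, even if the component is smaller than minparticledistance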
n, labels, stats, centroids = cv2.connectedComponentsWithStats(thresh, 8, cv2.CV_32S)
del thresh
sure_fg = self.getSureForeground(thresh, self.minparticledistance)
measurementPoints = {}
finalcontours = []
particleIndex = 0
sure_bg = cv2.dilate(thresh, np.ones((5, 5)), iterations = 1)
if self.closeBackground:
sure_bg = self.closeHoles(sure_bg)
if return_step == "sure_fg":
preview_surefg = np.zeros(img.shape[:2])
preview_surebg = np.zeros(img.shape[:2])
elif return_step is None:
previewImage = None
else:
previewImage = np.zeros(img.shape[:2])
# modify sure_fg and sure_bg with seedpoints and deletepoints
if len(deletepoints)>0:
h, w = sure_fg.shape[:2]
mask = np.zeros((h+2, w+2), np.uint8)
for p in np.int32(deletepoints):
if 0 < p[0] < h and 0 < p[1] < w: #point has to be within image, otherwise the floodFill fails
cv2.floodFill(sure_fg, mask, tuple([p[0], p[1]]), 0)
for p in np.int32(seedpoints):
cv2.circle(sure_fg, tuple([p[0], p[1]]), int(p[2]), 1, -1)
cv2.circle(sure_bg, tuple([p[0], p[1]]), int(p[2]), 1, -1)
for p in np.int32(deletepoints):
cv2.circle(sure_fg, tuple([p[0], p[1]]), int(p[2]), 0, -1)
cv2.circle(sure_bg, tuple([p[0], p[1]]), int(p[2]), 0, -1)
print("sure_fg, sure_bg")
if self.cancelcomputation:
return None, None, None
for label in range(1, n):
area = stats[label, cv2.CC_STAT_AREA]
if self.minparticlearea < area < maxArea:
up = stats[label, cv2.CC_STAT_TOP]
left = stats[label, cv2.CC_STAT_LEFT]
width = stats[label, cv2.CC_STAT_WIDTH]
height = stats[label, cv2.CC_STAT_HEIGHT]
subthresh = np.uint8(255 * (labels[up:(up+height), left:(left+width)] == label))
subdist = cv2.distanceTransform(subthresh, cv2.DIST_L2,3)
sure_fg = self.getSureForeground(subthresh, subdist, self.minparticledistance)
sure_bg = cv2.dilate(subthresh, np.ones((5, 5)), iterations = 1)
if self.closeBackground:
sure_bg = self.closeHoles(sure_bg)
unknown = cv2.subtract(sure_bg, sure_fg)
ret, markers = cv2.connectedComponents(sure_fg)
markers = markers+1
markers[unknown==255] = 0
print("connectedComponents")
if self.cancelcomputation:
return None, None, None
if return_step=="sure_fg":
img = np.zeros_like(sure_fg)
img[np.nonzero(sure_fg)] |= 1 #dilation of sure_fg is included in self.getSureForeground
img[np.nonzero(sure_bg)] |= 2
return img, 1
dist_transform = cv2.distanceTransform(thresh, cv2.DIST_L2,5)
print("distanceTransform")
if self.cancelcomputation:
return None, None, None
twatershed = time()
# only the skimage watershed is integrated now; the opencv watershed is commented out above in case we ever need it again for anything...
markers = ndi.label(sure_fg)[0]
markers = watershed(-dist_transform, markers, mask=sure_bg, compactness = self.compactness, watershed_line = True) #labels = 0 for background, 1... for particles
# modify sure_fg and sure_bg with seedpoints and deletepoints
for p in np.int32(seedpoints):
cv2.circle(sure_fg, tuple([p[0]-left, p[1]-up]), int(p[2]), 1, -1)
cv2.circle(sure_bg, tuple([p[0]-left, p[1]-up]), int(p[2]), 1, -1)
for p in np.int32(deletepoints):
cv2.circle(sure_fg, tuple([p[0]-left, p[1]-up]), int(p[2]), 0, -1)
cv2.circle(sure_bg, tuple([p[0]-left, p[1]-up]), int(p[2]), 0, -1)
print("watershed, elapsed time:", time()-twatershed, "seconds")
if self.cancelcomputation:
return None, None, None
if return_step=="watershed":
return np.uint8(255*(markers!=0)), 0
tcont = time()
if cv2.__version__ > '3.5':
contours, hierarchy = cv2.findContours(markers, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
else:
temp, contours, hierarchy = cv2.findContours(markers, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
print("contour detection, elapsed time:", time()-tcont, "seconds")
if self.cancelcomputation:
return None, None, None
tstats = time()
measurementPoints = {}
tmpcontours = [contours[i] for i in range(len(contours)) if hierarchy[0,i,3]<0]
contours = []
particleIndex = 0
if self.cancelcomputation:
return None, None, None
if return_step=="sure_fg":
preview_surefg = self.addToPreviewImage(sure_fg, up, left, preview_surefg)
preview_surebg = self.addToPreviewImage(sure_bg, up, left, preview_surebg)
continue
unknown = cv2.subtract(sure_bg, sure_fg)
ret, markers = cv2.connectedComponents(sure_fg)
markers = markers+1
markers[unknown==255] = 0
for cnt in tmpcontours:
label = markers[cnt[0,0,1],cnt[0,0,0]]
if label==0:
continue
markers = ndi.label(sure_fg)[0]
markers = watershed(-subdist, markers, mask=sure_bg, compactness = self.compactness, watershed_line = True) #labels = 0 for background, 1... for particles
x0, x1 = cnt[:,0,0].min(), cnt[:,0,0].max()
y0, y1 = cnt[:,0,1].min(), cnt[:,0,1].max()
subimg = (markers[y0:y1+1,x0:x1+1]).copy()
subimg[subimg!=label] = 0
y, x = self.getMeasurementPoints(subimg)
contours.append(cnt)
measurementPoints[particleIndex] = []
for index in range(0, len(x)):
newMeasPoint = MeasurementPoint(particleIndex, x[index] + x0, y[index] + y0)
measurementPoints[particleIndex].append(newMeasPoint)
particleIndex += 1
print("contour stats, elapsed time:", time()-tstats, "seconds")
if self.cancelcomputation:
return None, None, None
if return_step is not None:
raise NotImplementedError(f"this particular return_step: {return_step} is not implemented yet")
if return_step=="watershed":
previewImage = self.addToPreviewImage(markers, up, left, previewImage)
continue
if cv2.__version__ > '3.5':
contours, hierarchy = cv2.findContours(markers, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
else:
temp, contours, hierarchy = cv2.findContours(markers, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
if self.cancelcomputation:
return None, None, None
tmpcontours = [contours[i] for i in range(len(contours)) if hierarchy[0,i,3]<0]
for cnt in tmpcontours:
if cv2.contourArea(cnt) >= self.minparticlearea:
label = markers[cnt[0,0,1],cnt[0,0,0]]
if label==0:
continue
x0, x1 = cnt[:,0,0].min(), cnt[:,0,0].max()
y0, y1 = cnt[:,0,1].min(), cnt[:,0,1].max()
subimg = (markers[y0:y1+1,x0:x1+1]).copy()
subimg[subimg!=label] = 0
y, x = self.getMeasurementPoints(subimg)
for i in range(len(cnt)):
cnt[i][0][0] += left
cnt[i][0][1] += up
finalcontours.append(cnt)
measurementPoints[particleIndex] = []
for index in range(0, len(x)):
newMeasPoint = MeasurementPoint(particleIndex, x[index] + x0 + left, y[index] + y0 + up)
measurementPoints[particleIndex].append(newMeasPoint)
particleIndex += 1
if return_step == 'sure_fg':
img = np.zeros_like(preview_surefg)
img[np.nonzero(preview_surefg)] |= 1
img[np.nonzero(preview_surebg)] |= 2
return img, 1
tf = time()
print("particle detection took:", tf-t0, "seconds")
elif return_step == 'watershed':
return np.uint8(255*(previewImage!=0)), 0
elif return_step is not None:
raise NotImplementedError(f"this particular return_step: {return_step} is not implemented yet")
print("particle detection took:", time()-t0, "seconds")
if self.measurefrac < 1.0:
nMeasurementsDesired = int(np.round(self.measurefrac * len(measurementPoints)))
......@@ -477,5 +349,210 @@ class Segmentation(object):
for index in partIndicesToMeasure:
newMeasPoints[index] = measurementPoints[index]
measurementPoints = newMeasPoints
total_time = time()-t0
print('segmentation took', total_time, 'seconds')
return measurementPoints, finalcontours
def addToPreviewImage(self, subimg, up, left, previewImage):
"""
Adds a subimage at the given position to the preview image
:return: