diff --git a/analysis/particleContainer.py b/analysis/particleContainer.py
index ffa674743a5512ba848d2f24039e2700a9f76b16..f465a8aea5fe8eb631fbf4b48976568298d210c6 100644
--- a/analysis/particleContainer.py
+++ b/analysis/particleContainer.py
@@ -42,7 +42,13 @@ class ParticleContainer(object):
self.measurements.append(Measurement())
indexOfNewMeas = len(self.measurements)-1
return indexOfNewMeas
-
+
+ def clearParticles(self):
+ self.particles = []
+
+ def clearMeasurements(self):
+ self.measurements = []
+
def setMeasurementScanIndex(self, indexOfMeasurment, scanIndex):
self.measurements[indexOfMeasurment].ramanScanIndex = scanIndex
diff --git a/dataset.py b/dataset.py
index 265ea042d85c157e6bf74589a66a52491be8ca26..1c8d502a44ea1dcd1e9680419b06ff1bb4ac1255 100644
--- a/dataset.py
+++ b/dataset.py
@@ -149,6 +149,7 @@ class DataSet(object):
# self.particlestats = []
# self.ramanscansortindex = None
self.particleContainer = ParticleContainer(self)
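+ # new flag: set once particle detection has produced results; sampleview uses it to unlock the RamanScan step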
+ self.particleDetectionDone = False
self.ramanscandone = False
# self.results = {'polymers': None,
@@ -246,40 +247,30 @@ class DataSet(object):
self.imagedim_bf = self.imagedim
self.imagedim_df = self.imagedim
del self.imagedim
-
+
+
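+ # legacy datasets stored only ramanscansortindex (one spectrum per particle); rebuild the particle-to-spectra mapping from it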
+ if not hasattr(self, 'particles2spectra') or self.particles2spectra is None:
+ self.particles2spectra = [[int(np.where(self.ramanscansortindex == i)[0])] for i in range(len(self.ramanscansortindex))]
+
self.version = 2
if self.version == 2:
self.particleContainer = ParticleContainer(self)
- def recreateMeasurement2ParticleFromScanIndices():
- measurements2particles = [[int(np.where(self.ramanscansortindex == i)[0])] for i in range(len(self.ramanscansortindex))]
- return measurements2particles
-
-
+
self.particleContainer.initializeParticles(len(self.particlestats))
self.particleContainer.setParticleContours(self.particlecontours)
self.particleContainer.setParticleStats(self.particlestats)
self.particleContainer.applyPixelScaleToParticleStats(self.getPixelScale())
- if hasattr(self, 'particles2spectra'):
- if self.particles2spectra is not None:
- measurements2particles = self.particles2spectra
- else:
- measurements2particles = recreateMeasurement2ParticleFromScanIndices()
- else:
- measurements2particles = recreateMeasurement2ParticleFromScanIndices()
-
- for particleIndex, listOfScanIndices in enumerate(measurements2particles):
- curParticle = self.particleContainer.getParticleOfIndex(particleIndex)
- for scanIndex in listOfScanIndices:
-# curParticle.addEmptyMeasurement()
-# curParticle.setMeasurementPixelCoords(measIndex, x, y)
-# curParticle.setMeasurementScanIndex(measIndex, scanIndex)
- indexOfNewMeas = self.particleContainer.addEmptyMeasurement()
- x, y = self.ramanpoints[particleIndex][0], self.ramanpoints[particleIndex][1]
- self.particleContainer.setMeasurementPixelCoords(indexOfNewMeas, x, y)
- self.particleContainer.setMeasurementScanIndex(indexOfNewMeas, scanIndex)
- curParticle.addMeasurement(self.particleContainer.measurements[indexOfNewMeas])
+ if len(self.particlestats) > 0: #i.e., particle detection was completed and particle data is there
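+ # recreate one Measurement per legacy scan index and attach it to its particle, copying pixel coordinates and scan index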
+ for particleIndex, listOfScanIndices in enumerate(self.particles2spectra):
+ curParticle = self.particleContainer.getParticleOfIndex(particleIndex)
+ for scanIndex in listOfScanIndices:
+ indexOfNewMeas = self.particleContainer.addEmptyMeasurement()
+ x, y = self.ramanpoints[particleIndex][0], self.ramanpoints[particleIndex][1]
+ self.particleContainer.setMeasurementPixelCoords(indexOfNewMeas, x, y)
+ self.particleContainer.setMeasurementScanIndex(indexOfNewMeas, scanIndex)
+ curParticle.addMeasurement(self.particleContainer.measurements[indexOfNewMeas])
for particle in self.particleContainer.particles:
for meas in particle.measurements:
@@ -292,8 +283,6 @@ class DataSet(object):
# self.version = 3
# add later conversion for higher version numbers here
-
-
def getSubImage(self, img, index, draw=True):
contour = self.particlecontours[index]
x0, x1 = contour[:,0,0].min(), contour[:,0,0].max()
@@ -395,28 +384,14 @@ class DataSet(object):
def getLegacyDetectImageName(self):
return os.path.join(self.path, "detectimage.png")
+ def getBackgroundImageName(self):
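+ # averaged background image written by the optical-scan background manager (opticalbackground.py)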
+ return os.path.join(self.path, "background.bmp")
+
def getDetectImageName(self):
raise NotImplementedError("No longer implemented due to change in API")
def getTmpImageName(self):
return os.path.join(self.path, "tmp.bmp")
-
- def saveParticleData(self):
- print('Not saving ParticleData into text file...:\nThe current output format might be wrong, if multiple spectra per particle are present...')
-# if len(self.ramanscansortindex)>0:
-# data = []
-# pixelscale = (self.pixelscale_df if self.imagescanMode == 'df' else self.pixelscale_bf)
-# for i in self.ramanscansortindex:
-# data.append(list(self.ramanpoints[i])+list(self.particlestats[i]))
-# data = np.array(data)
-# data[:,0], data[:,1], z = self.mapToLengthRaman((data[:,0], data[:,1]), microscopeMode=self.imagescanMode, noz=True)
-# data[:,2:7] *= pixelscale
-# header = "x [µm], y [µm], length [µm], height [µm], length_ellipse [µm], height_ellipse [µm]"
-# if data.shape[1]>6:
-# header = header + ", area [µm^2]"
-# data[:,6] *= pixelscale
-# np.savetxt(os.path.join(self.path, "particledata.txt"), data,
-# header=header)
def save(self):
saveData(self, self.fname)
diff --git a/detectionview.py b/detectionview.py
index cec7d72a164c6b7aae57202dbab0f567d8039f14..8c6423571a89b0072e9b18a940c78413bc3f759a 100644
--- a/detectionview.py
+++ b/detectionview.py
@@ -20,7 +20,7 @@ If not, see .
"""
import numpy as np
from PyQt5 import QtCore, QtWidgets, QtGui
-from segmentation import Segmentation
+from segmentation import Segmentation, MeasurementPoint
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
import matplotlib.pyplot as plt
from threading import Thread
@@ -248,7 +248,9 @@ class ImageView(QtWidgets.QLabel):
painter.setPen(QtCore.Qt.red)
painter.setBrush(QtCore.Qt.red)
for p in self.measpoints:
- painter.drawEllipse(p[0]-2, p[1]-2, 5, 5)
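+ # measpoints now holds a list of MeasurementPoint objects per particle; draw one marker per measurement position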
+ for point in self.measpoints[p]:
+# painter.drawEllipse(p[0]-2, p[1]-2, 5, 5)
+ painter.drawEllipse(point.x-2, point.y-2, 5, 5)
if self.showseedpoints:
painter.setPen(QtCore.Qt.white)
@@ -581,10 +583,9 @@ class ParticleDetectionView(QtWidgets.QWidget):
@QtCore.pyqtSlot()
def clearDetection(self):
if self.dataset is not None:
- self.dataset.ramanpoints = []
- self.dataset.particlecontours = []
- self.dataset.particlestats = []
- self.dataset.ramanscansortindex = []
+ self.dataset.particleContainer.clearParticles()
+ self.dataset.particleContainer.clearMeasurements()
+ self.dataset.particleDetectionDone = False
self.dataset.ramanscandone = False
self.dataset.mode = "opticalscan"
self.dataset.save()
@@ -676,7 +677,7 @@ class ParticleDetectionView(QtWidgets.QWidget):
curParticle.addMeasurement(particleContainer.measurements[indexOfNewMeas])
-
+ self.dataset.particleDetectionDone = True
# self.dataset.ramanpoints = measurementPoints #consider moving that to particleContainer
# self.dataset.particlecontours = contours
# self.dataset.particlestats = particlestats
diff --git a/gepard.py b/gepard.py
index 5e09e868e85f5baab8142ce28360fd3760ee2ac5..61146e590c3cd102b1ed2cbcfe1766812e9c2777 100644
--- a/gepard.py
+++ b/gepard.py
@@ -102,12 +102,13 @@ class GEPARDMainWindow(QtWidgets.QMainWindow):
if fileName:
isValid, msg = self.testFilename(fileName)
if isValid:
- self.fname = str(fileName) #TODO: No spaces for Renishaw Interface!!
+ self.fname = str(fileName)
self.view.new(self.fname)
self.scalingChanged(1.)
else:
QtWidgets.QMessageBox.critical(self, "Error", msg)
-
+
+ @QtCore.pyqtSlot()
def testFilename(self, fileName):
if self.view.ramanctrl.name == 'RenishawCOM': #the renishawCom does not allow Spaces within filePath
if fileName.find(' ') == 0:
@@ -116,7 +117,7 @@ class GEPARDMainWindow(QtWidgets.QMainWindow):
return True, ""
else:
return True, ""
-
+
@QtCore.pyqtSlot()
def about(self):
QtWidgets.QMessageBox.about(self, 'GEPARD',
@@ -202,7 +203,7 @@ class GEPARDMainWindow(QtWidgets.QMainWindow):
self.configRamanCtrlAct.triggered.connect(self.view.configureRamanControl)
if self.view.simulatedRaman:
self.configRamanCtrlAct.setDisabled(True)
-
+
def updateModes(self, active=None, maxenabled=None):
ose, osc, pde, pdc, rse, rsc, pae, pac = [False]*8
if maxenabled=="OpticalScan":
diff --git a/imagestitch.py b/imagestitch.py
index 89d8da5fd664cdffdd6780e78f737b592af5d0fa..bd55765acaccd7d311408cf2c551538ed49b0bc7 100644
--- a/imagestitch.py
+++ b/imagestitch.py
@@ -48,15 +48,17 @@ def imageStacking(colimgs):
return im, zval
def combineImages(path, nx, ny, nk, width, height, angle):
- imgs = []
full = None
for i in range(nx):
for j in range(ny):
- colimgs = []
- for k in range(nk):
- colimgs.append(cv2.imread(path + f'test_{i}_{j}_{k}.bmp'))
- img = imageStacking(colimgs)
- imgs.append(img)
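+ # with a single z-plane there is nothing to focus-stack; read the frame directly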
+ if nk > 1:
+ colimgs = []
+ for k in range(nk):
+ colimgs.append(cv2.imread(path + f'test_{i}_{j}_{k}.bmp'))
+ img, zval = imageStacking(colimgs)
+ else:
+ img = cv2.imread(path + f'test_{i}_{j}_1.bmp')
+
dx = i*.9*img.shape[1]
dy = j*.8*img.shape[0]
c, s = np.cos(np.radians(angle)), np.sin(np.radians(angle))
@@ -67,6 +69,7 @@ def combineImages(path, nx, ny, nk, width, height, angle):
full = dst
else:
full = cv2.max(full,dst)
+
cv2.imwrite("full_dunkel.png", full)
diff --git a/opticalbackground.py b/opticalbackground.py
new file mode 100644
index 0000000000000000000000000000000000000000..75d8cafbed2a96ef1199b33feacae8308916374c
--- /dev/null
+++ b/opticalbackground.py
@@ -0,0 +1,251 @@
+# -*- coding: utf-8 -*-
+"""
+GEPARD - Gepard-Enabled PARticle Detection
+Copyright (C) 2018 Lars Bittrich and Josef Brandt, Leibniz-Institut für
+Polymerforschung Dresden e. V.
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program, see COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from PyQt5 import QtCore, QtWidgets, QtGui
+import cv2
+import numpy as np
+import os
+
+from helperfunctions import cv2imread_fix
+
+class BackGroundManager(QtWidgets.QWidget):
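+ # managerClosed is emitted when the window is closed; readBackground(index) asks the parent OpticalScan widget to acquire a camera image for background slot <index>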
+ managerClosed = QtCore.pyqtSignal()
+ readBackground = QtCore.pyqtSignal(int)
+
+ def __init__(self, parentOSwidget):
+ super(BackGroundManager, self).__init__()
+ self.setFixedSize(1500, 900)
+ self.setWindowTitle('Optical Background Manager')
+ self.parentOSwidget = parentOSwidget
+ self.parentOSwidget.backGroundSavedToPath.connect(self.updateChildImage)
+ self.ramanctrl = self.parentOSwidget.ramanctrl
+
+ layout = QtWidgets.QHBoxLayout()
+ self.setLayout(layout)
+
+ self.imagesGroup = QtWidgets.QGroupBox('Current Background Images')
+ self.imagesLayout = QtWidgets.QGridLayout()
+
+ self.imgContainers = []
+ self.avgImg = None
+ self.presetIndividualImages()
+
+ self.previewImage = QtWidgets.QGraphicsView()
+ self.setupGraphicsView(self.previewImage, scaleFactor=0.8)
+
+ previewGroup = QtWidgets.QGroupBox()
+ previewLayout = QtWidgets.QVBoxLayout()
+ self.blurspinbox = QtWidgets.QSpinBox(self)
+ self.blurspinbox.setMinimum(3)
+ self.blurspinbox.setMaximum(99)
+ self.blurspinbox.setSingleStep(2)
+ self.blurspinbox.setValue(5)
+ self.blurspinbox.valueChanged.connect(self.calculateAverageImage)
+ self.blurspinbox.setMaximumWidth(150)
+
+ self.previewCurrentViewBtn = QtWidgets.QPushButton('Acquire 3x3 area and preview subtracted result')
+ self.previewCurrentViewBtn.clicked.connect(self.previewStitchedArea)
+ self.previewArea = QtWidgets.QGraphicsView()
+ self.setupGraphicsView(self.previewArea, scaleFactor=0.5)
+
+ previewLayout.addWidget(QtWidgets.QLabel('Radius for blur'))
+ previewLayout.addWidget(self.blurspinbox)
+ previewLayout.addWidget(QtWidgets.QLabel('Preview of averaged and smoothed image'))
+ previewLayout.addWidget(self.previewImage)
+ previewLayout.addWidget(self.previewCurrentViewBtn)
+ previewLayout.addWidget(self.previewArea)
+
+ previewGroup.setLayout(previewLayout)
+
+ layout.addWidget(self.imagesGroup)
+ layout.addWidget(previewGroup)
+
+ def presetIndividualImages(self, nrows=3, ncols=2):
+ index = 0
+ for row in range(nrows):
+ for col in range(ncols):
+ self.imgContainers.append(SingleImageContainer(self, index))
+ self.imagesLayout.addWidget(self.imgContainers[-1], row, col)
+ index += 1
+
+ self.imagesGroup.setLayout(self.imagesLayout)
+
+ def previewStitchedArea(self):
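+ # move the stage over a 3x3 grid around the current position, acquire an image at each point and stitch them with the averaged background subtracted, to preview the correction quality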
+ if self.avgImg is None:
+ QtWidgets.QMessageBox.about(self, 'Warning', 'No background image has been acquired yet.')
+ return
+ else:
+ from opticalscan import loadAndPasteImage
+ self.dataset = self.parentOSwidget.dataset
+
+ #acquire images in 3x3 area to preview quality of background subtraction
+ x, y, z = self.ramanctrl.getPosition()
+ micMode = self.parentOSwidget.view.microscopeMode
+ width, height, angle = self.ramanctrl.getImageDimensions(micMode)
+ startPoint = [x-width, y-height]
+ endPoint = [x+width, y+height]
+ points = np.concatenate(([startPoint], [endPoint]), axis=0)
+ p0 = [points[:,0].min(), points[:,1].max()]
+ p1 = [points[:,0].max(), points[:,1].min()]
+
+ reply = QtWidgets.QMessageBox.question(self, 'Message',f"The stage will move {round(3*width)} in x and {round(3*height)} in y.\nContinue?",
+ QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.No)
+ if reply == QtWidgets.QMessageBox.Yes:
+ fullimg = None
+ zimg = None
+
+ for row in range(3):
+ for col in range(3):
+ curPoint = [startPoint[0] + row*width, startPoint[1] + col*height]
+ self.ramanctrl.moveToAbsolutePosition(curPoint[0], curPoint[1])
+ self.ramanctrl.saveImage(self.dataset.getTmpImageName())
+
+ fullimg, zimg = loadAndPasteImage([self.dataset.getTmpImageName()], fullimg, zimg, width, height, angle, p0, p1, curPoint, background=self.avgImg)
+ self.updateGraphicsView(self.previewArea, fullimg, convertColors=True)
+
+
+ def setupGraphicsView(self, graphView, scaleFactor=1.):
+ graphView.item = QtWidgets.QGraphicsPixmapItem()
+ scene = QtWidgets.QGraphicsScene(graphView)
+ scene.addItem(graphView.item)
+ graphView.setScene(scene)
+ graphView.scale(scaleFactor, scaleFactor)
+
+ def updateGraphicsView(self, graphView, img, convertColors=False):
+ if img is not None:
+ prevImg = img
+ else:
+ prevImg = np.zeros((300, 300, 3), dtype=np.uint8)
+
+ if convertColors:
+ prevImg = cv2.cvtColor(prevImg, cv2.COLOR_RGB2BGR)
+
+ height, width = prevImg.shape[:2]
+ bytesPerLine = 3 * width
+ pix = QtGui.QPixmap()
+ pix.convertFromImage(QtGui.QImage(prevImg, width, height, bytesPerLine, QtGui.QImage.Format_RGB888))
+ graphView.item.setPixmap(pix)
+
+ @QtCore.pyqtSlot(int, str)
+ def updateChildImage(self, index, path):
+ self.imgContainers[index].updateImage(path)
+ self.calculateAverageImage()
+
+ def calculateAverageImage(self):
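+ # average all loaded background images, smooth with an odd-sized Gaussian kernel and store the result; if no images are left, remove the stored background file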
+ curImgs = [i.getImage() for i in self.imgContainers if i.getImage() is not None]
+
+ if len(curImgs) > 0:
+ curImgs = np.array(curImgs)
+ self.avgImg = np.sum(curImgs, axis=0)/len(curImgs)
+ radius = self.blurspinbox.value()
+ if radius %2 == 0:
+ radius += 1
+ if radius < 0:
+ radius = 1
+ self.avgImg = cv2.GaussianBlur(self.avgImg, (radius, radius), 0)
+ self.avgImg = np.uint8(self.avgImg)
+ self.parentOSwidget.writeBackGroundImage(self.avgImg)
+ else:
+ self.avgImg = None
+ self.parentOSwidget.deleteBackGroundImage()
+
+ self.updateGraphicsView(self.previewImage, self.avgImg)
+
+ def closeEvent(self, event):
+ self.managerClosed.emit()
+ event.accept()
+
+
+class SingleImageContainer(QtWidgets.QGroupBox):
+ def __init__(self, parent, index):
+ super(SingleImageContainer, self).__init__()
+ self.index = index
+ self.parent = parent
+
+ layout = QtWidgets.QVBoxLayout()
+ layout.addWidget(QtWidgets.QLabel(f'Background {index+1}'))
+
+ readBtn = QtWidgets.QPushButton('ReadImage')
+ readBtn.clicked.connect(self.readImage)
+
+ self.image = ImagePixmap()
+
+ delBtn = QtWidgets.QPushButton('Delete Image')
+ delBtn.clicked.connect(self.clearImage)
+
+ layout.addWidget(readBtn)
+ layout.addWidget(self.image)
+ layout.addWidget(delBtn)
+
+ self.setLayout(layout)
+
+ def readImage(self):
+ self.parent.readBackground.emit(self.index)
+
+ def updateImage(self, path):
+ self.image.updateImage(path)
+
+ def clearImage(self):
+ self.image.clearImage()
+ self.parent.calculateAverageImage()
+
+ def getImage(self):
+ return self.image.imgdata
+
+
+class ImagePixmap(QtWidgets.QGraphicsView):
+ def __init__(self):
+ super(ImagePixmap, self).__init__()
+ self.item = QtWidgets.QGraphicsPixmapItem()
+ self.item.setPos(0, 0)
+ self.item.setAcceptedMouseButtons(QtCore.Qt.NoButton)
+ self.imgdata = None
+
+ scene = QtWidgets.QGraphicsScene(self)
+ scene.addItem(self.item)
+ self.setScene(scene)
+
+ self.scale(0.4, 0.4)
+ self.updateImage()
+
+ def updateImage(self, img_path=None):
+ if img_path is None:
+ self.loadImageIntoPixmap(self.createBlancImage())
+
+ elif os.path.exists(img_path):
+ self.imgdata = cv2.cvtColor(cv2imread_fix(img_path), cv2.COLOR_BGR2RGB)
+ self.loadImageIntoPixmap(self.imgdata)
+
+ def createBlancImage(self):
+ blancImg = np.zeros((300, 500, 3), dtype=np.uint8)
+ cv2.putText(blancImg, 'None selected', (150, 150), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
+ return blancImg
+
+ def loadImageIntoPixmap(self, img):
+ height, width = img.shape[:2]
+ bytesPerLine = 3 * width
+ pix = QtGui.QPixmap()
+ pix.convertFromImage(QtGui.QImage(img, width, height, bytesPerLine, QtGui.QImage.Format_RGB888))
+ self.item.setPixmap(pix)
+
+ def clearImage(self):
+ self.imgdata = None
+ self.updateImage()
diff --git a/opticalscan.py b/opticalscan.py
index 6f185bfe953372254882f4b7bf2704383cc78bb4..adca3dc5732315aa858cdc2f13a0a74b5aa72086 100644
--- a/opticalscan.py
+++ b/opticalscan.py
@@ -30,12 +30,13 @@ from helperfunctions import cv2imread_fix, cv2imwrite_fix
from time import time
import datetime
import sys
+from opticalbackground import BackGroundManager
def scan(path, sol, zpositions, grid, controlclass, dataqueue,
stopevent, logpath='', ishdr=False):
if ishdr:
merge_mertens = cv2.createMergeMertens()
-
+
fp = None
if logpath != '':
try:
@@ -81,18 +82,21 @@ def scan(path, sol, zpositions, grid, controlclass, dataqueue,
if fp is not None:
fp.close()
+def subtractBackground(image, background):
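+ # subtract the background and add back its mean brightness so the overall intensity level is preserved; clip to the valid 8-bit range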
+ avg = np.mean(background)
+ subtracted = np.uint8(np.clip(image.astype(np.float32) - background + avg, 0, 255))
+ return subtracted
def loadAndPasteImage(srcnames, fullimage, fullzval, width, height,
- rotationvalue, p0, p1, p, halfResolution = False):
+ rotationvalue, p0, p1, p, background=None):
colimgs = []
for name in srcnames:
- colimgs.append(cv2.cvtColor(cv2imread_fix(name), cv2.COLOR_BGR2RGB))
+ curImg = cv2imread_fix(name)
+ if background is not None:
+ curImg = subtractBackground(curImg, background)
+ colimgs.append(cv2.cvtColor(curImg, cv2.COLOR_BGR2RGB))
img, zval = imageStacking(colimgs)
- if halfResolution: #halve resolution, if fullimage would become too large otherwise
- img = cv2.resize(img, None, fx = 0.5, fy = 0.5, interpolation = cv2.INTER_CUBIC)
- zval= cv2.resize(zval, None, fx = 0.5, fy = 0.5, interpolation = cv2.INTER_CUBIC)
-
x, y = p
Nx, Ny = int((p1[0]-p0[0]+width)/width*img.shape[1]), int((p0[1]-p1[1]+height)/height*img.shape[0]) + 10 # + 10 because of rotation and hopefully it will be small
c, s = np.cos(np.radians(rotationvalue)), np.sin(np.radians(rotationvalue))
@@ -179,6 +183,7 @@ class PointCoordinates(QtWidgets.QGridLayout):
self.itemAtPosition(i+1,5).setVisible(False)
self.itemAtPosition(i+1,6).setVisible(False)
self.N = N
+
for i, p in pointsgiven:
wx, wy, wz = self.dswidgets[i]
x, y, z = p
@@ -216,10 +221,12 @@ class PointCoordinates(QtWidgets.QGridLayout):
else:
points[i,:] = np.nan
return points
+
class OpticalScan(QtWidgets.QWidget):
imageUpdate = QtCore.pyqtSignal(str, name='imageUpdate') #str = 'df' (= darkfield) or 'bf' (=bright field)
boundaryUpdate = QtCore.pyqtSignal()
+ backGroundSavedToPath = QtCore.pyqtSignal(int, str)
def __init__(self, ramanctrl, dataset, logpath='', parent=None):
super().__init__(parent, QtCore.Qt.Window)
@@ -235,6 +242,22 @@ class OpticalScan(QtWidgets.QWidget):
pointgroup.setLayout(self.points)
self.points.readPoint.connect(self.takePoint)
+ bkggroup = QtWidgets.QGroupBox('Manage Background Images')
+ self.enableBackGround = QtWidgets.QCheckBox('Enable Background Subtraction')
+ self.enableBackGround.setChecked(False)
+ self.enableBackGround.stateChanged.connect(self.enableDisableBackground)
+ self.backGroundManager = BackGroundManager(self)
+ self.backGroundManager.managerClosed.connect(self.managerWasClosed)
+ self.backGroundManager.readBackground.connect(self.readBackground)
+ self.showBgkManagerBtn = QtWidgets.QPushButton('Show Background Manager Window')
+ self.showBgkManagerBtn.setDisabled(True)
+ self.showBgkManagerBtn.clicked.connect(self.showHideBackgroundWindow)
+
+ bkglayout = QtWidgets.QVBoxLayout()
+ bkglayout.addWidget(self.enableBackGround)
+ bkglayout.addWidget(self.showBgkManagerBtn)
+ bkggroup.setLayout(bkglayout)
+
self.pareaselect = QtWidgets.QPushButton("Area select", self)
label = QtWidgets.QLabel("Size increase:", self)
self.radiusincreaseedit = QtWidgets.QDoubleSpinBox(self)
@@ -253,7 +276,7 @@ class OpticalScan(QtWidgets.QWidget):
self.zmaxedit.setMaximumWidth(100)
label3 = QtWidgets.QLabel("Focus steps:", self)
self.nzedit = QtWidgets.QSpinBox(self)
- self.nzedit.setRange(2,10)
+ self.nzedit.setRange(2,20)
self.nzedit.setValue(3)
self.nzedit.setMaximumWidth(100)
self.hdrcheck = QtWidgets.QCheckBox("High dynamic range", self)
@@ -293,11 +316,7 @@ class OpticalScan(QtWidgets.QWidget):
micModeLayout.addWidget(self.df_btn)
micModeLayout.addWidget(self.bf_btn)
micModeGroup.setLayout(micModeLayout)
-
- self.halfResChecker = QtWidgets.QCheckBox('Half resolution')
- self.halfResChecker.setChecked(False)
- self.halfResChecker.setToolTip('Enable for very high resolution images.\nFull resolution slows down the scan too much..')
-
+
self.deleteImgChecker = QtWidgets.QCheckBox('Delete image files after run')
self.deleteImgChecker.setChecked(True)
@@ -318,16 +337,15 @@ class OpticalScan(QtWidgets.QWidget):
furtherOptionsLayout.addRow(label3, self.nzedit)
furtherOptionsLayout.addRow(self.hdrcheck)
furtherOptionsLayout.addRow(self.deleteImgChecker)
- furtherOptionsLayout.addRow(self.halfResChecker)
furtherOptionsGroup.setLayout(furtherOptionsLayout)
-
btnLayout = QtWidgets.QHBoxLayout()
btnLayout.addWidget(self.prun)
btnLayout.addWidget(self.pexit)
btnLayout.addStretch()
vbox.addWidget(pointgroup)
+ vbox.addWidget(bkggroup)
vbox.addWidget(self.areaOptionsGroup)
vbox.addWidget(furtherOptionsGroup)
vbox.addLayout(btnLayout)
@@ -335,9 +353,27 @@ class OpticalScan(QtWidgets.QWidget):
vbox.addWidget(self.progressbar)
self.setLayout(vbox)
- #self.show()
self.setVisible(False)
+
+ def enableDisableBackground(self):
+ self.showBgkManagerBtn.setEnabled(self.enableBackGround.isChecked())
+ if self.enableBackGround.isChecked():
+ self.backGroundManager.calculateAverageImage()
+ else:
+ self.deleteBackGroundImage()
+
+ def showHideBackgroundWindow(self):
+ if self.backGroundManager.isHidden():
+ self.backGroundManager.show()
+ self.showBgkManagerBtn.setText('Hide Background Manager Window')
+ else:
+ self.backGroundManager.hide()
+ self.showBgkManagerBtn.setText('Show Background Manager Window')
+
+ def managerWasClosed(self):
+ self.showBgkManagerBtn.setText('Show Background Manager Window')
+
@QtCore.pyqtSlot()
def stopScan(self):
if self.process is not None and self.process.is_alive():
@@ -380,7 +416,6 @@ class OpticalScan(QtWidgets.QWidget):
[[xi, yi] for xi, yi in zip(x[::-1]+dx,y1*np.ones_like(x))] + \
[[xi, yi] for xi, yi in zip(x0*np.ones_like(y),y[::-1]+dy)]
-
self.boundaryUpdate.emit()
self.prun.setEnabled(True)
@@ -388,7 +423,6 @@ class OpticalScan(QtWidgets.QWidget):
self.dataset = ds
self.points.createWidgets(5, list(zip(ds.fitindices,ds.fitpoints)))
if len(self.dataset.fitindices)>1:
-# self.pareaselect.setEnabled(True)
self.areaOptionsGroup.setEnabled(True)
softwarez = self.ramanctrl.getSoftwareZ()
if abs(softwarez) >0.1:
@@ -418,16 +452,12 @@ class OpticalScan(QtWidgets.QWidget):
pshift = self.ramanctrl.getRamanPositionShift()
self.dataset.pshift = pshift
img = cv2.cvtColor(cv2imread_fix(self.dataset.getTmpImageName()), cv2.COLOR_BGR2RGB)
- if self.halfResChecker.isChecked():
- img = cv2.resize(img, None, fx = 0.5, fy = 0.5, interpolation = cv2.INTER_CUBIC)
self.dataset.imagedim_bf = self.ramanctrl.getImageDimensions('bf')
self.dataset.pixelscale_bf = self.dataset.imagedim_bf[0]/img.shape[1] #=imagedim_width/shape[1]
self.dataset.imagedim_df = self.ramanctrl.getImageDimensions('df')
self.dataset.pixelscale_df = self.dataset.imagedim_df[0]/img.shape[1] #=imagedim_width/shape[1]
-
-
points = self.points.getPoints()
ind = np.isfinite(points[:,0])
self.dataset.fitindices = np.arange(points.shape[0])[ind]
@@ -457,7 +487,8 @@ class OpticalScan(QtWidgets.QWidget):
full = cv2.warpAffine(full, M, (Nx, Ny)) #fails, if image dimensions are >32767x32767px...
dst = cv2.max(full, dst)
except:
- QtWidgets.QMessageBox.critical(self, 'Error', 'Image is too large\nPlease repeat with "scale image" checked.')
+ QtWidgets.QMessageBox.critical(self, 'Error', 'Image is too large.\nPlease select a smaller region.')
+ raise
return
self.view.imgdata = dst
@@ -466,15 +497,30 @@ class OpticalScan(QtWidgets.QWidget):
self.dataset.readin = False
self.imageUpdate.emit(self.view.microscopeMode)
+ @QtCore.pyqtSlot(int)
+ def readBackground(self, indexOfCallingImage):
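+ # grab the current camera view into the temp image and report its path back to the background manager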
+ tmp_path = self.dataset.getTmpImageName()
+ self.ramanctrl.saveImage(tmp_path)
+ self.backGroundSavedToPath.emit(indexOfCallingImage, tmp_path)
+
+ def writeBackGroundImage(self, backgroundImg):
+ cv2imwrite_fix(self.dataset.getBackgroundImageName(), cv2.cvtColor(backgroundImg, cv2.COLOR_RGB2BGR))
+
+ def deleteBackGroundImage(self):
+ if os.path.exists(self.dataset.getBackgroundImageName()):
+ os.remove(self.dataset.getBackgroundImageName())
+
@QtCore.pyqtSlot()
def run(self):
- if self.dataset.ramanscansortindex is not None:
+ ramanPoints = self.dataset.particleContainer.getMeasurementPixelCoords()
+ if len(ramanPoints) != 0:
reply = QtWidgets.QMessageBox.critical(self, 'Dataset already contains raman scan points',
"Continuation will invalidate all previous results! Continue anyway?",
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.No)
if reply != QtWidgets.QMessageBox.Yes:
return
+
if self.dataset.readin:
reply = QtWidgets.QMessageBox.critical(self, 'Dataset is newly read from disk!',
"Coordinate systems might have changed since. Do you want to continue with saved coordinates?",
@@ -485,6 +531,14 @@ class OpticalScan(QtWidgets.QWidget):
else:
return
+ if os.path.exists(self.dataset.getBackgroundImageName()):
+ reply = QtWidgets.QMessageBox.critical(self, 'Background correction info.',
+ "A background image was saved. All acquired images will be corrected. Continue?\nOtherwise delete images in background manager",
+ QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.Yes)
+
+ if reply != QtWidgets.QMessageBox.Yes:
+ return
+
self.view.imparent.ramanSwitch.df_btn.setChecked(self.df_btn.isChecked())
self.view.imparent.ramanSwitch.setDisabled(True)
self.view.setMicroscopeMode()
@@ -493,11 +547,7 @@ class OpticalScan(QtWidgets.QWidget):
self.view.dataset.imagescanMode = 'df'
else:
self.view.dataset.imagescanMode = 'bf'
-
- #TODO:
- #DISABLE OPTION GROUPS when scanning, reactivate upon cancelling
-
-
+
points = np.float32(self.dataset.fitpoints)
# convert z to software z, which is relative to current user z
softwarez = self.ramanctrl.getSoftwareZ() # get current software z
@@ -544,11 +594,6 @@ class OpticalScan(QtWidgets.QWidget):
QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.No)
if reply == QtWidgets.QMessageBox.Yes:
-# if self.halfResChecker.isChecked():
-# #reset pixelscales!!!
-# self.dataset.pixelscale_df /= 2
-# self.dataset.pixelscale_bf /= 2
-
self.prun.setEnabled(False)
self.ramanctrl.disconnect()
self.processstopevent = Event()
@@ -593,8 +638,14 @@ class OpticalScan(QtWidgets.QWidget):
width, height, rotationvalue = (self.dataset.imagedim_df if self.view.imparent.ramanSwitch.df_btn.isChecked() else self.dataset.imagedim_bf)
p = self.dataset.grid[i]
p0, p1 = self.dataset.maxdim[:2], self.dataset.maxdim[2:]
+
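+ # pass the stored background image (if present) so each newly acquired tile is background-corrected during stitching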
+ if os.path.exists(self.dataset.getBackgroundImageName()):
+ background_img = cv2imread_fix(self.dataset.getBackgroundImageName())
+ else:
+ background_img = None
+
self.view.imgdata, self.dataset.zvalimg = loadAndPasteImage(names, self.view.imgdata, self.dataset.zvalimg, width, height,
- rotationvalue, p0, p1, p, halfResolution = self.halfResChecker.isChecked())
+ rotationvalue, p0, p1, p, background=background_img)
self.progressbar.setValue(i+1)
if i>3:
timerunning = time()-self.starttime
@@ -627,6 +678,10 @@ class OpticalScan(QtWidgets.QWidget):
self.close()
return
self.timer.start(100.)
+
+ def closeEvent(self, event):
+ self.backGroundManager.close()
+ event.accept()
if __name__ == "__main__":
from ramancom.simulatedraman import SimulatedRaman
diff --git a/ramancom/WITecCOM.py b/ramancom/WITecCOM.py
index fab141544250eaeb8e03c418d4903bcd412c8da0..9eb857fe7e3923fd1a04e8ad2f103e9e585fa9c8 100644
--- a/ramancom/WITecCOM.py
+++ b/ramancom/WITecCOM.py
@@ -211,9 +211,22 @@ class WITecCOM(RamanBase):
# move only if new position is really different; repeat if new position is ignored (happens some times)
while max(abs(initpos[0]-x), abs(initpos[1]-y))>epsxy:
t0 = time()
- self.PosXFloatMan.SetValue(x)
- self.PosYFloatMan.SetValue(y)
- self.GoToTrigger.OperateTrigger()
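+ # the COM interface occasionally rejects calls while the stage is busy; retry the position submit a few times before giving up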
+ numFails = 0
+ maxFails = 50
+ positionSubmitted = False
+ while numFails < maxFails and not positionSubmitted:
+ try:
+ self.PosXFloatMan.SetValue(x)
+ self.PosYFloatMan.SetValue(y)
+ self.GoToTrigger.OperateTrigger()
+ positionSubmitted = True
+ except pythoncom.com_error:
+ numFails += 1
+ sleep(.1)
+ if numFails > 0:
+ print(f'{numFails} of max. {maxFails} unsuccessful position submits for position: {x}, {y}', flush=True)
+ if not positionSubmitted:
+ print(f'Error setting Position: {x}, {y}\nExpecting \"signal ignored\" warning', flush=True)
# wait till position is found within accuracy of epsxy; check if position changes at all
distance = 2*epsxy
@@ -297,7 +310,6 @@ class WITecCOM(RamanBase):
print("Waiting for measurement ready...")
t1 = time()
-
def triggerMeasurement(self, num):
assert self.timeseries
self.TimeSeriesSlowNextMan.OperateTrigger()
diff --git a/ramanscanui.py b/ramanscanui.py
index 84bbbae98a5314569e013d557f6407f69b267138..a1d21b39c964de82ac083cd7d0953d2c25f81e33 100644
--- a/ramanscanui.py
+++ b/ramanscanui.py
@@ -235,8 +235,6 @@ class RamanScanUI(QtWidgets.QWidget):
for measIndex, ramanScanIndex in enumerate(cmin):
self.particleContainer.setMeasurementScanIndex(measIndex, ramanScanIndex)
-# self.dataset.ramanscansortindex = cmin
- self.dataset.saveParticleData()
self.view.saveDataSet()
self.view.prepareAnalysis()
self.view.scaleImage(2.0)
@@ -272,8 +270,9 @@ class RamanScanUI(QtWidgets.QWidget):
if i>=0:
self.progressbar.setValue(i+1)
- self.view.highLightRamanIndex(i+1)
- Npoints = len(self.dataset.ramanpoints)
+ self.view.highLightRamanIndex(i+1) #go to next scanmarker
+
+ Npoints = len(self.dataset.particleContainer.getMeasurementPixelCoords())
if i>3:
timerunning = time()-self.starttime
ttot = timerunning*Npoints/(i+1)
@@ -286,7 +285,6 @@ class RamanScanUI(QtWidgets.QWidget):
self.dataset.ramanscandone = True
self.view.saveDataSet()
self.view.unblockUI()
-# self.view.switchMode("ParticleAnalysis") #directly going to analysis is not feasible... We first have to export spectra etc...
self.progressbar.setValue(0)
self.progressbar.setEnabled(False)
self.progresstime.setEnabled(False)
diff --git a/sampleview.py b/sampleview.py
index d5eb765babcd5aa20d5ee5eb66c09d991a827e84..0a0cb8b231c23326803ad2fa1c1e4704a3370cbb 100644
--- a/sampleview.py
+++ b/sampleview.py
@@ -100,7 +100,6 @@ class SampleView(QtWidgets.QGraphicsView):
self.update()
-
def takeScreenshot(self):
#TODO:
#LIMIT SCREENSHOT TO ACTUAL VIEWSIZE OF LOADED IMAGE...
@@ -179,9 +178,6 @@ class SampleView(QtWidgets.QGraphicsView):
return
assert mode in ["OpticalScan", "ParticleDetection", "RamanScan", "ParticleAnalysis"]
self.oscanwidget.setVisible(False)
- if self.detectionwidget is not None:
- self.detectionwidget.close()
- self.detectionwidget.destroy()
self.ramanwidget.setVisible(False)
self.mode = mode
self.loadPixmap(self.microscopeMode)
@@ -192,9 +188,9 @@ class SampleView(QtWidgets.QGraphicsView):
elif mode == "ParticleDetection":
if self.detectionwidget is None:
self.detectionwidget = ParticleDetectionView(self.imgdata, self.dataset, self)
- self.detectionwidget.show()
self.detectionwidget.imageUpdate.connect(self.detectionUpdate)
self.detectionwidget.detectionFinished.connect(self.activateMaxMode)
+ self.detectionwidget.show()
elif mode == "RamanScan":
self.ramanwidget.resetDataset(self.dataset)
@@ -304,7 +300,7 @@ class SampleView(QtWidgets.QGraphicsView):
maxmode = "OpticalScan"
if os.path.exists(self.dataset.getImageName()):
maxmode = "ParticleDetection"
- if len(self.dataset.ramanpoints)>0:
+ if self.dataset.particleDetectionDone:
maxmode = "RamanScan"
if self.dataset.ramanscandone:
maxmode = "ParticleAnalysis"
@@ -325,11 +321,11 @@ class SampleView(QtWidgets.QGraphicsView):
elif self.mode=="ParticleDetection":
p0 = self.mapToScene(event.pos())
self.detectionwidget.setImageCenter([p0.x(), p0.y()])
+
+ else:
+ p0 = self.mapToScene(event.pos())
+ super(SampleView, self).mousePressEvent(event)
- else:
- p0 = self.mapToScene(event.pos())
- super(SampleView, self).mousePressEvent(event)
-
else:
self.particlePainter.mousePressEvent(event)
diff --git a/segmentation.py b/segmentation.py
index ed10111b71ec765ad7a19d4464770d2c6c3df92b..9bffe7cadbcf4dd9164c9cd7ed5444ebfbceca7f 100644
--- a/segmentation.py
+++ b/segmentation.py
@@ -51,22 +51,24 @@ class Segmentation(object):
def __init__(self, dataset=None, parent=None):
self.cancelcomputation = False
self.parent = parent
- self.defaultParams = {'contrastCurve': np.array([[50,0],[100,200],[200,255]]),
- 'activateContrastCurve': True,
- 'blurRadius': 9,
- 'activateLowThresh': True,
- 'lowThresh': 0.2,
- 'activateUpThresh': False,
- 'upThresh': 0.5,
- 'invertThresh': False,
- 'maxholebrightness': 0.5,
-# 'erodeconvexdefects': 0,
- 'minparticlearea': 20,
- 'minparticledistance': 20,
- 'closeBackground': True,
- 'measurefrac': 1,
- 'compactness': 0.,
- 'seedRad': 3}
+ self.defaultParams = {'adaptiveHistEqu': False,
+ 'claheTileSize': 128,
+ 'contrastCurve': np.array([[50,0],[100,200],[200,255]]),
+ 'activateContrastCurve': True,
+ 'blurRadius': 9,
+ 'activateLowThresh': True,
+ 'lowThresh': 0.2,
+ 'activateUpThresh': False,
+ 'upThresh': 0.5,
+ 'invertThresh': False,
+ 'maxholebrightness': 0.5,
+ 'minparticlearea': 20,
+ 'maxparticlearea': 10000,
+ 'minparticledistance': 20,
+ 'closeBackground': True,
+ 'measurefrac': 1,
+ 'compactness': 0.,
+ 'seedRad': 3}
if dataset is not None:
self.detectParams = dataset.detectParams
for key in self.defaultParams:
@@ -77,7 +79,9 @@ class Segmentation(object):
self.initializeParameters()
def initializeParameters(self):
- parlist = [Parameter("contrastCurve", np.ndarray, self.detectParams['contrastCurve'], helptext="Curve contrast"),
+ parlist = [Parameter("adaptiveHistEqu", np.bool, self.detectParams['adaptiveHistEqu'], helptext="Adaptive histogram equalization", show=False, linkedParameter='claheTileSize'),
+ Parameter("claheTileSize", int, self.detectParams['claheTileSize'], 1, 2048, 1, 1, helptext="Tile size for adaptive histogram adjustment\nThe Image will be split into tiles with size approx. (NxN)", show=True),
+ Parameter("contrastCurve", np.ndarray, self.detectParams['contrastCurve'], helptext="Curve contrast"),
Parameter("activateContrastCurve", np.bool, self.detectParams['activateContrastCurve'], helptext="activate Contrast curve", show=True, linkedParameter='contrastCurve'),
Parameter("blurRadius", int, self.detectParams['blurRadius'], 3, 99, 1, 2, helptext="Blur radius", show=True),
Parameter("invertThresh", np.bool, self.detectParams['invertThresh'], helptext="Invert the current threshold", show=False),
@@ -86,9 +90,9 @@ class Segmentation(object):
Parameter("activateUpThresh", np.bool, self.detectParams['activateUpThresh'], helptext="activate upper threshold", show=False, linkedParameter='upThresh'),
Parameter("upThresh", float, self.detectParams['upThresh'], .01, 1.0, 2, .02, helptext="Upper threshold", show=True),
Parameter("maxholebrightness", float, self.detectParams['maxholebrightness'], 0, 1, 2, 0.02, helptext="Close holes brighter than..", show = True),
-# Parameter("erodeconvexdefects", int, self.detectParams['erodeconvexdefects'], 0, 20, helptext="Erode convex defects", show=True), #TODO: Consider removing it entirely. It is usually not used...
- Parameter("minparticlearea", int, self.detectParams['minparticlearea'], 10, 1000, 0, 50, helptext="Min. particle pixel area", show=False),
- Parameter("minparticledistance", int, self.detectParams['minparticledistance'], 10, 1000, 0, 5, helptext="Min. distance between particles", show=False),
+ Parameter("minparticlearea", int, self.detectParams['minparticlearea'], 1, 1000, 0, 50, helptext="Min. particle pixel area", show=False),
+ Parameter("maxparticlearea", int, self.detectParams['maxparticlearea'], 10, 1E9, 0, 50, helptext="Max. particle pixel area", show=True),
+ Parameter("minparticledistance", int, self.detectParams['minparticledistance'], 5, 1000, 0, 5, helptext="Min. distance between particles", show=False),
Parameter("measurefrac", float, self.detectParams['measurefrac'], 0, 1, 2, stepsize = 0.05, helptext="measure fraction of particles", show=False),
Parameter("closeBackground", np.bool, self.detectParams['closeBackground'], helptext="close holes in sure background", show=False),
Parameter("sure_fg", None, helptext="Show sure foreground", show=True),
@@ -177,6 +181,7 @@ class Segmentation(object):
def closeBrightHoles(self, thresh, grayimage, maxbrightness):
n, labels, stats, centroids = cv2.connectedComponentsWithStats(thresh, 8, cv2.CV_32S)
maxbrightness = np.uint8(maxbrightness * 255)
+ print('num comps in brightHoles:', n)
for label in range(1, n):
up = stats[label, cv2.CC_STAT_TOP]
@@ -236,7 +241,25 @@ class Segmentation(object):
return thresh[1:-1, 1:-1]
- def getSureForeground(self, thresh, mindistance, minarea):
+ def filterThresholdByAreas(self, thresh, minarea, maxarea):
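+ # keep only connected components whose pixel area lies between minarea and maxarea and rebuild the threshold mask from them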
+ newthresh = np.zeros_like(thresh)
+ n, labels, stats, centroids = cv2.connectedComponentsWithStats(thresh, 8, cv2.CV_32S)
+ print('num comps:', n)
+
+ for label in range(1, n):
+ area = stats[label, cv2.CC_STAT_AREA]
+ if minarea < area < maxarea:
+ up = stats[label, cv2.CC_STAT_TOP]
+ left = stats[label, cv2.CC_STAT_LEFT]
+ width = stats[label, cv2.CC_STAT_WIDTH]
+ height = stats[label, cv2.CC_STAT_HEIGHT]
+ subthresh = np.uint8(255 * (labels[up:(up+height), left:(left+width)] == label))
+
+ newthresh[up:(up+height), left:(left+width)] += subthresh
+
+ return newthresh
+
+ def getSureForeground(self, thresh, mindistance):
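+ # area filtering is now done beforehand in filterThresholdByAreas; this only places watershed seed points at local maxima of the distance transform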
sure_fg = np.zeros_like(thresh)
n, labels, stats, centroids = cv2.connectedComponentsWithStats(thresh, 8, cv2.CV_32S)
@@ -245,22 +268,23 @@ class Segmentation(object):
left = stats[label, cv2.CC_STAT_LEFT]
width = stats[label, cv2.CC_STAT_WIDTH]
height = stats[label, cv2.CC_STAT_HEIGHT]
- area = stats[label, cv2.CC_STAT_AREA]
+# area = stats[label, cv2.CC_STAT_AREA]
+# if minarea < area < maxarea:
subimg = np.uint8(255 * (labels[up:(up+height), left:(left+width)] == label))
subdist = cv2.distanceTransform(subimg, cv2.DIST_L2,3)
subfg = np.uint8(peak_local_max(subdist, mindistance, indices = False))
-
- if subfg.max() > 0 and random() < self.measurefrac: #i.e., at least one maximum value was added
+
+ if subfg.max() > 0 and random() < self.measurefrac: #i.e., at least one maximum value was added
sure_fg[up:(up+height), left:(left+width)] += subfg
- elif area > minarea and random() < self.measurefrac:
+ elif random() < self.measurefrac:
#simply get maximum of subdist
submax = np.where(subdist == subdist.max())
sure_fg[up+submax[0][0], left+submax[1][0]] = 1
sure_fg = cv2.dilate(sure_fg, np.ones((3, 3)))
return sure_fg
-
+
def characterizeParticle(self, contours):
longellipse, shortellipse = np.nan, np.nan
@@ -269,8 +293,6 @@ class Segmentation(object):
if cnt.shape[0] >= 5: ##at least 5 points required for ellipse fitting...
ellipse = cv2.fitEllipse(cnt)
shortellipse, longellipse = ellipse[1]
- # double Sizes, as the ellipse returns half-axes
- # - > THIS is WRONG! fitEllipse returns the FULL width and height of the rotated ellipse
rect = cv2.minAreaRect(cnt)
long, short = rect[1]
if short>long:
@@ -307,6 +329,17 @@ class Segmentation(object):
gray = self.convert2Gray(img)
print("gray")
+ if self.adaptiveHistEqu:
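+ # optional contrast-limited adaptive histogram equalization (CLAHE); the tile grid is derived from the configured tile size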
+ numTilesX = max(1, round(img.shape[1]/self.claheTileSize))
+ numTilesY = max(1, round(img.shape[0]/self.claheTileSize))
+ clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(numTilesY,numTilesX))
+ gray = clahe.apply(gray)
+ if return_step=="claheTileSize": return gray, 0
+ print("adaptive Histogram Adjustment")
+
+ if self.cancelcomputation:
+ return None, None, None
+
if self.activateContrastCurve:
xi, arr = self.calculateHistFunction(self.contrastCurve)
gray = arr[gray]
@@ -362,7 +395,7 @@ class Segmentation(object):
return blur, 0
#close holes darkter than self.max_brightness
- self.closeBrightHoles(thresh, blur, self.maxholebrightness)
+ thresh = self.closeBrightHoles(thresh, blur, self.maxholebrightness)
print("closed holes")
if return_step=='maxholebrightness': return thresh, 0
@@ -382,16 +415,17 @@ class Segmentation(object):
# if self.erodeconvexdefects > 0: return thresh, 0
# else: return thresh, 0
- dist_transform = cv2.distanceTransform(thresh, cv2.DIST_L2,5)
- print("distanceTransform")
+ thresh = self.filterThresholdByAreas(thresh, self.minparticlearea, self.maxparticlearea)
+ print('filter threshold by areas')
+ if return_step == 'maxparticlearea': return thresh, 0
if self.cancelcomputation:
return None, None, None
-
+
####get sure_fg
'''the peak_local_max function takes the min distance between peaks. Unfortunately, that means that individual
particles smaller than that distance are consequently disregarded. Hence, we need a connected_components approach'''
- sure_fg = self.getSureForeground(thresh, self.minparticledistance, self.minparticlearea)
+ sure_fg = self.getSureForeground(thresh, self.minparticledistance)
sure_bg = cv2.dilate(thresh, np.ones((5, 5)), iterations = 1)
if self.closeBackground:
@@ -429,10 +463,19 @@ class Segmentation(object):
img[np.nonzero(sure_fg)] |= 1 #dilation of sure_fg is included in self.getSureForeground
img[np.nonzero(sure_bg)] |= 2
return img, 1
-
+
+
+ dist_transform = cv2.distanceTransform(thresh, cv2.DIST_L2,5)
+ print("distanceTransform")
+ if self.cancelcomputation:
+ return None, None, None
+
#ich habe jetzt nur noch den Skimage Watershed integriert. Oben auskommentiert der opencv watershed, falls wir ihn doch nochmal für irgendwas brauchen...
markers = ndi.label(sure_fg)[0]
markers = watershed(-dist_transform, markers, mask=sure_bg, compactness = self.compactness, watershed_line = True) #labels = 0 for background, 1... for particles
+
+ #filter markers by area limits
+# markers = self.filterMarkerImageByAreaLimits(markers, self.minparticlearea, self.maxparticlearea)
print("watershed")
if self.cancelcomputation:
@@ -471,8 +514,6 @@ class Segmentation(object):
newMeasPoint = MeasurementPoint(particleIndex, x[index] + x0, y[index] + y0)
measurementPoints[particleIndex].append(newMeasPoint)
-# getMeasurementPoints.append([x[index] + x0, y[index] + y0])
-
print(len(np.unique(markers))-1, len(contours))
print("stats")
diff --git a/viewitems.py b/viewitems.py
index 7b197129f0270b9b6d5af417a1c976871c8114ed..81958cf6e28df746754c02fa0c7d4e2d1011e16b 100644
--- a/viewitems.py
+++ b/viewitems.py
@@ -183,6 +183,7 @@ class ScanIndicator(QtWidgets.QGraphicsItem):
painter.drawText(rect, QtCore.Qt.AlignCenter, str(self.number))
painter.drawRect(rect)
+
class Edge(QtWidgets.QGraphicsItem):
def __init__(self, n1, n2):
@@ -200,16 +201,16 @@ class Edge(QtWidgets.QGraphicsItem):
def paint(self, painter, option, widget):
painter.setPen(QtCore.Qt.green)
painter.drawLine(self.n1.pos(), self.n2.pos())
+
class Node(QtWidgets.QGraphicsItem):
def __init__(self, point, view):
self.view = view
self.point = point
self.edges = []
- self.rectSize = 60
+ self.rectSize = 120
super().__init__()
self.setPos(self.point[0], self.point[1])
- self.rectSize = 2*self.rectSize #<--- why that? Why not set it to 120 right in the first place??
self.setFlag(QtWidgets.QGraphicsItem.ItemIsMovable)
self.setFlag(QtWidgets.QGraphicsItem.ItemSendsGeometryChanges)
self.setCacheMode(QtWidgets.QGraphicsItem.DeviceCoordinateCache)
@@ -226,9 +227,9 @@ class Node(QtWidgets.QGraphicsItem):
def paint(self, painter, option, widget):
painter.setBrush(QtGui.QColor(255,0,0,80))
painter.setPen(QtCore.Qt.red)
- painter.drawLine(0,-self.rectSize/2.,0,self.rectSize/2)
- painter.drawLine(-self.rectSize/2.,0,self.rectSize/2,0)
- rect = QtCore.QRectF(-self.rectSize,-self.rectSize,2*self.rectSize,2*self.rectSize)
+ painter.drawLine(0, -self.rectSize/2., 0, self.rectSize/2)
+ painter.drawLine(-self.rectSize/2., 0, self.rectSize/2, 0)
+ rect = QtCore.QRectF(-self.rectSize, -self.rectSize, 2*self.rectSize, 2*self.rectSize)
painter.drawEllipse(rect)
def itemChange(self, change, value):