# -*- coding: utf-8 -*-
"""
GEPARD - Gepard-Enabled PARticle Detection

Copyright (C) 2018 Lars Bittrich and Josef Brandt, Leibniz-Institut für
Polymerforschung Dresden e. V.

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program, see COPYING.
If not, see <https://www.gnu.org/licenses/>.
"""
import os
from itertools import permutations

import cv2
import numpy as np
from scipy.optimize import least_squares
from PyQt5 import QtCore, QtWidgets

from .zeissxml import ZeissHandler, make_parser
from .opticalscan import PointCoordinates
from .helperfunctions import cv2imread_fix, cv2imwrite_fix
from .ramancom.ramancontrol import defaultPath
from .dataset import DataSet


class ZeissImporter(QtWidgets.QDialog):
    """Dialog for importing a Zeiss export (EDF.tif, 3D.tif depth map and XML
    metadata) into a new GEPARD dataset. Marker positions measured at the
    Raman spot are used to map the Zeiss coordinates onto instrument
    coordinates."""

    def __init__(self, fname, ramanctrl, parent=None):
        super().__init__(parent)
        if not ramanctrl.connect() or not self.readImportData(fname):
            msg = QtWidgets.QMessageBox()
            msg.setText('Connection failed! Please enable remote control.')
            msg.exec()
            self.validimport = False
            return
        else:
            self.validimport = True
        self.ramanctrl = ramanctrl

        # group box with one coordinate entry per Zeiss marker
        vbox = QtWidgets.QVBoxLayout()
        pointgroup = QtWidgets.QGroupBox('Marker coordinates at Raman spot [µm]', self)
        self.points = PointCoordinates(len(self.markers), self.ramanctrl, self,
                                       names=[m.name for m in self.markers])
        self.points.pimageOnly.setVisible(False)
        pointgroup.setLayout(self.points)
        self.points.readPoint.connect(self.takePoint)

        self.pconvert = QtWidgets.QPushButton('Convert', self)
        self.pexit = QtWidgets.QPushButton('Cancel', self)
        self.pconvert.released.connect(self.convert)
        self.pexit.released.connect(self.reject)
        self.pconvert.setEnabled(False)

        btnLayout = QtWidgets.QHBoxLayout()
        btnLayout.addStretch()
        btnLayout.addWidget(self.pconvert)
        btnLayout.addWidget(self.pexit)

        # blur kernel size for smoothing the depth map (odd values only)
        label = QtWidgets.QLabel("Z-Image blur radius", self)
        self.blurspinbox = QtWidgets.QSpinBox(self)
        self.blurspinbox.setMinimum(3)
        self.blurspinbox.setMaximum(99)
        self.blurspinbox.setSingleStep(2)
        self.blurspinbox.setValue(5)
        blurlayout = QtWidgets.QHBoxLayout()
        blurlayout.addWidget(label)
        blurlayout.addWidget(self.blurspinbox)
        blurlayout.addStretch()

        vbox.addWidget(pointgroup)
        vbox.addLayout(blurlayout)
        vbox.addLayout(btnLayout)
        self.setLayout(vbox)

    def readImportData(self, fname):
        """Locate the Zeiss export files next to fname and parse the XML
        metadata; shows the collected errors and returns False if anything
        is missing."""
        path = os.path.split(fname)[0]
        self.zmapimgname = os.path.join(path, '3D.tif')
        self.edfimgname = os.path.join(path, 'EDF.tif')
        xmlname = os.path.join(path, '3D.tif_metadata.xml')
        errmsges = []
        if not os.path.exists(self.zmapimgname):
            errmsges.append('Depth map image not found: 3D.tif')
        if not os.path.exists(self.edfimgname):
            errmsges.append('EDF image not found: EDF.tif')
        if not os.path.exists(xmlname):
            errmsges.append('XML metadata not found: 3D.tif_metadata.xml')
        else:
            parser = make_parser()
            z = ZeissHandler()
            parser.setContentHandler(z)
            parser.parse(xmlname)
            if len(z.markers) < 3:
                errmsges.append('Fewer than 3 markers found to adjust coordinates!')
            if None in [z.region.centerx, z.region.centery,
                        z.region.width, z.region.height]:
                errmsges.append('Image dimensions incomplete or missing!')
            if None in [z.zrange.z0, z.zrange.zn, z.zrange.dz]:
                errmsges.append('ZStack information missing or incomplete!')
        if len(errmsges) > 0:
            QtWidgets.QMessageBox.critical(self, 'Error!', '\n'.join(errmsges),
                                           QtWidgets.QMessageBox.Ok,
                                           QtWidgets.QMessageBox.Ok)
            return False
        self.region = z.region
        self.zrange = z.zrange
        self.markers = z.markers
        return True

    @QtCore.pyqtSlot(float, float, float)
    def takePoint(self, x, y, z):
        points = self.points.getPoints()
        if len(points) >= 3:
            self.pconvert.setEnabled(True)

    @QtCore.pyqtSlot()
    def convert(self):
        fname = QtWidgets.QFileDialog.getSaveFileName(self, 'Create New GEPARD Project',
                                                      defaultPath, '*.pkl')[0]
        if fname == '':
            return
        dataset = DataSet(fname, newProject=True)
        T, pc, zpc = self.getTransform()
        imgshape, warp_mat = self.convertZimg(dataset, T, pc, zpc)
        self.convertImage(dataset, warp_mat)
        dataset.save()
        self.gepardname = dataset.fname
        self.accept()

    def convertImage(self, dataset, warp_mat):
        img = cv2imread_fix(self.edfimgname)
        img = cv2.warpAffine(img, warp_mat, img.shape[:2][::-1])
        cv2imwrite_fix(dataset.getImageName(), img)

    def convertZimg(self, dataset, T, pc, zpc):
        """Warp the Zeiss depth map into the dataset's pixel frame and derive
        the z positions in instrument coordinates."""
        N = int(round((self.zrange.zn - self.zrange.z0) / self.zrange.dz))
        dataset.zpositions = np.linspace(self.zrange.z0, self.zrange.zn, N) - zpc[2] + pc[2]
        zimg = cv2imread_fix(self.zmapimgname, cv2.IMREAD_GRAYSCALE)
        zmdist = zimg.mean()
        zm = zmdist/255.*(self.zrange.zn - self.zrange.z0) + self.zrange.z0
        radius = self.blurspinbox.value()
        blur = cv2.GaussianBlur(zimg, (radius, radius), 0)
        pshift = self.ramanctrl.getRamanPositionShift()
        dataset.pshift = pshift
        pixelscale = self.region.width/zimg.shape[1]
        # use input image as single image acquired in one shot
        dataset.imagedim_df = (self.region.width, self.region.height, 0.0)
        dataset.pixelscale_df = pixelscale
        dataset.imagedim_bf = (self.region.width, self.region.height, 0.0)
        dataset.pixelscale_bf = pixelscale
        # set image center as reference point in data set (transform from Zeiss)
        p0 = np.dot((np.array([self.region.centerx, self.region.centery, zm]) - zpc), T)[:2] + pc[:2]
        dataset.readin = False
        dataset.lastpos = p0
        dataset.maxdim = p0 + p0
        # pixel triangle for coordinate warping transformation
        srcTri = np.array([[0, 0],
                           [zimg.shape[1] - 1, 0],
                           [0, zimg.shape[0] - 1]]).astype(np.float32)
        # upper left point (0,0) in Zeiss coordinates:
        z0 = np.array([self.region.centerx - self.region.width/2,
                       self.region.centery + self.region.height/2])
        # transform pixel data to Zeiss coordinates
        dstTri = np.array([[p[0]*pixelscale + z0[0],
                            z0[1] - p[1]*pixelscale, zm]
                           for p in srcTri]).astype(np.double) - zpc
        # transform to Raman coordinates
        dstTri = np.dot(dstTri, T) + pc[np.newaxis, :]
        # tilt blur image based on transformed z and adapt zpositions
        x = np.linspace(0, 1, blur.shape[1])
        y = np.linspace(0, 1, blur.shape[0])
        x, y = np.meshgrid(x, y)
        zmap = x*(dstTri[1, 2] - dstTri[0, 2]) + y*(dstTri[2, 2] - dstTri[0, 2]) + \
            (zimg*((self.zrange.zn - self.zrange.z0)/255.) -
             zmdist*((self.zrange.zn - self.zrange.z0)/255.))
        zmin, zmax = zmap.min(), zmap.max()
        dataset.zpositions = np.array([zmin, zmax])
        blur = (zmap - zmin)*(255./(zmax - zmin))
        blur[blur > 255.] = 255.
        blur = np.uint8(blur)
        # transform triangle back to pixel coordinates
        dstTri = np.array([dataset.mapToPixel(p[:2]) for p in dstTri]).astype(np.float32)
        warp_mat = cv2.getAffineTransform(srcTri, dstTri)
        blur = cv2.warpAffine(blur, warp_mat, zimg.shape[::-1])
        zimgname = dataset.getZvalImageName()
        cv2imwrite_fix(zimgname, blur)
        return zimg.shape, warp_mat

    def getTransform(self):
        """Fit a rigid rotation and shift that maps the Zeiss marker
        coordinates onto the measured Raman positions, trying all marker
        permutations and keeping the best fit."""
        points = self.points.getPoints()
        pshift = self.ramanctrl.getRamanPositionShift()
        points[:, 0] -= pshift[0]
        points[:, 1] -= pshift[1]
        zpoints = np.array([m.getPos() for m in self.markers], dtype=np.double)
        pc = points.mean(axis=0)
        zpc = zpoints.mean(axis=0)
        points -= pc[np.newaxis, :]
        zpoints -= zpc[np.newaxis, :]

        def getRotMat(angles):
            # rotation matrix in z-x-z Euler angle convention
            c1, s1 = np.cos(angles[0]), np.sin(angles[0])
            c2, s2 = np.cos(angles[1]), np.sin(angles[1])
            c3, s3 = np.cos(angles[2]), np.sin(angles[2])
            return np.mat([[c1*c3 - s1*c2*s3, -c1*s3 - s1*c2*c3, s1*s2],
                           [s1*c3 + c1*c2*s3, -s1*s3 + c1*c2*c3, -c1*s2],
                           [s2*s3, s2*c3, c2]])

        # find the transformation matrix with best fit for small angles in
        # [-45°, 45°] for all permutations of markers
        permbest = None
        pointsbest = None
        for perm in permutations(range(points.shape[0])):
            ppoints = points[perm, :]

            def err(angles_shift):
                T = getRotMat(angles_shift[:3]).T.A
                return (np.dot(zpoints, T) - angles_shift[np.newaxis, 3:]
                        - ppoints).ravel()

            angle = np.zeros(3)
            opt = least_squares(err, np.concatenate((angle, np.zeros(3))),
                                bounds=(np.array([-np.pi/4]*3 + [-np.inf]*3),
                                        np.array([np.pi/4]*3 + [np.inf]*3)),
                                method='dogbox')
            if permbest is None or permbest.cost > opt.cost:
                print("Current best permutation:", perm, flush=True)
                permbest = opt
                pointsbest = ppoints

        optangles = permbest.x[:3]
        shift = permbest.x[3:]
        T = getRotMat(optangles).T.A
        e = (np.dot(zpoints, T) - shift[np.newaxis, :] - pointsbest)
        print("Transformation angles:", optangles, flush=True)
        print("Transformation shift:", shift, flush=True)
        print("Transformation err:", e, flush=True)
        d = np.linalg.norm(e, axis=1)
        if np.any(d > 1.):
            QtWidgets.QMessageBox.warning(self, 'Warning!',
                                          f'Transformation residuals are large: {d}',
                                          QtWidgets.QMessageBox.Ok,
                                          QtWidgets.QMessageBox.Ok)
        return T, pc - shift, zpc
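

if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module. It assumes the
    # package is launched so that the relative imports above resolve (e.g.
    # via ``python -m`` from the package root), and it substitutes a
    # hypothetical stand-in for the Raman control object; in practice a real
    # control instance from .ramancom.ramancontrol should be passed, since
    # PointCoordinates may call further methods on it.
    import sys

    class _DummyRamanControl:
        # Stand-in exposing only the methods ZeissImporter itself calls.
        def connect(self):
            return True

        def getRamanPositionShift(self):
            return (0.0, 0.0)

    app = QtWidgets.QApplication(sys.argv)
    # any file inside the Zeiss export folder works; 3D.tif is used here only
    # as an illustrative default
    fname = sys.argv[1] if len(sys.argv) > 1 else os.path.join(defaultPath, '3D.tif')
    importer = ZeissImporter(fname, _DummyRamanControl())
    if importer.validimport:
        importer.exec()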