Commit f1fe95fd authored by Lars Bittrich's avatar Lars Bittrich

transformation fix to separate the previously implicit image orientation into pixel sign values

parent 10e10e12
......@@ -44,7 +44,7 @@ class DataBaseWindow(QtWidgets.QMainWindow):
self.path = os.path.join(Path.home(), 'gepard', 'databases')
self.importPath = self.path
if not os.path.exists(self.path):
os.mkdir(self.path)
os.makedirs(self.path)
self.activeDatabase = None
self.activeSpectrum = None
self.activeSpectrumName = None
......
# -*- coding: utf-8 -*-
"""
GEPARD - Gepard-Enabled PARticle Detection
Copyright (C) 2018 Lars Bittrich and Josef Brandt, Leibniz-Institut für
Polymerforschung Dresden e. V. <bittrich-lars@ipfdd.de>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program, see COPYING.
If not, see <https://www.gnu.org/licenses/>.
"""
import os
import pickle
import numpy as np
import cv2
# -*- coding: utf-8 -*-
"""
GEPARD - Gepard-Enabled PARticle Detection
Copyright (C) 2018 Lars Bittrich and Josef Brandt, Leibniz-Institut für
Polymerforschung Dresden e. V. <bittrich-lars@ipfdd.de>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program, see COPYING.
If not, see <https://www.gnu.org/licenses/>.
"""
import os
import pickle
import numpy as np
import cv2
from .helperfunctions import cv2imread_fix, cv2imwrite_fix
from copy import copy
currentversion = 2
def loadData(fname):
    """Load a pickled dataset from *fname* and upgrade it in place.

    A fresh DataSet is created first so that attributes introduced in
    newer versions exist with their defaults; the stored state is then
    overlaid onto it. Datasets older than `currentversion` are migrated
    via legacyConversion(); otherwise the separately stored z-value
    image is reloaded if it was swapped out on save.

    NOTE(review): pickle.load can execute arbitrary code — only open
    project files from trusted sources.
    """
    with open(fname, "rb") as fp:
        ds = pickle.load(fp)
        ds.fname = fname
        # always flag freshly loaded data: the coordinate system may have
        # changed since the project was saved
        ds.readin = True
        ds.updatePath()
        # start from a default-initialized DataSet, then copy the stored
        # attributes over it
        retds = DataSet(fname)
        retds.version = 0
        retds.__dict__.update(ds.__dict__)
        if retds.version < currentversion:
            retds.legacyConversion()
        elif retds.zvalimg == "saved":
            retds.loadZvalImg()
    return retds
def saveData(dataset, fname):
    """Pickle *dataset* to *fname*.

    The zvalimg attribute can be large, so it is stored separately as a
    tif file once after its creation; here it is temporarily replaced by
    the marker string "saved" so the pickle stays small, and restored
    afterwards.
    """
    with open(fname, "wb") as fp:
        original_zvalimg = dataset.zvalimg
        if original_zvalimg is not None:
            # placeholder tells loadData to reload the image from the tif
            dataset.zvalimg = "saved"
        pickle.dump(dataset, fp, protocol=-1)
        dataset.zvalimg = original_zvalimg
def arrayCompare(a1, a2):
    """Compare two numpy arrays for equality, treating NaN == NaN as
    equal in float arrays (matching the NaN semantics of listCompare).

    :param a1: first array
    :param a2: second array; must match a1's shape to compare equal
    :return: True if both arrays are considered equal

    Fix: previously every position where a1 was NaN was skipped, so a
    NaN in a1 paired with a regular number in a2 went unnoticed and the
    arrays compared equal. NaN positions must now coincide.
    """
    if a1.shape != a2.shape:
        return False
    if a1.dtype != np.float32 and a1.dtype != np.float64:
        # non-float data cannot contain NaN; plain elementwise compare
        return np.all(a1 == a2)
    nan1 = np.isnan(a1)
    if not np.any(nan1):
        return np.all(a1 == a2)
    # NaN masks must be identical; a non-float a2 has no NaNs at all
    if a2.dtype == np.float32 or a2.dtype == np.float64:
        nan2 = np.isnan(a2)
    else:
        nan2 = np.zeros(a2.shape, dtype=bool)
    if not np.array_equal(nan1, nan2):
        return False
    return np.all(a1[~nan1] == a2[~nan1])
def listCompare(l1, l2):
    """Elementwise comparison of two sequences.

    Nested ndarrays are compared via arrayCompare, nested lists/tuples
    recursively; two NaN scalars count as equal.

    :param l1: first sequence
    :param l2: second sequence
    :return: True if both sequences are considered equal

    Fix: the previous NaN check called np.isnan on both values for any
    unequal pair, which raised a TypeError for non-numeric elements
    (e.g. two different strings). NaN is now detected via the
    self-inequality property (x != x), which is safe for any type.
    """
    if len(l1) != len(l2):
        return False
    for v1, v2 in zip(l1, l2):
        if isinstance(v1, np.ndarray):
            if not isinstance(v2, np.ndarray) or not arrayCompare(v1, v2):
                return False
        elif isinstance(v1, (list, tuple)):
            if not isinstance(v2, (list, tuple)) or not listCompare(v1, v2):
                return False
        elif v1 != v2:
            # NaN is the only value unequal to itself; a NaN pair is
            # treated as equal, everything else is a real difference
            if not (v1 != v1 and v2 != v2):
                return False
    return True
def recursiveDictCompare(d1, d2):
    """Check that every entry of d1 has an equal counterpart in d2.

    Keys present only in d2 are ignored, as are pairs where either
    value is None (attributes may be unset in one of the versions).
    Diagnostic output is printed for every key.
    """
    for key, v1 in d1.items():
        if key not in d2:
            print("key missing in d2:", key, flush=True)
            return False
        v2 = d2[key]
        print(key, type(v1), type(v2), flush=True)
        if isinstance(v1, np.ndarray):
            if isinstance(v2, np.ndarray) and arrayCompare(v1, v2):
                continue
            print("data is different!", v1, v2)
            return False
        if isinstance(v1, dict):
            if not isinstance(v2, dict):
                print("data is different!", v1, v2)
                return False
            if not recursiveDictCompare(v1, v2):
                return False
            continue
        if isinstance(v1, (list, tuple)):
            if isinstance(v2, (list, tuple)) and listCompare(v1, v2):
                continue
            print("data is different!", v1, v2)
            return False
        if v1 != v2 and (v1 is not None) and (v2 is not None):
            print("data is different!", v1, v2)
            return False
    return True
class DataSet(object):
    """Central data container of one GEPARD project.

    Holds the optical-scan state (stitching grid, boundary, z-stack
    positions, height map), the particle detection parameters and
    results, and the raman measurement bookkeeping. Instances are
    pickled via saveData()/loadData(); legacyConversion() migrates
    files written by older program versions.
    """
    def __init__(self, fname, newProject=False):
        self.fname = fname
        # parameters specifically for optical scan
        self.version = currentversion
        self.lastpos = None  # last stage position; origin for the pixel<->length mapping below
        self.maxdim = None   # scanned area; maxdim[:2] and maxdim[2:] are the two corner points (see legacyConversion)
        self.pixelscale_df = None # µm / pixel --> scale of DARK FIELD camera (used for image stitching)
        self.pixelscale_bf = None # µm / pixel of BRIGHT FIELD camera (set to same as dark field, if both use the same camera)
        self.imagedim_bf = None # width, height, angle of BRIGHT FIELD camera
        self.imagedim_df = None # width, height, angle of DARK FIELD camera (set to same as bright field, if both use the same camera)
        self.imagescanMode = 'df'  # was the fullimage acquired in dark- or brightfield?
        self.fitpoints = []  # manually adjusted positions aquired to define the specimen geometry
        self.fitindices = []  # which of the five positions in the ui are already known
        self.boundary = []  # scan boundary computed by a circle around the fitpoints + manual adjustments
        self.grid = []  # scan grid positions for optical scan
        self.zpositions = []  # z-positions for optical scan
        self.heightmap = None  # plane-fit coefficients (a, b, c): z = a*x + b*y + c (see mapHeight)
        self.zvalimg = None    # 8-bit image of best-focus z fractions; stored separately as tif (see saveZvalImg)
        # parameters specifically for raman scan
        self.pshift = None  # shift of raman scan position relative to image center
        self.coordOffset = [0, 0]  # offset of entire coordinate system
        self.seedpoints = np.array([])
        self.seeddeletepoints = np.array([])
        # particle detection parameters, edited through the detection UI
        self.detectParams = {'points': np.array([[50,0],[100,200],[200,255]]),
                             'contrastcurve': True,
                             'blurRadius': 9,
                             'threshold': 0.2,
                             'maxholebrightness': 0.5,
                             'erodeconvexdefects': 0,
                             'minparticlearea': 20,
                             'minparticledistance': 20,
                             'measurefrac': 1,
                             'compactness': 0.1,
                             'seedRad': 3}
        self.ramanpoints = []
        self.particlecontours = []
        self.particlestats = []
        self.ramanscansortindex = None
        self.ramanscandone = False
        self.results = {'polymers': None,
                        'hqis': None,
                        'additives': None,
                        'additive_hqis': None}
        self.resultParams = {'minHQI': None,
                             'compHQI': None}
        self.spectraPath = None
        self.particles2spectra = None  # links idParticle to corresponding idSpectra (i.e., first measured particle (ID=0) is linked to spectra indices 0 and 1)
        self.colorSeed = 'default'
        self.resultsUploadedToSQL = []
        self.readin = True  # a value that is always set to True at loadData
        # and marks that the coordinate system might have changed in the meantime
        self.mode = "prepare"
        if newProject:
            self.fname = self.newProject(fname)
        self.updatePath()

    def __eq__(self, other):
        # two datasets are equal if all their (nested) attributes compare equal
        return recursiveDictCompare(self.__dict__, other.__dict__)

    def getPixelScale(self, mode=None):
        """Return the µm-per-pixel scale for *mode* ('df' or 'bf');
        defaults to the mode the fullimage was acquired in."""
        if mode is None:
            mode = self.imagescanMode
        return (self.pixelscale_df if mode == 'df' else self.pixelscale_bf)

    def saveZvalImg(self):
        """Write the z-value image to its tif file (see getZvalImageName)."""
        if self.zvalimg is not None:
            cv2imwrite_fix(self.getZvalImageName(), self.zvalimg)

    def loadZvalImg(self):
        """Reload the z-value image from disk, if it exists."""
        if os.path.exists(self.getZvalImageName()):
            self.zvalimg = cv2imread_fix(self.getZvalImageName(), cv2.IMREAD_GRAYSCALE)

    def legacyConversion(self, recreatefullimage=False):
        """Upgrade a dataset loaded from an older file version to the
        current version, one version step at a time.

        :param recreatefullimage: force re-stitching the fullimage from
            the raw grid images instead of reusing the legacy png
        """
        if self.version==0:
            print("Converting legacy version 0 to 1")
            print("This may take some time")
            # local imports as these functions are only needed for the rare occasion of legacy conversion
            from opticalscan import loadAndPasteImage
            # try to load png and check for detection contours
            recreatefullimage = recreatefullimage or not os.path.exists(self.getLegacyImageName())
            if not recreatefullimage:
                img = cv2imread_fix(self.getLegacyImageName())
                Nc = len(self.particlecontours)
                if Nc>0:
                    # probe one contour: pure green pixels (BGR) mean contours
                    # were drawn into the saved image, so it is unusable
                    contour = self.particlecontours[Nc//2]
                    contpixels = img[contour[:,0,1],contour[:,0,0]]
                    if np.all(contpixels[:,1]==255) and np.all(contpixels[:,2]==0) \
                       and np.all(contpixels[:,0]==0):
                        recreatefullimage = True
                if not recreatefullimage:
                    cv2imwrite_fix(self.getImageName(), img)
                del img
            if recreatefullimage:
                print("recreating fullimage from grid data")
                imgdata = None
                zvalimg = None
                Ngrid = len(self.grid)
                width, height, rotationvalue = self.imagedim_df
                p0, p1 = self.maxdim[:2], self.maxdim[2:]
                for i in range(Ngrid):
                    print(f"Processing image {i+1} of {Ngrid}")
                    names = []
                    for k in range(len(self.zpositions)):
                        names.append(os.path.join(self.getScanPath(), f"image_{i}_{k}.bmp"))
                    p = self.grid[i]
                    imgdata, zvalimg = loadAndPasteImage(names, imgdata, zvalimg, width,
                                                         height, rotationvalue, p0, p1, p)
                self.zvalimg = zvalimg
                cv2imwrite_fix(self.getImageName(), cv2.cvtColor(imgdata, cv2.COLOR_RGB2BGR))
                del imgdata
                self.saveZvalImg()
            if "particleimgs" in self.__dict__:
                del self.particleimgs
            self.version = 1
        if self.version == 1:
            print("Converting legacy version 1 to 2")
            # version 2 split pixelscale/imagedim into bright-/darkfield variants
            if hasattr(self, 'pixelscale'):
                print('pixelscale was', self.pixelscale)
                self.pixelscale_bf = self.pixelscale
                self.pixelscale_df = self.pixelscale
                del self.pixelscale
            if hasattr(self, 'imagedim'):
                self.imagedim_bf = self.imagedim
                self.imagedim_df = self.imagedim
                del self.imagedim
            self.version = 2
        # add later conversion for higher version numbers here

    def getSubImage(self, img, index, draw=True):
        """Return the bounding-box crop of particle *index* from *img*,
        optionally with the contour drawn into it in green."""
        contour = self.particlecontours[index]
        x0, x1 = contour[:,0,0].min(), contour[:,0,0].max()
        y0, y1 = contour[:,0,1].min(), contour[:,0,1].max()
        subimg = img[y0:y1+1,x0:x1+1].copy()
        if draw:
            cv2.drawContours(subimg, [contour], -1, (0,255,0), 1)
        return subimg

    def getZval(self, pixelpos):
        """Map a fullimage pixel position to its focus z-value (µm).

        The z-value image stores the focus position as an 8-bit
        fraction of the [min, max] z-position range.
        NOTE(review): assumes zpositions is a numpy array here (uses
        .min()/.max()), although __init__ initializes it as a list.
        """
        assert self.zvalimg is not None
        zp = self.zvalimg[round(pixelpos[1]), round(pixelpos[0])]
        z0, z1 = self.zpositions.min(), self.zpositions.max()
        return zp/255.*(z1-z0) + z0

    def mapHeight(self, x, y):
        """Evaluate the fitted specimen height plane at stage position (x, y)."""
        assert not self.readin
        assert self.heightmap is not None
        return self.heightmap[0]*x + self.heightmap[1]*y + self.heightmap[2]

    def mapToPixel(self, p, mode='df', force=False):
        """Convert a stage position *p* (µm) to fullimage pixel coordinates.

        NOTE(review): assumes lastpos is the center of the last image
        tile — confirm with the optical scan code. The image y axis
        runs opposite to the stage y axis (hence p0[1] - p[1]).

        :param mode: 'df' or 'bf' camera geometry
        :param force: skip the readin guard (the coordinate system may
            have changed since loading; pass True to convert anyway)
        """
        if not force:
            assert not self.readin
        p0 = copy(self.lastpos)
        if mode == 'df':
            # shift origin to the image corner: -width/2 in x, +height/2 in y
            p0[0] -= self.imagedim_df[0]/2
            p0[1] += self.imagedim_df[1]/2
            return (p[0] - p0[0])/self.pixelscale_df, (p0[1] - p[1])/self.pixelscale_df
        elif mode == 'bf':
            p0[0] -= self.imagedim_bf[0]/2
            p0[1] += self.imagedim_bf[1]/2
            return (p[0] - p0[0])/self.pixelscale_bf, (p0[1] - p[1])/self.pixelscale_bf
        else:
            # NOTE(review): unlike mapToLength this silently returns None
            print('mapToPixelMode not understood')
            return

    def mapToLength(self, pixelpos, mode='df', force=False):
        """Convert fullimage pixel coordinates to a stage position (µm),
        including the user-defined coordOffset.

        :param mode: 'df' or 'bf' camera geometry
        :param force: skip the readin guard
        :raises ValueError: for an unknown *mode*
        """
        if not force:
            assert not self.readin
        p0 = copy(self.lastpos)
        p0[0] += self.coordOffset[0]
        p0[1] += self.coordOffset[1]
        if mode == 'df':
            p0[0] -= self.imagedim_df[0]/2
            p0[1] += self.imagedim_df[1]/2
            return (pixelpos[0]*self.pixelscale_df + p0[0]), (p0[1] - pixelpos[1]*self.pixelscale_df)
        elif mode == 'bf':
            p0[0] -= self.imagedim_bf[0]/2
            p0[1] += self.imagedim_bf[1]/2
            return (pixelpos[0]*self.pixelscale_bf + p0[0]), (p0[1] - pixelpos[1]*self.pixelscale_bf)
        else:
            raise ValueError(f'mapToLength mode: {mode} not understood')

    def mapToLengthRaman(self, pixelpos, microscopeMode='df', noz=False):
        """Convert a pixel position to raman stage coordinates (x, y, z).

        Applies the raman shift pshift on top of mapToLength; z is the
        plane height plus the focus offset from the z-value image, or
        None when noz=True.
        """
        p0x, p0y = self.mapToLength(pixelpos, mode = microscopeMode)
        x, y = p0x + self.pshift[0], p0y + self.pshift[1]
        z = None
        if not noz:
            z = self.mapHeight(x, y)
            z += self.getZval(pixelpos)
        return x, y, z

    def newProject(self, fname):
        """Create the project directory <dir>/<name>/ and return the
        full path of the project pkl file inside it; if that file
        already exists, load the stored project into self instead."""
        path = os.path.split(fname)[0]
        name = os.path.splitext(os.path.basename(fname))[0]
        newpath = os.path.join(path, name)
        fname = os.path.join(newpath, name + ".pkl")
        if not os.path.exists(newpath):
            os.mkdir(newpath) # for new projects a directory will be created
        elif os.path.exists(fname): # if this project is already there, load it instead
            self.__dict__.update(loadData(fname).__dict__)
        return fname

    def getScanPath(self):
        """Return (and create on first use) the raw scan image directory."""
        scandir = os.path.join(self.path, "scanimages")
        if not os.path.exists(scandir):
            os.mkdir(scandir)
        return scandir

    def updatePath(self):
        # derive project directory and project name from the pkl file name
        self.path = os.path.split(self.fname)[0]
        self.name = os.path.splitext(os.path.basename(self.fname))[0]

    def getImageName(self):
        return os.path.join(self.path, 'fullimage.tif')

    def getZvalImageName(self):
        return os.path.join(self.path, "zvalues.tif")

    def getLegacyImageName(self):
        # fullimage file name used by version-0 projects
        return os.path.join(self.path, "fullimage.png")

    def getLegacyDetectImageName(self):
        return os.path.join(self.path, "detectimage.png")

    def getDetectImageName(self):
        raise NotImplementedError("No longer implemented due to change in API")

    def getTmpImageName(self):
        return os.path.join(self.path, "tmp.bmp")

    def saveParticleData(self):
        """Export of particle data to a text file — currently disabled,
        see the printed message below for the reason."""
        print('Not saving ParticleData into text file...:\nThe current output format might be wrong, if multiple spectra per particle are present...')
        # if len(self.ramanscansortindex)>0:
        # data = []
        # pixelscale = (self.pixelscale_df if self.imagescanMode == 'df' else self.pixelscale_bf)
        # for i in self.ramanscansortindex:
        # data.append(list(self.ramanpoints[i])+list(self.particlestats[i]))
        # data = np.array(data)
        # data[:,0], data[:,1], z = self.mapToLengthRaman((data[:,0], data[:,1]), microscopeMode=self.imagescanMode, noz=True)
        # data[:,2:7] *= pixelscale
        # header = "x [µm], y [µm], length [µm], height [µm], length_ellipse [µm], height_ellipse [µm]"
        # if data.shape[1]>6:
        # header = header + ", area [µm^2]"
        # data[:,6] *= pixelscale
        # np.savetxt(os.path.join(self.path, "particledata.txt"), data,
        # header=header)

    def save(self):
        """Pickle this dataset to its project file."""
        saveData(self, self.fname)

    def saveBackup(self):
        """Save a copy as <name>_backup_<inc>.pkl next to the project
        file, using the first unused increment, and return the backup
        file name."""
        inc = 0
        while True:
            directory = os.path.dirname(self.fname)
            filename = self.name + '_backup_' + str(inc) + '.pkl'
            path = os.path.join(directory, filename)
            if os.path.exists(path):
                inc += 1
            else:
                saveData(self, path)
                return filename
from copy import copy
currentversion = 2
def loadData(fname):
    """Load a pickled dataset from *fname* and upgrade it in place.

    A fresh DataSet provides defaults for attributes added in newer
    versions; the stored state is overlaid onto it. Old versions are
    migrated via legacyConversion(); otherwise the separately stored
    z-value image is reloaded.

    NOTE(review): pickle.load can execute arbitrary code — only open
    project files from trusted sources.
    """
    retds = None
    with open(fname, "rb") as fp:
        ds = pickle.load(fp)
        ds.fname = fname
        # always flag freshly loaded data: the coordinate system may have
        # changed since the project was saved
        ds.readin = True
        ds.updatePath()
        # default-initialized DataSet first, then overlay the stored state
        retds = DataSet(fname)
        retds.version = 0
        retds.__dict__.update(ds.__dict__)
        if retds.version < currentversion:
            retds.legacyConversion()
        elif retds.zvalimg=="saved":
            retds.loadZvalImg()
    return retds
def saveData(dataset, fname):
    """Pickle *dataset* to *fname*.

    zvalimg is rather large and thus it is saved separately in a tif
    file only once after its creation; the pickle temporarily stores
    the marker string "saved" instead, and the attribute is restored
    afterwards.
    """
    with open(fname, "wb") as fp:
        zvalimg = dataset.zvalimg
        if zvalimg is not None:
            # marker tells loadData to reload the image from the tif file
            dataset.zvalimg = "saved"
        pickle.dump(dataset, fp, protocol=-1)
        dataset.zvalimg = zvalimg
def arrayCompare(a1, a2):
    """Compare two numpy arrays for equality, treating NaN == NaN as
    equal in float arrays (matching the NaN semantics of listCompare).

    :param a1: first array
    :param a2: second array; must match a1's shape to compare equal
    :return: True if both arrays are considered equal

    Fix: previously every position where a1 was NaN was skipped, so a
    NaN in a1 paired with a regular number in a2 went unnoticed and the
    arrays compared equal. NaN positions must now coincide.
    """
    if a1.shape != a2.shape:
        return False
    if a1.dtype != np.float32 and a1.dtype != np.float64:
        # non-float data cannot contain NaN; plain elementwise compare
        return np.all(a1 == a2)
    nan1 = np.isnan(a1)
    if not np.any(nan1):
        return np.all(a1 == a2)
    # NaN masks must be identical; a non-float a2 has no NaNs at all
    if a2.dtype == np.float32 or a2.dtype == np.float64:
        nan2 = np.isnan(a2)
    else:
        nan2 = np.zeros(a2.shape, dtype=bool)
    if not np.array_equal(nan1, nan2):
        return False
    return np.all(a1[~nan1] == a2[~nan1])
def listCompare(l1, l2):
    """Elementwise comparison of two sequences.

    Nested ndarrays are compared via arrayCompare, nested lists/tuples
    recursively; two NaN scalars count as equal.

    :param l1: first sequence
    :param l2: second sequence
    :return: True if both sequences are considered equal

    Fix: the previous NaN check called np.isnan on both values for any
    unequal pair, which raised a TypeError for non-numeric elements
    (e.g. two different strings). NaN is now detected via the
    self-inequality property (x != x), which is safe for any type.
    """
    if len(l1) != len(l2):
        return False
    for v1, v2 in zip(l1, l2):
        if isinstance(v1, np.ndarray):
            if not isinstance(v2, np.ndarray) or not arrayCompare(v1, v2):
                return False
        elif isinstance(v1, (list, tuple)):
            if not isinstance(v2, (list, tuple)) or not listCompare(v1, v2):
                return False
        elif v1 != v2:
            # NaN is the only value unequal to itself; a NaN pair is
            # treated as equal, everything else is a real difference
            if not (v1 != v1 and v2 != v2):
                return False
    return True
def recursiveDictCompare(d1, d2):
    """Check that every entry of d1 has an equal counterpart in d2.

    Keys present only in d2 are ignored, as are pairs where either
    value is None (attributes may be unset in one of the two compared
    versions). Diagnostic output is printed for every key.
    """
    for key in d1:
        if not key in d2:
            print("key missing in d2:", key, flush=True)
            return False
        a = d1[key]
        b = d2[key]
        print(key, type(a), type(b), flush=True)
        if isinstance(a, np.ndarray):
            if not isinstance(b, np.ndarray) or not arrayCompare(a, b):
                print("data is different!", a, b)
                return False
        elif isinstance(a, dict):
            if not isinstance(b, dict):
                print("data is different!", a, b)
                return False
            if not recursiveDictCompare(a, b):
                return False
        elif isinstance(a, (list, tuple)):
            if not isinstance(b, (list, tuple)) or not listCompare(a, b):
                print("data is different!", a, b)
                return False
        elif a != b:
            # differences where either value is None are tolerated
            if (a is not None) and (b is not None):
                print("data is different!", a, b)
                return False
    return True
class DataSet(object):
def __init__(self, fname, newProject=False):
    """Initialize all project state with defaults; optionally create
    the project directory structure for a new project."""
    self.fname = fname
    # parameters specifically for optical scan
    self.version = currentversion
    self.lastpos = None  # last stage position; origin for the pixel<->length mapping
    self.maxdim = None   # scanned area; maxdim[:2] and maxdim[2:] are the two corner points
    self.pixelscale_df = None # µm / pixel --> scale of DARK FIELD camera (used for image stitching)
    self.pixelscale_bf = None # µm / pixel of BRIGHT FIELD camera (set to same as dark field, if both use the same camera)
    self.imagedim_bf = None # width, height, angle of BRIGHT FIELD camera
    self.imagedim_df = None # width, height, angle of DARK FIELD camera (set to same as bright field, if both use the same camera)
    self.imagescanMode = 'df'  # was the fullimage acquired in dark- or brightfield?
    self.fitpoints = []  # manually adjusted positions aquired to define the specimen geometry
    self.fitindices = []  # which of the five positions in the ui are already known
    self.boundary = []  # scan boundary computed by a circle around the fitpoints + manual adjustments
    self.grid = []  # scan grid positions for optical scan
    self.zpositions = []  # z-positions for optical scan
    self.heightmap = None  # plane-fit coefficients (a, b, c): z = a*x + b*y + c
    self.zvalimg = None    # 8-bit image of best-focus z fractions; stored separately as tif
    self.coordinatetransform = None  # if imported from an extern source the coordinate system may be rotated
    # explicit pixel-axis signs replacing the previously implicit image
    # orientation (see commit message); image y usually runs opposite
    # to stage y, hence the default -1.
    self.signx = 1.
    self.signy = -1.
    # parameters specifically for raman scan
    self.pshift = None  # shift of raman scan position relative to image center
    self.coordOffset = [0, 0]  # offset of entire coordinate system
    self.seedpoints = np.array([])
    self.seeddeletepoints = np.array([])
    # particle detection parameters, edited through the detection UI
    self.detectParams = {'points': np.array([[50,0],[100,200],[200,255]]),
                         'contrastcurve': True,
                         'blurRadius': 9,
                         'threshold': 0.2,
                         'maxholebrightness': 0.5,
                         'erodeconvexdefects': 0,
                         'minparticlearea': 20,
                         'minparticledistance': 20,
                         'measurefrac': 1,
                         'compactness': 0.1,
                         'seedRad': 3}
    self.ramanpoints = []
    self.particlecontours = []
    self.particlestats = []
    self.ramanscansortindex = None
    self.ramanscandone = False
    self.results = {'polymers': None,
                    'hqis': None,
                    'additives': None,
                    'additive_hqis': None}
    self.resultParams = {'minHQI': None,
                         'compHQI': None}
    self.spectraPath = None
    self.particles2spectra = None  # links idParticle to corresponding idSpectra (i.e., first measured particle (ID=0) is linked to spectra indices 0 and 1)
    self.colorSeed = 'default'
    self.resultsUploadedToSQL = []
    self.readin = True  # a value that is always set to True at loadData
    # and marks that the coordinate system might have changed in the meantime
    self.mode = "prepare"
    if newProject:
        self.fname = self.newProject(fname)
    self.updatePath()
def __eq__(self, other):
    # two datasets are equal if all their (nested) attributes compare equal
    return recursiveDictCompare(self.__dict__, other.__dict__)
def getPixelScale(self, mode=None):
    """Return the µm-per-pixel scale for *mode* ('df' or 'bf');
    defaults to the mode the fullimage was acquired in."""
    if mode is None:
        mode = self.imagescanMode
    return (self.pixelscale_df if mode == 'df' else self.pixelscale_bf)
def saveZvalImg(self):
    """Write the z-value image to its tif file (see getZvalImageName)."""
    if self.zvalimg is not None:
        cv2imwrite_fix(self.getZvalImageName(), self.zvalimg)
def loadZvalImg(self):
    """Reload the z-value image from disk, if it exists."""
    if os.path.exists(self.getZvalImageName()):
        self.zvalimg = cv2imread_fix(self.getZvalImageName(), cv2.IMREAD_GRAYSCALE)
def legacyConversion(self, recreatefullimage=False):
if self.version==0:
print("Converting legacy version 0 to 1")
print("This may take some time")
# local imports as these functions are only needed for the rare occasion of legacy conversion
from opticalscan import loadAndPasteImage
# try to load png and check for detection contours
recreatefullimage = recreatefullimage or not os.path.exists(self.getLegacyImageName())
if not recreatefullimage:
img = cv2imread_fix(self.getLegacyImageName())
Nc = len(self.particlecontours)
if Nc>0:
contour = self.particlecontours[Nc//2]
contpixels = img[contour[:,0,1],contour[:,0,0]]
if np.all(contpixels[:,1]==255) and np.all(contpixels[:,2]==0) \
and np.all(contpixels[:,0]==0):
recreatefullimage = True
if not recreatefullimage:
cv2imwrite_fix(self.getImageName(), img)
del img
if recreatefullimage: