Commit 38d7b35f authored by Lars Bittrich

first step to move data operations to the new datastats module

sqlexport (from Josef) added: credentials are imported via a JSON file; data operations still remain here
parent 6b9fc03c
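A rough sketch of the JSON-based credential handling mentioned above (the sqlexport diff itself is not shown here, so the file name and key names are assumptions, not the actual implementation):

import json

def loadSQLCredentials(path='sqlcredentials.json'):
    #hypothetical helper: read database credentials from a JSON file kept outside of the code
    with open(path) as fp:
        creds = json.load(fp)
    #assumed structure: {"host": "...", "user": "...", "password": "...", "database": "..."}
    return creds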
......@@ -30,16 +30,16 @@ from matplotlib.figure import Figure
class ExpExcelDialog(QtWidgets.QDialog):
def __init__(self, parent):
super(ExpExcelDialog, self).__init__()
def __init__(self, datastats, parent):
super(ExpExcelDialog, self).__init__(parent)
self.setWindowTitle('Export Options')
self.setGeometry(200,200, 300, 300)
self.parent = parent
self.particles = self.parent.particlestats
self.polymers = self.parent.particleResults
self.additives = self.parent.currentAdditives
self.hqis = self.parent.hqis
self.datastats = datastats
self.particles = self.datastats.particlestats
self.polymers = self.datastats.particleResults
self.additives = self.datastats.currentAdditives
self.hqis = self.datastats.hqis
self.layout = QtWidgets.QHBoxLayout()
self.setLayout(self.layout)
......@@ -51,7 +51,7 @@ class ExpExcelDialog(QtWidgets.QDialog):
self.exportOptions = ['Polymer Type (mandatory)', 'Additives', 'Long Size (µm)', 'Short Size (µm)', 'Area (µm²)', 'HQI', 'Size Classes']
self.checkBoxes = []
self.sizeClasses = [5, 10, 20, 50, 100, 1e6]
self.directory = self.parent.parent.dataset.path
self.directory = self.datastats.dataset.path
for index, option in enumerate(self.exportOptions):
self.checkBoxes.append(QtWidgets.QCheckBox(self))
......@@ -69,7 +69,7 @@ class ExpExcelDialog(QtWidgets.QDialog):
excelvbox.addWidget(self.checkBoxes[-1])
self.xlsFileName = QtWidgets.QLineEdit()
self.xlsFileName.setText('{}_Particle_List'.format(self.parent.parent.dataset.name))
self.xlsFileName.setText('{}_Particle_List'.format(self.datastats.dataset.name))
excelvbox.addWidget(QtWidgets.QLabel('Filename:'))
excelvbox.addWidget(self.xlsFileName)
......
# -*- coding: utf-8 -*-
"""
GEPARD - Gepard-Enabled PARticle Detection
Copyright (C) 2018 Lars Bittrich and Josef Brandt, Leibniz-Institut für
Polymerforschung Dresden e. V. <bittrich-lars@ipfdd.de>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program, see COPYING.
If not, see <https://www.gnu.org/licenses/>.
"""
import os
import numpy as np
import operator
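#DataStats collects the evaluation data of a dataset (spectra, particle statistics, polymer/additive assignments)
#independently of the GUI widgets, so that data operations can gradually move out of the widgets.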
class DataStats(object):
def __init__(self, dataset):
self.dataset = dataset
self.config = dataset.resultParams
self.spectraResults = None #entire List of all spectra assignments
self.additiveResults = None #entire List of all additives
self.particlestats = None
self.particleResults = None #final assignment for each particle
self.currentPolymers = None #list of polymers after setting entries with low hqi to unknown
self.currentAdditives = None #same thing for the additives
self.uniquePolymers = None #list of present polymer types
self.spectra = None #acquired spectra
self.indices = None #assignment of what spectra-indices belong to what substance
self.particles2spectra = None
self.manualPolymers = {}
self.manualAdditives = {}
def resetResults(self, spectraResults, additiveResults, hqis, addhqis):
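#store the raw spectrum/additive assignments and their HQIs as delivered by the TrueMatch import (see LoadWITecResults below)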
self.spectraResults = spectraResults
self.additiveResults = additiveResults
self.hqis = hqis
self.addhqis = addhqis
def update(self):
print('updating data from', self.dataset.name)
self.spectraResults = self.dataset.results['polymers']
self.additiveResults = self.dataset.results['additives']
self.hqis = self.dataset.results['hqis']
self.addhqis = self.dataset.results['additive_hqis']
self.colorSeed = self.dataset.colorSeed
if type(self.colorSeed) != str:
self.colorSeed = 'default'
#load Spectra
if self.dataset.spectraPath is None:
fname = os.path.join(self.dataset.path, self.dataset.name + '_000_Spec.Data 1.txt')
else:
fname = self.dataset.spectraPath
return self.loadSpectra(fname)
def loadSpectra(self, fname):
import time
t0 = time.time()
specfilename = self.dataset.fname.split('.pkl')[0] + '_spectra.npy'
specs = None
if os.path.exists(specfilename):
specs = np.load(specfilename)
else:
try:
specs = np.loadtxt(fname)
#if spectra are already in the correct format (WITec: first column wavenumbers, remaining columns intensities),
#we take them as they are; otherwise we have to convert from the Renishaw export format...
if not len(np.unique(specs[:, 0])) == len(specs[:, 0]): #duplicates in the first column -> not a plain wavenumber column -> Renishaw format, convert it
#Renishaw Convert
#columns 0 and 1 are x and y coordinates. We don't need them...
startWavenumber = specs[0, 2]
startIndices = np.where(specs[:, 2] == startWavenumber)[0]
spectra = np.zeros((startIndices[1], len(startIndices)+1)) #create array with shape (numWavenumbers, numSpectra+1) (first column holds wavenumbers)
spectra[:, 0] = specs[startIndices[0]:startIndices[1], 2]
for i in range(len(startIndices)-1):
spectra[:, i+1] = specs[startIndices[i]:startIndices[i+1], 3]
#aaand the last spectrum:
spectra[:, -1] = specs[startIndices[-1]:, 3]
specs = np.flip(spectra, 0) #Renishaw goes from highest to lowest wavenumber, for whatever reason...
#write spectra to a binary file; that makes reloading them in the future significantly faster
np.save(specfilename, specs)
print('loading specs:', time.time()-t0)
self.dataset.spectraPath = fname
except:
pass
self.spectra = specs
return specs
def loadParticleData(self):
self.particlestats = np.array(self.dataset.particlestats)
pixelscale = (self.dataset.pixelscale_df if self.dataset.imagescanMode == 'df' else self.dataset.pixelscale_bf)
#convert to micrometer scale
for index in range(len(self.particlestats)):
for subindex in range(5):
self.particlestats[index][subindex] = self.particlestats[index][subindex] * pixelscale #multiply by pixelscale
if subindex == 4:
self.particlestats[index][subindex] = self.particlestats[index][subindex] * pixelscale #the area scales with pixelscale squared, so multiply a second time
self.particles2spectra = self.dataset.particles2spectra
sortindices = self.dataset.ramanscansortindex
if self.particles2spectra is None:
print('creating default particles2spectra list')
#no assignment found, so we assume one measurement per particle and use ramanscansortindex for assignment
self.particles2spectra = [[int(np.where(sortindices == i)[0])] for i in range(len(sortindices))]
#check if the dataset already contains results (one per Raman scan point); otherwise they still have to be loaded...
return not (self.spectraResults is None or (len(self.spectraResults) != len(sortindices)))
def invalidateSpectra(self):
self.spectraResults = ['empty']*(self.spectra.shape[1]-1)
self.hqis = [100]*(self.spectra.shape[1]-1)
def formatResults(self, hqi, compHqi):
if self.spectraResults is not None:
del self.currentPolymers, self.currentAdditives
#convert to arrays (makes indexing easier...)
self.currentPolymers, self.hqis = np.array(self.spectraResults), np.array(self.hqis)
if self.additiveResults is not None:
self.currentAdditives, self.addhqis = np.array(self.additiveResults), np.array(self.addhqis)
#note: (re-)enabling the compHQI spin box is left to the GUI; DataStats holds no widgets
else:
self.currentAdditives = None
#set poor HQI results to unknown
self.currentPolymers[self.hqis < hqi] = 'unknown'
if self.currentAdditives is not None:
self.currentAdditives[self.addhqis < compHqi] = 'unknown'
def createHistogramData(self):
self.uniquePolymers = np.unique(self.currentPolymers)
self.particleResults = [None]*len(self.particlestats)
self.typehistogram = {i: 0 for i in self.uniquePolymers}
if len(self.particles2spectra) != len(self.particlestats):
return False
for particleID, specList in enumerate(self.particles2spectra):
assignment = self.currentPolymers[specList[0]] #we take the first spectrum's result as the particle result; hence, all spectra of one particle must have the same result
self.particleResults[particleID] = assignment
self.typehistogram[assignment] += 1
self.particleResults = np.array(self.particleResults)
##sort typehistogram; note that sorted() turns it into a list of (type, count) tuples
self.typehistogram = sorted(self.typehistogram.items(), key = operator.itemgetter(1), reverse = True)
self.uniquePolymers = [i[0] for i in self.typehistogram]
self.indices = [] #what particles belong to which polymer type?
for polymer in self.uniquePolymers:
self.indices.append(list(np.where(self.particleResults == polymer)[0]))
###generate additive array for each type in typehistogram:
if self.currentAdditives is None:
self.sorted_additives = None
else:
self.sorted_additives = []
for polymer in self.typehistogram: #get additives of each polymer type
self.sorted_additives.append(self.currentAdditives[np.where(self.currentPolymers == polymer[0])])
for i in range(len(self.sorted_additives)): #sort out 'none' entries
nonentries = np.where(self.sorted_additives[i] == 'none')
self.sorted_additives[i] = np.delete(self.sorted_additives[i], nonentries)
return True
def saveAnalysisResults(self, minHQI, compHQI):
self.dataset.results = {'polymers': self.spectraResults,
'hqis': self.hqis,
'additives': self.additiveResults,
'additive_hqis': self.addhqis}
self.dataset.resultParams = {'minHQI': minHQI,
'compHQI': compHQI}
self.dataset.save()
print('saved dataset')
\ No newline at end of file
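A minimal usage sketch of the new DataStats API shown above (the import path, the dataset object and the HQI thresholds are assumptions for illustration only):

from datastats import DataStats    #module name assumed from the commit message

stats = DataStats(dataset)         #dataset: an already loaded GEPARD DataSet
stats.update()                     #pull polymer/additive results from the dataset and load the spectra
if stats.loadParticleData():       #True if the stored results match the number of Raman scan points
    stats.formatResults(hqi=60, compHqi=40)       #set assignments with poor HQI to 'unknown' (thresholds assumed)
    if stats.createHistogramData():
        print(stats.typehistogram)                #sorted list of (polymer type, particle count) tuples
    stats.saveAnalysisResults(minHQI=60, compHQI=40)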
......@@ -12,13 +12,14 @@ import sys
from os import chdir, getcwd
class LoadWITecResults(QtWidgets.QDialog):
def __init__(self, parent):
def __init__(self, datastats, parent):
super(LoadWITecResults, self).__init__()
self.setGeometry(400, 400, 200, 300)
self.setWindowTitle('Get Truematch Results')
self.layout = QtWidgets.QGridLayout()
self.setLayout(self.layout)
self.datastats = datastats
self.parent = parent
self.parent.setDisabled(True)
self.trueMatchResults = None
......@@ -99,7 +100,7 @@ class LoadWITecResults(QtWidgets.QDialog):
self.editEntryWindow.show()
def loadFileManually(self):
dsetpath = self.parent.parent.dataset.path
dsetpath = self.datastats.dataset.path
fnames =QtWidgets.QFileDialog.getOpenFileNames(self, 'Select TrueMatch result file', dsetpath, 'text file (*.txt)')[0]
if len(fnames) > 1:
QtWidgets.QMessageBox.about(self, 'Info', 'The following order of files was loaded. If incorrect, please call a coder!\n{}'.format('\n'.join([fname for fname in fnames])))
......@@ -247,22 +248,16 @@ class LoadWITecResults(QtWidgets.QDialog):
assert len(self.polymertypes) == len(self.resultList), 'incorrect number of polymer types added...'
del self.parent.spectraResults, self.parent.additiveResults, self.parent.hqis, self.parent.addhqis
self.parent.spectraResults = self.polymertypes
self.parent.additiveResults = self.additives
self.parent.hqis = self.hqis
self.parent.addhqis = self.addhqis
self.datastats.resetResults(self.polymertypes, self.additives,
self.hqis, self.addhqis)
self.parent.formatResults()
if len(self.manualPolymers) > 0:
self.reviewGroup.setDisabled(False)
def closeEvent(self, event):
del self.parent.spectraResults, self.parent.additiveResults, self.parent.hqis, self.parent.addhqis
self.parent.spectraResults = self.polymertypes
self.parent.additiveResults = self.additives
self.parent.hqis = self.hqis
self.parent.addhqis = self.addhqis
self.datastats.resetResults(self.polymertypes, self.additives,
self.hqis, self.addhqis)
self.parent.updateBtn.clicked.connect(self.parent.formatResults)
self.parent.formatResults()
self.parent.show_hide_labels()
......
......@@ -27,10 +27,10 @@ If not, see <https://www.gnu.org/licenses/>.
import numpy as np
import cv2
from PyQt5 import QtWidgets
#import matplotlib.pyplot as plt
class ParticleEditor(object):
def __init__(self, parent):
def __init__(self, datastats, parent):
self.datastats = datastats
self.parent = parent #the assigned analysis widget
self.backupFreq = 3 #save a backup every n actions
self.neverBackedUp = True
......@@ -39,7 +39,7 @@ class ParticleEditor(object):
def createSafetyBackup(self):
self.actionCounter += 1
if self.actionCounter == self.backupFreq-1 or self.neverBackedUp:
backupname = self.parent.parent.dataset.saveBackup()
backupname = self.datastats.dataset.saveBackup()
print('backing up as', backupname)
self.neverBackedUp = False
self.actionCounter = 0
......@@ -59,7 +59,7 @@ class ParticleEditor(object):
print('merging contours:', contourIndices)
self.createSafetyBackup()
#get contours:
contours = [self.parent.parent.dataset.particlecontours[i] for i in contourIndices]
contours = [self.datastats.dataset.particlecontours[i] for i in contourIndices]
cnt = np.vstack(tuple(contours)) #combine contours
#draw contours
......@@ -72,7 +72,7 @@ class ParticleEditor(object):
img = np.zeros((rangey, rangex))
for i in contourIndices:
curCnt = self.parent.parent.dataset.particlecontours[i]
curCnt = self.datastats.dataset.particlecontours[i]
for i in range(len(curCnt)):
curCnt[i][0][0] -= xmin-padding
curCnt[i][0][1] -= ymin-padding
......@@ -96,48 +96,48 @@ class ParticleEditor(object):
#check if the dataset already contains a (possibly modified) particles2spectra assignment, otherwise create a default one.
if self.parent.parent.dataset.particles2spectra is None: #create default assignment
if self.datastats.dataset.particles2spectra is None: #create default assignment
print('recreating particles2spectra from within edit particles...')
sortindices = self.parent.parent.dataset.ramanscansortindex
self.parent.parent.dataset.particles2spectra = [[int(np.where(sortindices == i)[0])] for i in range(len(sortindices))]
sortindices = self.datastats.dataset.ramanscansortindex
self.datastats.dataset.particles2spectra = [[int(np.where(sortindices == i)[0])] for i in range(len(sortindices))]
#Contour indices are the same as the original particlestats, which are contained in the dataset.
#We have to modify that and reload in the analysisview
#first, overwrite first index with new particlestats
self.parent.parent.dataset.particlestats[contourIndices[0]] = stats
self.datastats.dataset.particlestats[contourIndices[0]] = stats
#now, delete the rest...
self.parent.parent.dataset.particlestats = [i for ind, i in enumerate(self.parent.parent.dataset.particlestats) if ind not in contourIndices[1:]]
self.datastats.dataset.particlestats = [i for ind, i in enumerate(self.datastats.dataset.particlestats) if ind not in contourIndices[1:]]
#same with the contours
self.parent.parent.dataset.particlecontours[contourIndices[0]] = newContour
self.parent.parent.dataset.particlecontours = [i for ind, i in enumerate(self.parent.parent.dataset.particlecontours) if ind not in contourIndices[1:]]
self.datastats.dataset.particlecontours[contourIndices[0]] = newContour
self.datastats.dataset.particlecontours = [i for ind, i in enumerate(self.datastats.dataset.particlecontours) if ind not in contourIndices[1:]]
#update particle2spectra_list
#what is the current particle index??
specIndices = []
#other spectra indices:
for index in contourIndices:
specIndices.append(self.parent.particles2spectra[index])
specIndices.append(self.datastats.particles2spectra[index])
#flatten index list (in case a nested list was created...)
specIndices = list(np.concatenate(specIndices))
for i in specIndices:
self.parent.spectraResults[i] = new_assignment
self.parent.hqis[i] = 100 #avoid sorting them out again by hqi-filter...
self.datastats.spectraResults[i] = new_assignment
self.datastats.hqis[i] = 100 #avoid sorting them out again by hqi-filter...
print(f'spectrum {i} of particle{contourIndices[0]} is now {new_assignment}')
#modify particles2spectra..
self.parent.parent.dataset.particles2spectra[contourIndices[0]] = specIndices
self.datastats.dataset.particles2spectra[contourIndices[0]] = specIndices
for index in reversed(contourIndices[1:]):
print('removing index from particles2spectra:', index)
del self.parent.parent.dataset.particles2spectra[index]
del self.datastats.dataset.particles2spectra[index]
#save data
self.parent.saveAnalysisResults()
self.datastats.saveAnalysisResults()
#update contours in sampleview
self.parent.parent.contouritem.resetContours(self.parent.parent.dataset.particlecontours)
self.parent.parent.contouritem.resetContours(self.datastats.dataset.particlecontours)
self.parent.loadParticleData()
......@@ -150,13 +150,13 @@ class ParticleEditor(object):
self.createSafetyBackup()
print(f'reassigning indices {contourindices} into {new_assignment}')
for partIndex in contourindices:
for specIndex in self.parent.particles2spectra[partIndex]:
self.parent.currentPolymers[specIndex] = new_assignment
self.parent.spectraResults[specIndex] = new_assignment
self.parent.hqis[specIndex] = 100
for specIndex in self.datastats.particles2spectra[partIndex]:
self.datastats.currentPolymers[specIndex] = new_assignment
self.datastats.spectraResults[specIndex] = new_assignment
self.datastats.hqis[specIndex] = 100
#save data
self.parent.saveAnalysisResults()
self.datastats.saveAnalysisResults()
self.parent.loadParticleData()
......
......@@ -329,6 +329,3 @@ class DataSet(object):
return filename
# backupNameNotFound = False
if __name__ == '__main__':
dset = loadData(r'D:\Projekte\Mikroplastik\Microcatch_BALT\Sampling Kampagne 1\MCI_2\MCI_2_all_kleiner500\MCI_2_ds1+2_all_kleiner500_10_1\MCI_2_ds1+2_all_kleiner500_10_1.pkl')