Skip to content
GitLab
Projects
Groups
Snippets
Help
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Sign in / Register
Toggle navigation
S
Subsampling
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Service Desk
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Operations
Operations
Incidents
Environments
Packages & Registries
Packages & Registries
Package Registry
Analytics
Analytics
CI / CD
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
Josef Brandt
Subsampling
Commits
4250c8d1
Commit
4250c8d1
authored
Mar 30, 2020
by
Josef Brandt
Browse files
Options
Browse Files
Download
Plain Diff
Merge branch 'ChemometricMethods' into ResultGeneration
parents
c509aa9c
6d4cec73
Changes
15
Hide whitespace changes
Inline
Side-by-side
Showing
15 changed files
with
711 additions
and
53 deletions
+711
-53
.gitignore
.gitignore
+8
-0
chemometricMethods.py
chemometricMethods.py
+220
-0
cythonModules/rotateContour.pyx
cythonModules/rotateContour.pyx
+32
-0
cythonModules/setup_cython.py
cythonModules/setup_cython.py
+18
-0
evaluation.py
evaluation.py
+17
-13
gui/filterView.py
gui/filterView.py
+18
-1
gui/mainView.py
gui/mainView.py
+16
-1
gui/measureModes.py
gui/measureModes.py
+4
-3
helpers.py
helpers.py
+23
-5
input_output.py
input_output.py
+1
-0
subsampling.py
subsampling.py
+43
-19
tests/test_chemometricMethods.py
tests/test_chemometricMethods.py
+214
-0
tests/test_cython.py
tests/test_cython.py
+61
-0
tests/test_evaluation.py
tests/test_evaluation.py
+16
-10
tests/test_methods.py
tests/test_methods.py
+20
-1
No files found.
.gitignore
View file @
4250c8d1
...
...
@@ -6,3 +6,11 @@ __pycache__/
*.png
*.res
cythonModules/build/
*.c
*.pyd
*.html
chemometricMethods.py
0 → 100644
View file @
4250c8d1
import
numpy
as
np
import
cv2
from
sklearn.preprocessing
import
StandardScaler
from
sklearn.decomposition
import
PCA
from
sklearn.cluster
import
DBSCAN
from
scipy
import
spatial
from
itertools
import
combinations
from
random
import
sample
import
time
import
sys
sys
.
path
.
append
(
"C://Users//xbrjos//Desktop//Python"
)
from
gepard.analysis.particleContainer
import
ParticleContainer
from
gepard.analysis
import
particleAndMeasurement
as
pm
from
methods
import
SubsamplingMethod
def get_pca(data: np.ndarray, numComp: int = 2) -> np.ndarray:
    """Standardizes the data and projects it onto its first principal components.

    Note: the standardized data is transposed before the PCA, i.e., the
    principal components are computed over the *columns* of the input
    (features as rows, particles as columns — see _get_particle_featurematrix).
    :param data: 2D input array
    :param numComp: number of principal components to keep
    :return: np.ndarray of the principal component scores
    :raises ValueError: propagated from StandardScaler on invalid input
    """
    try:
        standardizedData = StandardScaler().fit_transform(data.copy())
    except ValueError:
        # diagnostic output only; the error is re-raised unchanged.
        # (the previous message claimed a retry that never happened)
        print('standardscaler failed on the given data')
        print('datashape', data.shape)
        print('unique:', np.unique(data))
        raise
    pca = PCA(n_components=numComp)
    princComp: np.ndarray = pca.fit_transform(np.transpose(standardizedData))
    return princComp
def do_DBSCAN_clustering(data: np.ndarray, eps: float = 0.1, min_samples: int = 10) -> tuple:
    """
    Does DBSCAN clustering and finds noisy data
    :param data: The input array, exactly two columns (x, y)
    :param eps: neighborhood radius passed to DBSCAN
    :param min_samples: minimum neighborhood size for a core point
    :return: Cluster labels for each point in the dataset given to fit(). Noisy samples are given the label -1.
    """
    assert data.shape[1] == 2
    scaledPoints = StandardScaler().fit_transform(data)
    clustering = DBSCAN(eps=eps, min_samples=min_samples)
    clustering.fit(scaledPoints)
    return clustering.labels_, clustering.core_sample_indices_
def get_n_points_closest_to_point(points: np.ndarray, n: int, refPoint: np.ndarray) -> list:
    """
    Returns a list with indices of n points that are closest to the indicated refPoint
    :param points: np.ndarray, cols: x, y, rows: individual points
    :param n: number of points to return
    :param refPoint: np.array([x, y]) of reference point
    :return: list of point indices
    """
    offsets: np.ndarray = points - refPoint
    # euclidean distance of every point to the reference point
    distances: np.ndarray = np.sqrt(np.sum(offsets ** 2, axis=1))
    closestFirst: np.ndarray = np.argsort(distances)
    return list(closestFirst[:n])
class ChemometricSubsampling(SubsamplingMethod):
    """Subsampling method that clusters the particles' feature vectors
    (log Hu moments + color hash, projected onto principal components)
    with DBSCAN and then picks representatives from each cluster and,
    amplified, from the noise."""

    def __init__(self, particleContainer: ParticleContainer, desiredFraction: float):
        super(ChemometricSubsampling, self).__init__(particleContainer, desiredFraction)

    @property
    def label(self) -> str:
        return 'Chemometric Selection'

    def apply_subsampling_method(self) -> list:
        """Runs the full chemometric pipeline (feature extraction -> PCA ->
        DBSCAN -> per-cluster selection) and returns the selected particles.
        :return: list of selected particles
        :raises ValueError: propagated from get_pca on invalid feature data
        """
        vectors: np.ndarray = self._get_particle_featurematrix()
        try:
            princComps: np.ndarray = get_pca(vectors)
        except ValueError:
            # diagnostic output, then re-raise: without the raise, princComps
            # would be unbound below and a misleading NameError would follow
            print('numParticles:', len(self.particleContainer.particles))
            print('input featurematrix shape', vectors.shape)
            raise
        clusterLabels, coreIndices = do_DBSCAN_clustering(princComps)
        indices: list = self._get_indices_from_clusterLabels(princComps, clusterLabels, coreIndices)
        selectedIndices: set = set(indices)  # set membership avoids O(n*m) list lookups
        return [particle for particle in self.particleContainer.particles
                if particle.index in selectedIndices]

    def _get_particle_featurematrix(self) -> np.ndarray:
        """Builds the feature matrix of all particles.
        :return: np.ndarray, numRows: Features (11), numCols: Particles
        """
        vectors: list = [FeatureExtractor(particle).get_characteristic_vector()
                         for particle in self.particleContainer.particles]
        featureMat: np.ndarray = np.transpose(np.array(vectors))
        # 11 features per particle: 7 log Hu moments + 4 color hash digits
        assert featureMat.shape == (11, len(self.particleContainer.particles)), \
            f'wrong featureMat-shape: {featureMat.shape}'
        return featureMat

    def equals(self, otherMethod) -> bool:
        """Two methods are equal iff they have the exact same type and fraction.
        :param otherMethod: the method to compare against
        :return: True if equal
        """
        # deliberate exact type check (not isinstance): a subclass is not equal
        return type(otherMethod) == ChemometricSubsampling and otherMethod.fraction == self.fraction

    def _get_indices_from_clusterLabels(self, points: np.ndarray, labels: np.ndarray,
                                        centerIndices: np.ndarray) -> list:
        """Selects point indices per cluster: random draws from the noise
        cluster (-1), the points closest to the cluster center otherwise.
        :param points: 2D array of the clustered points
        :param labels: cluster label per point (-1 = noise)
        :param centerIndices: DBSCAN core sample indices (currently unused)
        :return: list of selected point indices, free of duplicates
        """
        indices: list = []
        allIndices: np.ndarray = np.arange(len(labels))
        numPointsPerCluster: dict = self._get_numPoints_per_cluster(labels)
        for clusterIndex in set(labels):
            nPoints: int = int(numPointsPerCluster[clusterIndex])
            indicesInCluster: np.ndarray = allIndices[labels == clusterIndex]
            if clusterIndex == -1:
                # noise cluster: draw the requested number of points randomly
                for ind in sample(list(indicesInCluster), nPoints):
                    indices.append(ind)
            else:
                clusterPoints: np.ndarray = points[indicesInCluster]
                centerPoint: np.ndarray = np.mean(clusterPoints, axis=0)
                indicesToSelect: list = get_n_points_closest_to_point(clusterPoints, nPoints, centerPoint)
                for ind in indicesToSelect:
                    # map the cluster-local index back to the global point index
                    indices.append(indicesInCluster[ind])
        assert len(set(indices)) == len(indices), f'The calculated indices contain duplicates, ' \
                                                  f'num duplicates: {len(indices) - len(set(indices))}'
        return indices

    def _get_numPoints_per_cluster(self, labels: np.ndarray, noiseAmpFactor: float = 5) -> dict:
        """
        MP Particles are expected to be the minority of all particles. So, if datapoints were classified as noise
        (i.e., label = -1), it is likely that MP is in there. The abundancy of points taken from the noise is multiplied
        by the noiseAmpFactor
        :param labels: cluster label per point (-1 = noise)
        :param noiseAmpFactor: oversampling factor for the noise cluster
        :return: A dictionary with keys = cluster index (i.e., label) and value = number of points to take from that
        """
        pointsPerCluster: dict = {}
        if type(labels) != np.ndarray:
            labels = np.array(labels)

        individualLabels: set = set(labels)
        numPointsToSelect = round(len(labels) * self.fraction)
        if numPointsToSelect == 0:
            numPointsToSelect = 1

        numNoisePoints = len(labels[labels == -1])
        numClusteredPoints = len(labels) - numNoisePoints

        # cap the noise amplification so that it cannot exceed taking all points
        if noiseAmpFactor > 1 / self.fraction:
            noiseAmpFactor = 1 / self.fraction
        numAmpPoints = numClusteredPoints + numNoisePoints * noiseAmpFactor
        fractionPerCluster = np.clip(numPointsToSelect / numAmpPoints, 0.0, 1.0)

        tooFewPoints = numPointsToSelect < len(individualLabels)
        totalPointsAdded = 0
        for ind in individualLabels:
            if ind > -1:
                if not tooFewPoints:
                    pointsToAdd = round(fractionPerCluster * len(labels[labels == ind]))
                else:
                    # fewer requested points than clusters: one per cluster until the budget is used
                    pointsToAdd = 1 if totalPointsAdded < numPointsToSelect else 0
                pointsPerCluster[ind] = pointsToAdd
                totalPointsAdded += pointsToAdd

        # fill up the rest with noisePoints
        if numNoisePoints > 0:
            diff: int = int(np.clip(numPointsToSelect - totalPointsAdded, 0, numNoisePoints))
            pointsPerCluster[-1] = diff
            totalPointsAdded += diff

        # just in case too many points were selected (due to rounding errors), keep on deleting until it matches.
        # fix: pick the KEY with the highest count — the original iterated over
        # pointsPerCluster.values() and then used those values as dict keys,
        # which raised KeyError or decremented the wrong cluster.
        while totalPointsAdded > numPointsToSelect:
            indexWithHighestCount = max(pointsPerCluster, key=pointsPerCluster.get)
            pointsPerCluster[indexWithHighestCount] -= 1
            totalPointsAdded -= 1

        if not abs(totalPointsAdded - numPointsToSelect) <= 1:
            print('error')

        for clusterIndex in pointsPerCluster.keys():
            assert 0 <= pointsPerCluster[clusterIndex] <= len(labels[labels == clusterIndex])
        return pointsPerCluster
class FeatureExtractor(object):
    """Extracts an 11-element feature vector (7 log-scaled Hu moments plus
    4 color hash digits) from a particle for chemometric clustering."""

    def __init__(self, particle: 'pm.Particle'):
        super(FeatureExtractor, self).__init__()
        # the particle this extractor works on (string annotation: avoids
        # evaluating the project type at definition time)
        self.particle: 'pm.Particle' = particle

    def get_characteristic_vector(self) -> np.ndarray:
        """Concatenates the log Hu moments and the color hash into one vector.
        :return: 1D np.ndarray of length 11 (7 Hu moments + 4 color digits)
        """
        log_hu: np.ndarray = self._get_log_hu_moments()
        color: np.ndarray = self._get_color_hash(self.particle.color, desiredLength=4)
        vector: np.ndarray = np.hstack((log_hu, color))
        # the assert replaces the former redundant print('error') pre-check
        assert len(vector) == 7 + 4, f'wrong feature vector: {vector} with shape: {vector.shape}'
        return vector

    def _get_log_hu_moments(self) -> np.ndarray:
        """Calculates the 7 Hu moments of the particle contour, log10-scaled.
        :return: 1D np.ndarray of the 7 scaled moments
        """
        moments: dict = cv2.moments(self.particle.contour)
        resultMoments: np.ndarray = np.zeros((7, 1))
        for index, mom in enumerate(cv2.HuMoments(moments)):
            if mom != 0:
                # signed log scaling: keeps the sign, compresses the huge value range
                resultMoments[index] = -1 * np.copysign(1.0, mom) * np.log10(abs(mom))
            else:
                resultMoments[index] = 0
        return resultMoments[:, 0]

    def _get_color_hash(self, color: str, desiredLength: int = 4) -> np.ndarray:
        """Maps a color name to `desiredLength` decimal digits.

        Fix: the builtin hash() is randomized per process (PYTHONHASHSEED,
        Python >= 3.3), so feature vectors were not reproducible across runs.
        An md5-based hash yields stable digits for the same color name.
        :param color: color name string
        :param desiredLength: number of decimal digits to return
        :return: 1D np.ndarray of `desiredLength` ints in 0..9
        """
        import hashlib  # local import: keeps the module's import block untouched
        digest: str = hashlib.md5(color.encode('utf-8')).hexdigest()
        # md5 as decimal has ~39 digits, always enough for small desiredLength
        digits: str = str(int(digest, 16))
        colorArray: list = [int(i) for i in digits[:desiredLength]]
        return np.array(colorArray)
cythonModules/rotateContour.pyx
0 → 100644
View file @
4250c8d1
import numpy as np
cimport numpy as np
cimport cython

# numpy's Python-level alias np.float was deprecated in numpy 1.20 and removed
# in 1.24; np.float64 is the explicit equivalent dtype.
DTYPE = np.float64
ctypedef np.float_t DTYPE_t
ctypedef np.int32_t INT32_t


def rotate_contour_around_point(np.ndarray[DTYPE_t, ndim=3] contour,
                                np.ndarray[DTYPE_t, ndim=1] refPoint,
                                double angleDegree):
    """
    Rotates a contour around a reference point.
    :param contour: Array of points to be rotated, [:, 0, 0] = x, [:, 0, 1] = y
    :param refPoint: The reference point around which the contour is rotated, array of x and y
    :param angleDegree: The angle in degree to rotate (counter-clockwise)
    :return: Array of the rotated points, [:, 0, 0] = x, [:, 0, 1] = y
    """
    cdef int i
    cdef double theta, sin, cos, x, y
    cdef np.ndarray[DTYPE_t, ndim=3] newContour

    theta = np.deg2rad(angleDegree)
    sin = np.sin(theta)
    cos = np.cos(theta)
    newContour = np.zeros_like(contour, dtype=DTYPE)
    for i in range(contour.shape[0]):
        # standard 2D rotation of (x, y) about refPoint
        x = cos * (contour[i, 0, 0] - refPoint[0]) - sin * (contour[i, 0, 1] - refPoint[1]) + refPoint[0]
        y = sin * (contour[i, 0, 0] - refPoint[0]) + cos * (contour[i, 0, 1] - refPoint[1]) + refPoint[1]
        newContour[i, 0, 0] = x
        newContour[i, 0, 1] = y

    return newContour
cythonModules/setup_cython.py
0 → 100644
View file @
4250c8d1
"""Build script for the 'rotateContour' cython extension.

Running it without arguments is equivalent to
``python setup_cython.py build_ext --inplace``.
"""
from setuptools import setup
from setuptools import Extension
from Cython.Build import cythonize
import numpy as np
import sys

# default to an in-place extension build when no command was given
if len(sys.argv) == 1:
    sys.argv += ["build_ext", "--inplace"]

ext = Extension("rotateContour", ["rotateContour.pyx"], extra_compile_args=['-O3'],)

setup(
    name="rotate contour around reference point",
    ext_modules=cythonize([ext], annotate=True),  # accepts a glob pattern
    include_dirs=[np.get_include()]
)
evaluation.py
View file @
4250c8d1
...
...
@@ -14,6 +14,7 @@ import matplotlib.pyplot as plt
from
helpers
import
ParticleBinSorter
import
methods
as
meth
import
geometricMethods
as
gmeth
import
chemometricMethods
as
cmeth
sys
.
path
.
append
(
"C://Users//xbrjos//Desktop//Python"
)
from
gepard
import
dataset
...
...
@@ -23,9 +24,11 @@ def get_name_from_directory(dirPath: str) -> str:
class
TotalResults
(
object
):
methods
:
list
=
[
meth
.
RandomSampling
,
meth
.
SizeBinFractioning
,
gmeth
.
CrossBoxSubSampling
,
gmeth
.
SpiralBoxSubsampling
]
measuredFreactions
:
list
=
[
0.05
,
0.1
,
0.15
,
0.2
,
0.3
,
0.5
,
0.9
]
# methods: list = [meth.RandomSampling, meth.SizeBinFractioning, gmeth.CrossBoxSubSampling,
# gmeth.SpiralBoxSubsampling, cmeth.ChemometricSubsampling]
measuredFractions
:
list
=
[
0.01
,
0.05
,
0.1
,
0.15
,
0.2
,
0.5
,
0.75
,
0.9
]
# measuredFractions: list = [0.1, 0.15, 0.2, 0.5, 0.75, 0.9]
# measuredFractions: list = [0.1, 0.3, 0.5, 0.9]
def
__init__
(
self
):
super
(
TotalResults
,
self
).
__init__
()
...
...
@@ -47,18 +50,19 @@ class TotalResults(object):
return
newResult
def
update_all
(
self
)
->
None
:
def
update_all
(
self
,
force
:
bool
=
False
)
->
None
:
"""
Updates all samples with all methods and all fractions
:param force: Wether to force an update of an already existing method.
:return:
"""
for
index
,
sample
in
enumerate
(
self
.
sampleResults
):
sample
.
load_dataset
()
for
fraction
in
self
.
measuredFr
e
actions
:
for
fraction
in
self
.
measuredFractions
:
possibleMethods
=
self
.
_get_methods_for_fraction
(
sample
.
dataset
,
fraction
)
for
curMethod
in
possibleMethods
:
#
print(f'updating {sample.sampleName} with {curMethod.label} at fraction {fraction}')
sample
.
update_result_with_method
(
curMethod
)
print
(
f
'updating
{
sample
.
sampleName
}
with
{
curMethod
.
label
}
at fraction
{
fraction
}
'
)
sample
.
update_result_with_method
(
curMethod
,
force
=
force
)
print
(
f
'processed
{
index
+
1
}
of
{
len
(
self
.
sampleResults
)
}
samples'
)
def
get_error_vs_fraction_data
(
self
,
attributes
:
list
=
[],
methods
:
list
=
[])
->
dict
:
...
...
@@ -102,10 +106,11 @@ class TotalResults(object):
particleContainer
=
dataset
.
particleContainer
methods
:
list
=
[
meth
.
RandomSampling
(
particleContainer
,
fraction
),
meth
.
SizeBinFractioning
(
particleContainer
,
fraction
)]
boxCreator
:
gmeth
.
BoxSelectionCreator
=
gmeth
.
BoxSelectionCreator
(
dataset
)
methods
+=
boxCreator
.
get_crossBoxSubsamplers_for_fraction
(
fraction
)
methods
+=
boxCreator
.
get_spiralBoxSubsamplers_for_fraction
(
fraction
)
methods
.
append
(
cmeth
.
ChemometricSubsampling
(
particleContainer
,
fraction
))
# methods = [cmeth.ChemometricSubsampling(particleContainer, fraction)]
return
methods
...
...
@@ -156,7 +161,6 @@ class SampleResult(object):
"""
if
not
self
.
has_attribute
(
newAttribute
):
self
.
attributes
.
append
(
newAttribute
)
print
(
f
'sample
{
self
.
filepath
}
has now attribute
{
newAttribute
}
'
)
def
has_any_attribute
(
self
,
listOfAttributes
:
list
)
->
bool
:
hasAttr
:
bool
=
False
...
...
@@ -206,6 +210,8 @@ class SubsamplingResult(object):
self
.
subSampledParticleCount
:
int
=
None
self
.
mpCountError
:
float
=
None
self
.
mpCountErrorPerBin
:
tuple
=
None
# TODO: UPDATE PATTERNS -> ARE THESE REASONABLE???
self
.
mpPatterns
=
[
'poly'
,
'rubber'
,
'pb'
,
'pr'
,
'pg'
,
'py'
,
'pv'
]
def
update
(
self
)
->
None
:
"""
...
...
@@ -248,15 +254,13 @@ class SubsamplingResult(object):
def
_get_error_from_values
(
self
,
exact
:
float
,
estimate
:
float
)
->
float
:
assert
(
exact
!=
0
)
return
abs
(
exact
-
estimate
)
/
exact
return
abs
(
exact
-
estimate
)
/
exact
*
100
def
_get_number_of_MP_particles
(
self
,
particleList
:
list
)
->
int
:
mpPatterns
=
[
'poly'
,
'rubber'
,
'pb'
,
'pr'
,
'pg'
,
'py'
,
'pv'
]
numMPParticles
=
0
for
particle
in
particleList
:
assignment
=
particle
.
getParticleAssignment
()
for
pattern
in
mpPatterns
:
for
pattern
in
self
.
mpPatterns
:
if
assignment
.
lower
().
find
(
pattern
)
!=
-
1
:
numMPParticles
+=
1
break
...
...
gui/filterView.py
View file @
4250c8d1
from
PyQt5
import
QtGui
,
QtWidgets
,
QtCore
import
numpy
as
np
import
sys
sys
.
path
.
append
(
"C://Users//xbrjos//Desktop//Python"
)
import
gepard
from
gepard
import
dataset
import
helpers
import
numpy
as
np
from
cythonModules
import
rotateContour
as
rc
class
FilterView
(
QtWidgets
.
QGraphicsView
):
...
...
@@ -14,6 +15,7 @@ class FilterView(QtWidgets.QGraphicsView):
self
.
setWindowTitle
(
'FilterView'
)
self
.
dataset
:
dataset
.
DataSet
=
None
self
.
rotation
:
int
=
0
scene
=
QtWidgets
.
QGraphicsScene
(
self
)
scene
.
setItemIndexMethod
(
QtWidgets
.
QGraphicsScene
.
NoIndex
)
...
...
@@ -47,6 +49,21 @@ class FilterView(QtWidgets.QGraphicsView):
self
.
_update_particle_contours
()
self
.
_fit_to_window
()
@
helpers
.
timingDecorator
def
update_rotation
(
self
,
newRotation
:
int
)
->
None
:
if
newRotation
!=
self
.
rotation
:
angle
:
float
=
np
.
float
(
newRotation
-
self
.
rotation
)
center
:
np
.
ndarray
=
np
.
array
([
self
.
filter
.
circleOffset
[
0
]
+
self
.
filter
.
diameter
/
2
,
self
.
filter
.
circleOffset
[
1
]
+
self
.
filter
.
diameter
/
2
],
dtype
=
np
.
float
)
for
particle
in
self
.
dataset
.
particleContainer
.
particles
:
contour
:
np
.
ndarray
=
particle
.
contour
.
astype
(
np
.
float
)
particle
.
contour
=
rc
.
rotate_contour_around_point
(
contour
,
center
,
angle
)
self
.
_update_particle_contours
()
self
.
rotation
=
newRotation
@
helpers
.
timingDecorator
def
_update_particle_contours
(
self
)
->
None
:
self
.
_remove_particle_contours
()
if
self
.
dataset
is
not
None
:
...
...
gui/mainView.py
View file @
4250c8d1
...
...
@@ -25,14 +25,24 @@ class MainView(QtWidgets.QWidget):
loadDsetBtn
=
QtWidgets
.
QPushButton
(
'Load Dataset'
)
loadDsetBtn
.
released
.
connect
(
self
.
_load_dataset
)
self
.
rotationSpinBox
=
QtWidgets
.
QSpinBox
()
self
.
rotationSpinBox
.
setMinimum
(
0
)
self
.
rotationSpinBox
.
setMaximum
(
359
)
self
.
rotationSpinBox
.
setValue
(
0
)
self
.
rotationSpinBox
.
setMaximumWidth
(
50
)
self
.
rotationSpinBox
.
valueChanged
.
connect
(
self
.
_update_fiter_rotation
)
self
.
controlGroup
=
QtWidgets
.
QGroupBox
()
self
.
controlGroupLayout
=
QtWidgets
.
QHBoxLayout
()
self
.
controlGroup
.
setLayout
(
self
.
controlGroupLayout
)
self
.
controlGroupLayout
.
addWidget
(
loadDsetBtn
)
self
.
controlGroupLayout
.
addWidget
(
QtWidgets
.
QLabel
(
'Filter Rotation'
))
self
.
controlGroupLayout
.
addWidget
(
self
.
rotationSpinBox
)
self
.
controlGroupLayout
.
addWidget
(
QtWidgets
.
QLabel
(
'Select Subsampling Mode:'
))
self
.
controlGroupLayout
.
addWidget
(
self
.
modeSelector
)
self
.
controlGroupLayout
.
addWidget
(
self
.
activeModeControl
)
self
.
controlGroupLayout
.
addStretch
()
self
.
layout
.
addWidget
(
self
.
controlGroup
)
self
.
filterView
=
FilterView
()
...
...
@@ -62,10 +72,11 @@ class MainView(QtWidgets.QWidget):
self
.
activeMode
=
requestedMode
self
.
activeModeControl
=
self
.
activeMode
.
get_control_groupBox
()
self
.
controlGroupLayout
.
insertWidget
(
2
,
self
.
activeModeControl
)
self
.
controlGroupLayout
.
insertWidget
(
3
,
self
.
activeModeControl
)
self
.
activeMode
.
update_measure_viewItems
()
@
helpers
.
timingDecorator
def
_load_dataset
(
self
)
->
None
:
fname
=
QtWidgets
.
QFileDialog
.
getOpenFileName
(
self
,
'Select .pkl file'
,
filter
=
'pkl file (*.pkl)'
)
if
fname
[
0
]
!=
''
:
...
...
@@ -91,6 +102,10 @@ class MainView(QtWidgets.QWidget):
self
.
filterView
.
update_from_dataset
(
dset
)
self
.
activeMode
.
update_measure_viewItems
()
def
_update_fiter_rotation
(
self
):
self
.
filterView
.
update_rotation
(
self
.
rotationSpinBox
.
value
())
self
.
activeMode
.
send_measuredParticles_to_filterview
()
if
__name__
==
'__main__'
:
import
sys
...
...
gui/measureModes.py
View file @
4250c8d1
...
...
@@ -9,6 +9,7 @@ class MeasureMode(QtCore.QObject):
self
.
filterView
:
FilterView
=
relatedFilterView
self
.
uiControls
:
QtWidgets
.
QGroupBox
=
QtWidgets
.
QGroupBox
()
self
.
boxGenerator
:
BoxSelectionSubsamplingMethod
=
None
self
.
subParticles
:
list
=
[]
def
get_control_groupBox
(
self
)
->
QtWidgets
.
QGroupBox
:
return
self
.
uiControls
...
...
@@ -16,7 +17,7 @@ class MeasureMode(QtCore.QObject):
def
update_measure_viewItems
(
self
)
->
None
:
raise
NotImplementedError
def
_
send_measuredParticles_to_filterview
(
self
)
->
None
:
def
send_measuredParticles_to_filterview
(
self
)
->
None
:
if
self
.
boxGenerator
.
particleContainer
is
not
None
:
subParticles
=
self
.
boxGenerator
.
apply_subsampling_method
()
self
.
filterView
.
update_measured_particles
(
subParticles
)
...
...
@@ -44,7 +45,7 @@ class CrossBoxMode(MeasureMode):
topLefts
:
list
=
self
.
boxGenerator
.
get_topLeft_of_boxes
()
boxSize
=
self
.
boxGenerator
.
boxSize
self
.
filterView
.
update_measure_boxes
(
topLefts
,
boxSize
)
self
.
_
send_measuredParticles_to_filterview
()
self
.
send_measuredParticles_to_filterview
()
class
CrossBoxesControls
(
QtWidgets
.
QGroupBox
):
...
...
@@ -103,7 +104,7 @@ class SpiralBoxMode(MeasureMode):
topLefts
:
list
=
self
.
boxGenerator
.
get_topLeft_of_boxes
()
boxSize
=
self
.
boxGenerator
.
boxSize
self
.
filterView
.
update_measure_boxes
(
topLefts
,
boxSize
)
self
.
_
send_measuredParticles_to_filterview
()
self
.
send_measuredParticles_to_filterview
()
class
SpiralBoxControls
(
QtWidgets
.
QGroupBox
):
...
...
helpers.py
View file @
4250c8d1
...
...
@@ -3,24 +3,42 @@ import numpy as np
import
sys
sys
.
path
.
append
(
"C://Users//xbrjos//Desktop//Python"
)
from
gepard
import
dataset
import
time
def timingDecorator(callingFunction):
    """
    A wrapper function for timing the duration of the given function and printing it.
    :param callingFunction: the function to be wrapped
    :return: wrapped function
    """
    from functools import wraps  # local import: keeps the module's import block untouched

    @wraps(callingFunction)  # fix: preserve __name__/__doc__ of the wrapped function
    def wrapper(*args, **kwargs):
        t0 = time.time()
        ret = callingFunction(*args, **kwargs)
        print(f'{callingFunction.__name__} took {np.round(time.time() - t0, 2)} seconds')
        return ret
    return wrapper
class
ParticleBinSorter
(
object
):
def
__init__
(
self
):
super
(
ParticleBinSorter
,
self
).
__init__
()
self
.
bins
=
[
5
,
10
,
20
,
50
,
100
,
200
,
500
]
def
sort_particles_into_bins
(
self
,
particleList
):
particlesInBins
=
self
.
_get_empty_bins
()
for
particle
in
particleList
:
binIndex
=
self
.
_get_binIndex_of_particle
(
particle
)
particlesInBins
[
binIndex
].
append
(
particle
)
return
particlesInBins
def
_get_empty_bins
(
self
):
return
[[]
for
_
in
range
(
len
(
self
.
bins
)
+
1
)]
def
_get_binIndex_of_particle
(
self
,
particle
):
size
=
particle
.
getParticleSize
()
binIndex
=
0
...
...
@@ -114,7 +132,7 @@ def get_polygon_area(polygon: QtGui.QPolygonF) -> float:
return
area
def
get_filterDimensions_from_dataset
(
dataset
)
->
tuple
:
def
get_filterDimensions_from_dataset
(
dataset
:
dataset
.
DataSet
)
->
tuple
:
"""
Processes the datasets boundary items to calculate diameter and offset (coord system offset of circular filter
with respect to actual dataset). This is used to set circular filter dimensions to use in the geometric
...
...
input_output.py
View file @
4250c8d1
...
...
@@ -4,6 +4,7 @@ from evaluation import TotalResults
def
load_results
(
fname
:
str
)
->
TotalResults
:
# TODO: REMVOE DATASET FROM SAMPLERESULTS, OTHERWISE THE FILESIZE IS GOING TO BE HUGE
res
:
TotalResults
=
None
if
os
.
path
.
exists
(
fname
):
with
open
(
fname
,
"rb"
)
as
fp
:
...
...
subsampling.py
View file @
4250c8d1
...
...
@@ -10,33 +10,57 @@ SET GEPARD TO EVALUATION BRANCH (WITHOUT THE TILING STUFF), OTHERWISE SOME OF TH
"""
# results: TotalResults = TotalResults()
# pklsInFolders = get_pkls_from_directory(r'C:\Users\xbrjos\Desktop\temp MP\NewDatasets')
#
# for folder in pklsInFolders.keys():
# for samplePath in pklsInFolders[folder]:
# newSampleResult: SampleResult = results.add_sample(samplePath)
# for attr in get_attributes_from_foldername(folder):
# newSampleResult.set_attribute(attr)
#
# t0 = time.time()
# results.update_all()
# print('updating all took', time.time()-t0, 'seconds')
#
results
:
TotalResults
=
TotalResults
()
pklsInFolders
=
get_pkls_from_directory
(
r
'C:\Users\xbrjos\Desktop\temp MP\NewDatasets'
)
for
folder
in
pklsInFolders
.
keys
():
for
samplePath
in
pklsInFolders
[
folder
]:
newSampleResult
:
SampleResult
=
results
.
add_sample
(
samplePath
)
for
attr
in
get_attributes_from_foldername
(
folder
):
newSampleResult
.
set_attribute
(
attr
)
t0
=
time
.
time
()
results
.
update_all
()
print
(
'updating all took'
,
time
.
time
()
-
t0
,
'seconds'
)
save_results
(
'results1.res'
,
results
)
# results: TotalResults = load_results('results1.res')
# results.update_all(force=True)
# save_results('results1.res', results)
results
:
TotalResults
=
load_results
(
'results1.res'
)
errorPerFraction
:
dict
=
results
.
get_error_vs_fraction_data
(
methods
=
[
'spiral'
,
'cross'
])
plt
.
clf
()
errorPerFraction
:
dict
=
results
.
get_error_vs_fraction_data
(
attributes
=
[
'air'
,
'water'
],
methods