Skip to content

Commit

Permalink
Merge branch 'bpurinton:master' into master
Browse files Browse the repository at this point in the history
  • Loading branch information
bpurinton authored Aug 17, 2021
2 parents 4a59dda + 92557ff commit a355d22
Show file tree
Hide file tree
Showing 14 changed files with 1,158 additions and 71 deletions.
37 changes: 25 additions & 12 deletions PebbleCounts.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,9 +72,9 @@
parser.add_argument("-bilat_filt_szs", nargs='+', type=int,
help="Size of bilateral filtering windows for the different scales. DEFAULT=[9, 5, 3]", default=[9, 5, 3])
parser.add_argument("-tophat_th", type=float,
help="Top percentile threshold to take from tophat filter for edge detection. DEFAULT=0.9", default=0.9)
help="Top percentile threshold to take from tophat filter for edge detection. DEFAULT=90", default=90)
parser.add_argument("-sobel_th", type=float,
help="Top percentile threshold to take from sobel filter for edge detection. DEFAULT=0.9", default=0.9)
help="Top percentile threshold to take from sobel filter for edge detection. DEFAULT=90", default=90)
parser.add_argument("-canny_sig", type=int,
help="Canny filtering sigma value for edge detection. DEFAULT=2", default=2)
parser.add_argument("-resize", type=float,
Expand Down Expand Up @@ -241,7 +241,8 @@
otsu_thresholding.percent_of_otsu(otsu_th)
otsu_thresholding.apply_threshold(gray, bgr, otsu_th, resize)
if otsu_thresholding.thresh != None:
ignore_mask = gray > otsu_th*(otsu_thresholding.thresh/100)
otsu_threshold = otsu_thresholding.thresh
ignore_mask = gray > otsu_th*(otsu_threshold/100)
break

# do color masking of sand
Expand Down Expand Up @@ -421,7 +422,7 @@
# tophat edges
print("Black tophat edge detection")
tophat = morph.black_tophat(GRAY, selem=morph.selem.disk(1))
tophat = tophat < np.percentile(tophat, tophat_th*100)
tophat = tophat < np.percentile(tophat, tophat_th)
tophat = morph.remove_small_holes(tophat, area_threshold=5, connectivity=2)
if not np.sum(tophat) == 0:
foo = func.featAND_fast(MASK, tophat)
Expand All @@ -435,7 +436,7 @@
# sobel edges
print("Sobel edge detection")
sobel = filt.sobel(GRAY)
sobel = sobel < np.percentile(sobel, sobel_th*100)
sobel = sobel < np.percentile(sobel, sobel_th)
sobel = morph.remove_small_holes(sobel, area_threshold=5, connectivity=2)
sobel = morph.thin(np.invert(sobel))
sobel = np.invert(sobel)
Expand Down Expand Up @@ -666,15 +667,27 @@
# output the measured grains as a csv
with open(csv_out, "w") as csv_file:
writer=csv.writer(csv_file, delimiter=",",lineterminator="\n",)
writer.writerow(["PebbleCounts Parameters"])
writer.writerow(["otsu_threshold", "maxGS", "cutoff", "min_sz_factors",
"win_sz_factors", "improvement_ths", "coordinate_scales",
"overlaps", "first_nl_denoise", "nl_means_chroma_filts",
"bilat_filt_szs", "tophat_th", "sobel_th", "canny_sig"])
writer.writerow([otsu_threshold, maxGS, cutoff, min_sz_factors, win_sz_factors,
improvement_ths, coordinate_scales, overlaps, first_nl_denoise,
nl_means_chroma_hs, bilat_filt_szs, tophat_th, sobel_th, canny_sig])
writer.writerow([])
writer.writerow(["Image Details"])
writer.writerow(["perc. not meas.", "perc. background color"])
writer.writerow([perc_nongrain*100, perc_sand*100])
writer.writerow([])
writer.writerow(["Pebble Details"])
if ortho:
writer.writerow(["perc. not meas.", "perc. background color",
"UTM X (m)", "UTM Y (m)", "a (px)", "b (px)",
writer.writerow(["UTM X (m)", "UTM Y (m)", "a (px)", "b (px)",
"a (m)", "b (m)", "area (px)", "area (m2)",
"orientation", "ellipse area (px)", "perc. diff. area"])
if not ortho:
writer.writerow(["perc. not meas.", "perc. background color",
"a (px)", "b (px)", "a (m)", "b (m)",
"area (px)", "area (m2)",
writer.writerow(["a (px)", "b (px)",
"a (m)", "b (m)", "area (px)", "area (m2)",
"orientation", "ellipse area (px)", "perc. diff. area"])

for grain in grains:
Expand All @@ -688,12 +701,12 @@
if ortho:
x_coord = xgrid[np.round(y0).astype(int), np.round(x0).astype(int)]
y_coord = ygrid[np.round(y0).astype(int), np.round(x0).astype(int)]
writer.writerow([perc_nongrain, perc_sand, x_coord, y_coord, a, b,
writer.writerow([x_coord, y_coord, a, b,
a*step, b*step, area, area*step**2, orientation,
ellipseArea, perc_diff_area])

if not ortho:
writer.writerow([perc_nongrain, perc_sand, a, b,
writer.writerow([a, b,
a*step, b*step, area, area*step**2, orientation,
ellipseArea, perc_diff_area])

Expand Down
37 changes: 25 additions & 12 deletions PebbleCountsAuto.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,9 +56,9 @@
parser.add_argument("-first_nl_denoise", type=int,
help="Initial denoising non-local means chromaticity filtering strength. DEFAULT=5", default=5)
parser.add_argument("-tophat_th", type=float,
help="Top percentile threshold to take from tophat filter for edge detection. DEFAULT=0.9", default=0.9)
help="Top percentile threshold to take from tophat filter for edge detection. DEFAULT=90", default=90)
parser.add_argument("-sobel_th", type=float,
help="Top percentile threshold to take from sobel filter for edge detection. DEFAULT=0.9", default=0.9)
help="Top percentile threshold to take from sobel filter for edge detection. DEFAULT=90", default=90)
parser.add_argument("-canny_sig", type=int,
help="Canny filtering sigma value for edge detection. DEFAULT=2", default=2)
parser.add_argument("-resize", type=float,
Expand Down Expand Up @@ -203,7 +203,8 @@
otsu_thresholding.percent_of_otsu(otsu_th)
otsu_thresholding.apply_threshold(gray, bgr, otsu_th, resize)
if otsu_thresholding.thresh != None:
ignore_mask = gray > otsu_th*(otsu_thresholding.thresh/100)
otsu_threshold = otsu_thresholding.thresh
ignore_mask = gray > otsu_th*(otsu_threshold/100)
break

# do color masking of sand
Expand Down Expand Up @@ -335,7 +336,7 @@
# tophat edges
print("Black tophat edge detection")
tophat = morph.black_tophat(gray, selem=morph.selem.disk(1))
tophat = tophat < np.percentile(tophat, tophat_th*100)
tophat = tophat < np.percentile(tophat, tophat_th)
tophat = morph.remove_small_holes(tophat, area_threshold=5, connectivity=2)
if not np.sum(tophat) == 0:
foo = func.featAND_fast(ignore_mask, tophat)
Expand All @@ -349,7 +350,7 @@
# sobel edges
print("Sobel edge detection")
sobel = filt.sobel(gray)
sobel = sobel < np.percentile(sobel, sobel_th*100)
sobel = sobel < np.percentile(sobel, sobel_th)
sobel = morph.remove_small_holes(sobel, area_threshold=5, connectivity=2)
sobel = morph.thin(np.invert(sobel))
sobel = np.invert(sobel)
Expand Down Expand Up @@ -484,16 +485,28 @@
# output the measured grains as a csv
with open(csv_out, "w") as csv_file:
writer=csv.writer(csv_file, delimiter=",",lineterminator="\n",)
writer.writerow(["PebbleCounts Parameters"])
writer.writerow(["otsu_threshold", "cutoff", "percent_overlap", "misfit_threshold",
"min_size_threshold", "first_nl_denoise",
"tophat_th", "sobel_th", "canny_sig"])
writer.writerow([otsu_threshold, cutoff, perc_overlap, misfit_threshold,
min_size, first_nl_denoise,
tophat_th, sobel_th, canny_sig])
writer.writerow([])
writer.writerow(["Image Details"])
writer.writerow(["perc. not meas.", "perc. background color"])
writer.writerow([perc_nongrain*100, perc_sand*100])
writer.writerow([])
writer.writerow(["Pebble Details"])
if ortho:
writer.writerow(["perc. not meas.", "perc. background color",
"UTM X (m)", "UTM Y (m)", "a (px)", "b (px)",
writer.writerow(["UTM X (m)", "UTM Y (m)", "a (px)", "b (px)",
"a (m)", "b (m)", "area (px)", "area (m2)",
"orientation", "ellipse area (px)", "perc. diff. area"])
if not ortho:
writer.writerow(["perc. not meas.", "perc. background color",
"a (px)", "b (px)", "a (m)", "b (m)",
"area (px)", "area (m2)",
writer.writerow(["a (px)", "b (px)",
"a (m)", "b (m)", "area (px)", "area (m2)",
"orientation", "ellipse area (px)", "perc. diff. area"])

for grain in grains:
y0, x0 = grain[0], grain[1]
a, b = grain[3], grain[2]
Expand All @@ -505,12 +518,12 @@
if ortho:
x_coord = xgrid[np.round(y0).astype(int), np.round(x0).astype(int)]
y_coord = ygrid[np.round(y0).astype(int), np.round(x0).astype(int)]
writer.writerow([perc_nongrain, perc_sand, x_coord, y_coord, a, b,
writer.writerow([x_coord, y_coord, a, b,
a*step, b*step, area, area*step**2, orientation,
ellipseArea, perc_diff_area])

if not ortho:
writer.writerow([perc_nongrain, perc_sand, a, b,
writer.writerow([a, b,
a*step, b*step, area, area*step**2, orientation,
ellipseArea, perc_diff_area])

Expand Down
4 changes: 2 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -26,10 +26,10 @@ PebbleCounts is a free (released under GNU General Public License v3.0) and open
Georeferenced ortho-photos should be in a [**UTM projection**](https://en.wikipedia.org/wiki/Universal_Transverse_Mercator_coordinate_system), providing the scale in meters. You can use the [gdal](https://www.gdal.org/) command line utilities to [translate rasters between various projections](https://www.nceas.ucsb.edu/scicomp/recipes/gdal-reproject). Because PebbleCounts doesn't allow you to save work in the middle of clicking, it's recommended that you don't use images covering areas of more than 2 by 2 meters or so. Furthermore, the algorithm is most effective on images of 0.8-1.2 mm/pixel resolution, where a lower cutoff of 20-pixels is appropriate. Resampling can also be accomplished quickly in [gdal](https://www.gdal.org/). For higher resolution (< 0.8 mm/pixel) imagery it's recommended not to go above 1 by 1 meter areas, particularly if there are many < 1 cm pebbles. If you want to cover a larger area simply break the image into smaller parts and process each individually, so you can give yourself a break. If at any time you want to end the application simply press *CTRL + C*.

## The PebbleCountsAuto Function
In addition to the manual-clicking version of PebbleCounts based on k-means segmentation, we have also developed and included an automated version that has higher uncertainties. We recommend using PebbleCounts in a subset of data to validate larger areas run in PebbleCountsAuto. The description of the automatic algorithm and uncertainties can be found in the publication (**PUBLICATION DOI TO BE ADDED**).
In addition to the manual-clicking version of PebbleCounts based on k-means segmentation, we have also developed and included an automated version that has higher uncertainties. We recommend using PebbleCounts in a subset of data to validate larger areas run in PebbleCountsAuto. The description of the automatic algorithm and uncertainties can be found in the publication: [https://doi.org/10.5194/esurf-7-859-2019](https://doi.org/10.5194/esurf-7-859-2019). Validation steps using both methods are shown in detail in another publication: [https://doi.org/10.1029/2021JF006260](https://doi.org/10.1029/2021JF006260).

# Installation
The first step is downloading the GitHub repository somewhere on your computer, and unzipping it. There you will find the Python algorithms (e.g., `PebbleCounts.py`), an `environment.yml` file containing the Python dependencies for quick installs with `conda` on Windows, a folder `example_data` with two example images one orthorectified and the other raw, and a folder `docs` containing the [full manual](docs/PebbleCounts_Manual.pdf).
The first step is downloading the GitHub repository somewhere on your computer, and unzipping it. There you will find the Python algorithms (e.g., `PebbleCounts.py`), a folder `example_data` with two example images, one orthorectified and the other raw, a Jupyter notebook `Read_and_Plot_PebbleCounts_CSV.ipynb` with a tutorial on how to manipulate output grain-size distribution .csv files, and a folder `docs` containing the [full manual](docs/PebbleCounts_Manual.pdf).

For newcomers to Python, no worries! Installation should be a cinch on most machines. First, you'll want the [Miniconda](https://conda.io/miniconda.html) Python package manager to setup a new Python environment for running the algorithm ([see this good article on Python package management](https://medium.freecodecamp.org/why-you-need-python-environments-and-how-to-manage-them-with-conda-85f155f4353c)). Download either the 32- or 64-bit Miniconda installer of Python 3.x then follow the instructions (either using the `.exe` file for Windows, `.pkg` for Mac, or `bash installer` for Linux). Add Miniconda to the system `PATH` variable when prompted.

Expand Down
Loading

0 comments on commit a355d22

Please sign in to comment.