-
Notifications
You must be signed in to change notification settings - Fork 0
/
fragmentation_analysis.py
375 lines (297 loc) · 13.3 KB
/
fragmentation_analysis.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
import cv2 as cv
import numpy as np
from PIL import Image
import base64
import io
import argparse
# import draw_contours
from scipy import ndimage
from skimage import measure
import seaborn as sns
import pandas as pd
# important constants
# NOTE(review): Windows-style (backslash) paths relative to the repo root --
# these will not resolve on POSIX systems; confirm the deployment platform.
PROTXT_FILE = "dockerized_flask_server\\web\\req_files\\deploy.prototxt"
CAFFEMODEL_FILE = "dockerized_flask_server\\web\\req_files\\hed_pretrained_bsds.caffemodel"
# Output file names; each is prefixed with the first 9 characters of the
# input image path when written (see processing()).
OUTPUT_FRAG_IMG = "demo_out.jpg"
OUTPUT_FRAG_PIE_CHART = "pie_fig.jpg"
OUTPUT_FRAG_GRAPH = "out_gf.jpg"
OUTPUT_FRAG_RESULTS_CSV = "fragmentation_result.csv"
OUTPUT_FRAG_RESULTS_CV_CSV = "fragmentation_result_cv.csv"
OUTPUT_DRAW_CONTOURS = "contour_draw.jpg"
# Take in base64 string and return a PIL image
def base64_to_image(base64_string):
    """Decode a base64-encoded image into a PIL Image.

    Args:
        base64_string (str): base64 representation of an image.

    Returns:
        PIL.Image.Image: the decoded image.
    """
    raw_bytes = base64.b64decode(str(base64_string))
    buffer = io.BytesIO(raw_bytes)
    return Image.open(buffer)
def image_to_base64(image_path):
    """Read an image file from disk and return its base64 string.

    Args:
        image_path (str): path of the image file to encode.

    Returns:
        str: base64 representation of the file contents.
    """
    with open(image_path, "rb") as handle:
        raw = handle.read()
    encoded = base64.b64encode(raw)
    return encoded.decode("utf-8")
def image_to_base64_without_path(image):
    """Encode an in-memory image buffer as a base64 string.

    Args:
        image (bytes | numpy.ndarray): raw image bytes, or any object
            exposing the buffer protocol (e.g. an OpenCV image).

    Returns:
        str: base64 representation of the buffer.
    """
    encoded_bytes = base64.b64encode(image)
    return encoded_bytes.decode("utf-8")
class CropLayer(object):
    """Custom OpenCV DNN layer that center-crops one blob to another's size.

    Used by the Holistically-Nested Edge Detection (HED) network, whose
    intermediate outputs must be cropped back to the target resolution.
    """
    def __init__(self, params, blobs):
        # Crop window bounds; recomputed on every getMemoryShapes() call.
        self.xstart = 0
        self.xend = 0
        self.ystart = 0
        self.yend = 0
    def getMemoryShapes(self, inputs):
        """Receive two input shapes and size the output to the second one.

        The first blob will be cropped to the spatial size of the second,
        keeping batch size and channel count.
        """
        source_shape, target_shape = inputs[0], inputs[1]
        batch, channels = source_shape[0], source_shape[1]
        out_h, out_w = target_shape[2], target_shape[3]
        # Center the crop window inside the (larger) source blob.
        self.ystart = int((source_shape[2] - target_shape[2]) / 2)
        self.xstart = int((source_shape[3] - target_shape[3]) / 2)
        self.yend = self.ystart + out_h
        self.xend = self.xstart + out_w
        return [[batch, channels, out_h, out_w]]
    def forward(self, inputs):
        """Return the centered spatial crop of the first input blob."""
        y0, y1 = self.ystart, self.yend
        x0, x1 = self.xstart, self.xend
        return [inputs[0][:, :, y0:y1, x0:x1]]
def processing(img_name, low_intensity=100, high_intensity=200, external_contours=None):
    """Run the full fragmentation pipeline on one image.

    Edges are detected with a Holistically-Nested Edge Detection (HED)
    network, the edge map is thresholded to a binary image, contours
    (rock boundaries) are extracted, per-rock pixel areas are computed and
    bucketed, and graph/pie-chart figures are written to disk.

    Args:
        img_name (str): image path. NOTE(review): the first 9 characters of
            this path are reused as the output-file prefix (``img_name[:9]``
            below) -- presumably a fixed-length working directory; confirm
            with callers.
        low_intensity (int, optional): edge responses below this are forced
            to 0 (background). Defaults to 100.
        high_intensity (int, optional): edge responses above this are forced
            to 255 (edge). Defaults to 200.
        external_contours (list, optional): extra user-supplied contours to
            draw and measure alongside the detected ones. Defaults to None.

    Raises:
        ValueError: if the image cannot be read from ``img_name``.

    Returns:
        list: [annotated output image path, bucketed rock-area data,
        path of the black-and-white edge image].
    """
    # Load the HED model definition and weights.
    net = cv.dnn.readNetFromCaffe(PROTXT_FILE, CAFFEMODEL_FILE)
    cv.dnn_registerLayer('Crop', CropLayer)
    try:
        image = cv.imread(img_name)
        if image is None:
            # imread returns None (no exception) for unreadable paths.
            raise ValueError("Could not read image: " + img_name)
        # Pre-process: blobFromImage expects size as (width, height),
        # i.e. (shape[1], shape[0]) -- the original passed them swapped.
        inp = cv.dnn.blobFromImage(image, scalefactor=1.0,
                                   size=(image.shape[1], image.shape[0]),
                                   mean=(104.00698793, 116.66876762, 122.67891434),
                                   swapRB=False, crop=False)
        # Forward pass: produce the HED edge map.
        net.setInput(inp)
        out = net.forward()
        # Post-process: drop batch/channel dims, resize back to input size.
        out = out[0, 0]
        out = cv.resize(out, (image.shape[1], image.shape[0]))
        print("Size of Image is: " + str(out.shape))
        out = cv.cvtColor(out, cv.COLOR_GRAY2BGR)
        out = 255 * out
        out = out.astype(np.uint8)
        # Threshold to binary: strong edges -> 255, weak responses -> 0.
        out[np.where(out > high_intensity)] = 255
        out[np.where(out < low_intensity)] = 0
        # Find contours. The returned tuple arity differs between OpenCV 3.x
        # (image, contours, hierarchy) and 4.x (contours, hierarchy), so
        # catch the specific unpacking error instead of a bare except.
        gray = cv.cvtColor(out, cv.COLOR_BGR2GRAY)
        try:
            _, cnts, _ = cv.findContours(gray, mode=cv.RETR_TREE, method=cv.CHAIN_APPROX_SIMPLE)
        except ValueError:
            cnts, _ = cv.findContours(gray, mode=cv.RETR_TREE, method=cv.CHAIN_APPROX_SIMPLE)
        # Append user-supplied contours, if any.
        if external_contours is not None:
            cnts = list(cnts) + list(external_contours)
        # Draw all contours onto the original image (blue, 2 px).
        cv.drawContours(image, cnts, -1, (255, 0, 0), 2)
        # Measure contour areas (pixel counts) with the OpenCV-based helper;
        # scipy_contour_area() is the alternative scipy implementation.
        ret_rock_data = cv_contour_area(cnts, out, img_name)
        # Visualize the CSV produced above as a graph and a pie chart.
        draw_graph_pie(img_name)
        # Save the annotated image and the black-and-white edge image.
        con = image.copy()
        cv.imwrite(img_name[:9] + OUTPUT_FRAG_IMG, con)
        bandw_img_path = img_name[:9] + "bandw.png"
        cv.imwrite(bandw_img_path, out)
    finally:
        # Always unregister the custom layer, even on error; otherwise the
        # next call to cv.dnn_registerLayer('Crop', ...) raises.
        cv.dnn_unregisterLayer('Crop')
    return [img_name[:9] + OUTPUT_FRAG_IMG, ret_rock_data, bandw_img_path]
def cv_contour_area(cnts, image, filename):
    """Compute the pixel area of each rock contour using OpenCV.

    Each contour is rasterized onto its own mask and the non-zero pixels
    are counted. Every area is written to a CSV file; contours of 100
    pixels or fewer are excluded from the returned (bucketed) data as noise.

    Args:
        cnts (sequence): contours as returned by cv.findContours.
        image (numpy.ndarray): image / frame the contours came from
            (used only for its shape).
        filename (str): path whose first 9 characters prefix the output
            CSV file name.

    Returns:
        list: bucketed rock sizes (see bucket_data_v2).
    """
    out = image.copy()
    areas = {}
    count = 0
    # Context manager guarantees the CSV is closed even if a contour
    # operation raises (the original leaked the handle on error).
    with open(filename[:9] + OUTPUT_FRAG_RESULTS_CV_CSV, "w") as f:
        f.write(',' + ",".join(["Area"]) + '\n')
        for c in cnts:
            # Rasterize the contour, then count its pixels (= its area).
            mask = np.zeros(out.shape, dtype=np.uint8)
            cv.fillPoly(mask, [c], [255, 255, 255])
            mask = cv.cvtColor(mask, cv.COLOR_BGR2GRAY)
            pixels = cv.countNonZero(mask)
            # Record every contour in the CSV ...
            f.write(str(count) + ", " + str(pixels) + "\n")
            # ... but ignore rocks smaller than ~100 px as noise.
            if pixels > 100:
                areas[count] = pixels
            count += 1
    return bucket_data_v2(areas)
def bucket_data(data):
    """Bucket rock areas into ten linearly spaced size classes.

    Args:
        data (dict): map of rock index -> area in pixels.

    Returns:
        list: [bucket_edge, count] pairs, one per bucket, where each rock
        is counted in the bucket whose edge is nearest to its area.
    """
    largest = max(data.values())
    # Ten bucket edges at 10%, 20%, ..., 90% of the largest area,
    # plus the largest area itself as the final edge.
    buckets = {(largest * step) / 10: 0 for step in range(1, 10)}
    buckets[largest] = 0
    # Tally each rock under the nearest bucket edge.
    for area in data.values():
        nearest = min(buckets, key=lambda edge: abs(edge - area))
        buckets[nearest] += 1
    return [[edge, tally] for edge, tally in buckets.items()]
def bucket_data_v2(data):
    """Bucket the rock area data into linear size areas in a better way using pandas

    Args:
        data (dict): dictionary containing count of rock and its area in terms of pixels

    Returns:
        rock_data (list): list of bucketed rock size, as [upper_edge, count]
            string/int pairs sorted by ascending bucket edge
    """
    # NOTE(review): this relies on the column layout produced by
    # DataFrame.value_counts().reset_index() and on positional Series access
    # (v[0]/v[1]) below, both of which vary across pandas versions --
    # verify against the pandas version pinned by the project.
    ret = pd.DataFrame({'test': pd.Series(data)})
    # Count occurrences of each distinct area value.
    test = ret.value_counts().reset_index()
    # Split the area range into 1000 equal-width bins; keep only the edges,
    # truncated to ints.
    _, edges = pd.cut(test['test'], bins=1000, retbins=True)
    edges = [int(x) for x in edges]
    # Label each row with the (integer-edged) bin it falls into.
    test['range'] = pd.cut(test['test'], bins=edges).astype(str)
    # Number of distinct area values per bin.
    test2 = test.groupby('range')['test'].count().reset_index()
    # Keep only the upper edge of each interval string "(lo, hi]".
    test2['range_stripped'] = test2['range'].map(lambda x: x.split(',')[1].rstrip(']').lstrip(' '))
    test2.drop(columns=['range'], inplace=True)
    ret_list = []
    for _, v in test2.astype(int).sort_values('range_stripped', ascending=True).iterrows():
        ret_list.append([str(v[1]), int(v[0])])
    return ret_list
def scipy_contour_area(cnts, image):
    """Find the area of rock blobs using scipy connected-component labelling.

    The binary image is opened (erode then dilate) to remove small specks,
    its 255-valued blobs are labelled, and each blob's pixel area is written
    to output/fragmentation_result.csv.

    Args:
        cnts (numpy.ndarray): contours (unused here; kept for signature
            parity with cv_contour_area).
        image (numpy.ndarray): binary image / frame to measure.
    """
    out = image.copy()
    # Morphological opening: erode then dilate removes small noise blobs
    # without shrinking the surviving regions.
    kernel = np.ones((3, 3), np.uint8)
    eroded = cv.erode(out, kernel, iterations=1)
    dilated = cv.dilate(eroded, kernel, iterations=1)
    mask = dilated == 255
    labeled_mask, num_labels = ndimage.label(mask)
    print("Number of rocks detected are: " + str(num_labels))
    # Area (pixel count) of each labelled cluster.
    clusters = measure.regionprops(labeled_mask)
    propList = ['Area']
    # NOTE(review): path is hard-coded instead of using the
    # OUTPUT_FRAG_RESULTS_CSV constant -- kept as-is to preserve behavior.
    # Context manager closes the file even if a property lookup raises.
    with open('output/fragmentation_result.csv', 'w') as output_file:
        output_file.write(',' + ",".join(propList) + '\n')
        for cluster_props in clusters:
            output_file.write(str(cluster_props['Label']))
            for prop in propList:
                # Area is reported in raw pixels; convert to physical
                # units (e.g. um^2) here if a scale becomes available.
                output_file.write(',' + str(cluster_props[prop]))
            output_file.write('\n')
def draw_graph_pie(img_name):
    """Visualize the rock-area CSV as a distribution graph and a pie chart.

    Reads the CSV written by cv_contour_area, saves a histogram/KDE figure
    of the outlier-filtered areas, and a pie chart of rocks grouped into
    five size classes.

    Args:
        img_name (str): image path; its first 9 characters are the
            input/output file prefix (must match cv_contour_area).
    """
    df = pd.read_csv(img_name[:9] + OUTPUT_FRAG_RESULTS_CV_CSV)
    # Real copy: the original used plain assignment, which aliases the
    # frame, so adding 'new_column' below mutated the shared object.
    df_copy = df.copy()
    # Map each rock into one of five size classes for the pie chart.
    size_class = []
    for area in df.Area:
        if area < 1000:
            size_class.append(0)
        elif area < 3000:
            size_class.append(1)
        elif area < 6000:
            size_class.append(2)
        elif area < 9000:
            size_class.append(3)
        else:
            size_class.append(4)
    # Drop outliers (too small = noise, too large = mis-detected blobs)
    # before plotting the distribution; boundaries 100 and 50000 are kept,
    # matching the original drop condition.
    df = df[(df.Area >= 100) & (df.Area <= 50000)].reset_index(drop=True)
    graph_pic = sns.displot(df, x="Area", kde=True)
    graph_pic.savefig(img_name[:9] + OUTPUT_FRAG_GRAPH)
    # Pie chart: count of rocks per size class (unfiltered data).
    df_copy['new_column'] = size_class
    sns.displot(df_copy, x="Area", kde=True, col="new_column")
    data = df_copy.groupby("new_column").count()
    plot = data.plot.pie(y='Area', figsize=(10, 8))
    fig = plot.get_figure()
    fig.savefig(img_name[:9] + OUTPUT_FRAG_PIE_CHART)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='This sample shows how to define custom OpenCV deep learning layers in Python. '
                    'Holistically-Nested Edge Detection (https://arxiv.org/abs/1504.06375) neural network '
                    'is used as an example model. Find a pre-trained model at https://github.com/s9xie/hed.')
    parser.add_argument('--input', help='Path to image', required=True)
    # NOTE(review): --prototxt/--caffemodel are parsed but processing() reads
    # the module-level PROTXT_FILE/CAFFEMODEL_FILE constants instead.
    # Defaults fixed: the originals misspelled the directory ("flase") and
    # the extension ("protxt"), so they could never resolve.
    parser.add_argument('--prototxt', help='Path to deploy.prototxt',
                        default="dockerized_flask_server/web/req_files/deploy.prototxt")
    parser.add_argument('--caffemodel', help='Path to hed_pretrained_bsds.caffemodel',
                        default="dockerized_flask_server/web/req_files/hed_pretrained_bsds.caffemodel")
    parser.add_argument('--HI', help='High intensity of threshold', default=200, type=int)
    parser.add_argument('--LI', help='Low intensity of threshold', default=100, type=int)
    # store_true instead of type=bool: argparse's bool("False") is True,
    # so the old flag could never be turned off from the command line.
    parser.add_argument('--extctrs', help='Flag that lets the user draw external contours',
                        action='store_true')
    args = parser.parse_args()
    # Forward the threshold arguments (previously parsed but ignored).
    ret = processing(args.input, low_intensity=args.LI, high_intensity=args.HI)