-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathapp.py
More file actions
213 lines (155 loc) · 7.09 KB
/
app.py
File metadata and controls
213 lines (155 loc) · 7.09 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
import os, tempfile, json
from flask import Flask, request, jsonify, render_template
from flask_cors import CORS
from google.cloud import vision
from PIL import Image
from collections import OrderedDict

# Point the Google Cloud client libraries at the service-account key file.
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'serviceAccountKey.json'

app = Flask(__name__)
CORS(app)

# category_map: {category_name: [object/label terms]} — loaded once at startup
# and consulted by all the matching helpers below.
with open('category_map.json', 'r') as f:
    category_map = json.load(f)
def optimize_image(image_path, max_size=(800, 800)):
    """Downscale the image at *image_path* to fit within *max_size* and save
    it as an optimized JPEG alongside the original.

    Returns the path of the new JPEG file (original path + ".jpg").
    """
    output_path = image_path + ".jpg"
    with Image.open(image_path) as source:
        # thumbnail() resizes in place, preserving aspect ratio.
        source.thumbnail(max_size)
        rgb = source.convert("RGB")
        rgb.save(output_path, "JPEG", optimize=True, quality=85)
    return output_path
def get_detected_objects(image_path):
    """Run Google Vision object localization on the image file.

    Returns a list of detected object names, or a dict {'error': message}
    if the file cannot be read or the API reports an error.
    """
    annotator = vision.ImageAnnotatorClient()
    try:
        with open(image_path, 'rb') as fh:
            payload = fh.read()
        response = annotator.object_localization(image=vision.Image(content=payload))
        annotations = response.localized_object_annotations
        if response.error.message:
            raise Exception(f'Error: {response.error.message}')
        return [annotation.name for annotation in annotations]
    except Exception as exc:
        # Error is reported in-band as a dict; callers must type-check the result.
        return {'error': str(exc)}
def get_recyclable_categories(detected_objects, cat_map=None):
    """Return the categories whose term lists contain any detected object.

    Args:
        detected_objects: iterable of object names from Vision detection.
        cat_map: optional {category: [terms]} mapping; defaults to the
            module-level category_map (backward compatible — existing
            callers pass only detected_objects).

    Returns:
        A list of matching category names, or None when nothing matches
        (None rather than [] is relied on by callers' truthiness checks).
    """
    if cat_map is None:
        cat_map = category_map
    matching_categories = {
        category
        for category, terms in cat_map.items()
        if any(obj in terms for obj in detected_objects)
    }
    return list(matching_categories) if matching_categories else None
def get_best_fitting_category(file_name, image_path, detected_objects, matched_categories):
    """Pick the category whose term list matches the most detected objects.

    When several categories tie for the highest match count, the decision is
    deferred to granular_analysis_to_resolve_tie(), which re-examines the
    image with label detection.
    """
    counts = {
        category: sum(obj in category_map.get(category, []) for obj in detected_objects)
        for category in matched_categories
    }
    winner = max(counts, key=counts.get)
    top_score = counts[winner]
    tied = [cat for cat, score in counts.items() if score == top_score]
    if len(tied) > 1:
        return granular_analysis_to_resolve_tie(file_name, image_path, tied)
    return winner
def granular_analysis_to_resolve_tie(file_name, image_path, tied_categories):
    """Break a category tie by re-scanning the image with label detection.

    Counts how many detected labels fall in each tied category's term list
    and appends a debug record to logs.txt. Returns the winning category,
    None when no label matched any tied category, or {'error': message}
    if the Vision call fails.
    """
    annotator = vision.ImageAnnotatorClient()
    try:
        with open(image_path, 'rb') as fh:
            payload = fh.read()
        response = annotator.label_detection(image=vision.Image(content=payload))
        labels = [annotation.description for annotation in response.label_annotations]
        if response.error.message:
            raise Exception(f'Error: {response.error.message}')
        label_match_count = {}
        for category in tied_categories:
            terms = category_map.get(category, [])
            label_match_count[category] = sum(label in terms for label in labels)
        quoted_labels = ', '.join('"{}"'.format(label) for label in labels)
        with open("logs.txt", "a") as log:
            log.write(f"Detected labels for {file_name}: {{ {quoted_labels} }}\n")
            log.write(f"Label match count for {file_name}: {label_match_count}\n")
            log.write("\n")
        winner = max(label_match_count, key=label_match_count.get)
        return winner if label_match_count[winner] > 0 else None
    except Exception as exc:
        return {'error': str(exc)}
def fetch_labels(image_path):
    """Return Google Vision label descriptions for the image.

    On any failure (unreadable file or API error) returns a dict
    {'error': message} instead of a list.
    """
    annotator = vision.ImageAnnotatorClient()
    try:
        with open(image_path, "rb") as fh:
            payload = fh.read()
        response = annotator.label_detection(image=vision.Image(content=payload))
        descriptions = [annotation.description for annotation in response.label_annotations]
        if response.error.message:
            raise Exception(f'Error: {response.error.message}')
        return descriptions
    except Exception as exc:
        return {'error': str(exc)}
@app.route('/', methods=['GET'])
def getIndex():
    """Serve the front-end index page."""
    page = render_template('index.html')
    return page
@app.route("/", methods=['POST'])
def postIndex():
return '[POST] - API is ONLINE.'
@app.route('/getLabels', methods=['POST'])
def get_labels():
    """Batch-scan uploaded images with Vision label detection.

    For each uploaded file, labels not yet present in category_map are
    appended to labels.txt. Returns a JSON summary of the batch:
    received / successful / errored counts plus the errored filenames.
    """
    if 'files' not in request.files or len(request.files.getlist('files')) == 0:
        return jsonify({'error': 'No files uploaded'}), 400
    all_labels = []
    new_labels_list = []
    files = request.files.getlist('files')
    total_files = len(files)
    successfull_files = 0
    error_files = []
    for file in files:
        if file.filename == '':
            continue  # Skip empty filenames
        with tempfile.NamedTemporaryFile(delete=False) as temp_file:
            file.save(temp_file.name)
            file_path = temp_file.name
        file_name = file.filename
        try:
            labels = fetch_labels(file_path)
        finally:
            # BUGFIX: temp files were created with delete=False and never
            # removed, leaking one file per upload.
            os.remove(file_path)
        # BUGFIX: fetch_labels signals failure with a dict. The old check
        # ('"error" in labels') would also skip a *list* that happened to
        # contain a literal "error" label; test the type explicitly.
        if isinstance(labels, dict) and 'error' in labels:
            error_files.append(file_name)
            continue
        # Keep only labels not already covered by any category's term list.
        new_labels = [
            label for label in labels
            if not any(label in values for values in category_map.values())
        ]
        all_labels.append(labels)
        successfull_files += 1
        if new_labels:
            new_labels_list.append((file_name, new_labels))
    # Append all new labels to the text file with a line of spacing between entries.
    with open("labels.txt", "a") as f:
        for file_name, new_labels in new_labels_list:
            quoted = ', '.join('"{}"'.format(label) for label in new_labels)
            f.write(f"New labels for {file_name}: {{ {quoted} }}\n\n")
    # NOTE: response key spellings (including "Successfull") are kept
    # unchanged for client compatibility.
    response_data = OrderedDict([
        ('Received images', total_files),
        ('Successfull scans', successfull_files),
        ('Error scans', error_files),
        ('Corrupted files', len(error_files)),
    ])
    return jsonify(response_data), 200
@app.route('/upload', methods=['POST'])
def upload_image():
    """Classify a single uploaded image into a recyclable category.

    Returns JSON of the form
    {'result': 'Yes'|'No', 'category': <name or "No match">, 'items': [...]}
    with HTTP 200, or {'error': message} with HTTP 400.
    """
    if 'file' not in request.files or request.files['file'].filename == '':
        return jsonify({'error': 'No file uploaded'}), 400
    file = request.files['file']
    with tempfile.NamedTemporaryFile(delete=False) as temp_file:
        file.save(temp_file.name)
        file_path = temp_file.name
    file_name = file.filename
    optimized_image_path = None
    try:
        optimized_image_path = optimize_image(file_path)
        analysis_result = get_detected_objects(optimized_image_path)
        if isinstance(analysis_result, dict) and 'error' in analysis_result:
            return jsonify({'error': analysis_result['error']}), 400
        detected_objects = analysis_result
        if not detected_objects:
            return jsonify({'result': 'No', 'category': "No match", "items": []}), 200
        recyclable_categories = get_recyclable_categories(detected_objects)
        if recyclable_categories:
            # NOTE(review): tie-breaking re-reads the *original* upload
            # (file_path), not the optimized JPEG — preserved from the
            # previous behavior; confirm this is intentional.
            best_category = get_best_fitting_category(file_name, file_path, detected_objects, recyclable_categories)
            if best_category:
                return jsonify({'result': 'Yes', 'category': best_category, "items": detected_objects}), 200
        return jsonify({'result': 'No', 'category': "No match", "items": detected_objects}), 200
    finally:
        # BUGFIX: both the raw temp file and the optimized JPEG were leaked
        # on every request; remove them regardless of outcome.
        os.remove(file_path)
        if optimized_image_path and os.path.exists(optimized_image_path):
            os.remove(optimized_image_path)
# Run the Flask development server only when executed directly
# (not when imported by a WSGI server).
if __name__ == '__main__':
    app.run(debug=True)