-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathapp.py
More file actions
241 lines (178 loc) · 9.55 KB
/
app.py
File metadata and controls
241 lines (178 loc) · 9.55 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
# import torch
# from PIL import Image, ImageOps
# from transformers import AutoModel, AutoTokenizer
# from fastapi import FastAPI, File, UploadFile, HTTPException
# from fastapi.responses import JSONResponse
# from io import BytesIO
# import uvicorn
# import gc
# import re
# def clear_cuda_cache():
# """
# Clears the CUDA cache to prevent memory leaks.
# """
# if torch.cuda.is_available():
# torch.cuda.empty_cache()
# torch.cuda.synchronize()
# gc.collect()
# print("CUDA cache cleared")
# def clean_text(text):
# # List of terms to be removed
# removable_terms = [
# '**Title:**', '**Body Text:**', 'Title:', 'Body Text:', # Add more terms as needed
# ]
# # Iterate over each term and replace it with an empty string
# for term in removable_terms:
# # Regex pattern to match the exact term possibly surrounded by whitespace
# pattern = r'\s*' + re.escape(term) + r'\s*'
# text = re.sub(pattern, '', text, flags=re.IGNORECASE)
# return text
# app = FastAPI()
# torch.set_grad_enabled(False)
# # Load model and tokenizer
# model = AutoModel.from_pretrained('openbmb/MiniCPM-V-2_6-int4', trust_remote_code=True)
# tokenizer = AutoTokenizer.from_pretrained('openbmb/MiniCPM-V-2_6-int4', trust_remote_code=True)
# # Move model to GPU if available
# # if torch.cuda.is_available():
# # model = model.to('cuda')
# model.eval()
# question = '''Extract the text from the provided image and return only plain text content.
# Ensure that no additional formatting, metadata, or fields like title, subtitles, or table headers are included in the response.
# Provide only the actual text from the image without explaining about the image or text in the response.
# Do not autocorrect the text and do not insert extra characters to the words and do not apply contraction to the words.
# Return the extracted text exactly as it appears, without any additional explanation.
# If there is no text in the image, simply return '0' but do not miss any word in the image.
# '''
# msgs = [
# {'role': 'user', 'content': [Image.open('train1.jpeg').convert('RGB'), question]},
# {'role': 'assistant', 'content': '''Hi , How are you you ? /nI am fine fine , what /nabout you ? /nThis is a test image for /nOCR whcih is opticall /ncharacterrr recognition . /nIt looks cool to get the /ndigitalized and it is a /ngood thing that can be /ndone . /nNotes:'''},
# # {'role': 'user', 'content': [Image.open('train2.jpeg').convert('RGB'), question]},
# # {'role': 'assistant', 'content': '''Title : Donut OCR /nDonut (Document understanding /ntransformer) is one of the ways /nwe can exxtract into form /ndocs and we use them in /nvarious ways. /nIt is a newest method for /nprocesing & extracting information /nfrom documents. Unlike OCR engines, /nDonut utilizes an end-to-end /ntransformer model. /nIt comprises a vision encoder & /na text - decoder (BART) . /nHi, How you are doing ? /nIt is true ?'''},
# {'role': 'user', 'content': [Image.open('train3.jpg').convert('RGB'), question]},
# {'role': 'assistant', 'content': '''Date: /nsmrt resuable Noetbok /nImagine a notebook that evloves /nwith your thouhgts , a smart /nreusable noetbook that harms /nthe powder of technologi to /nrevolutonze your writing /nxperience. Thi s remarkalbe tool /ncaptures the /ncaptures the esense of your /ncreativity , technology. /ntechnology , effortlessely.'''},
# # {'role': 'user', 'content': [Image.open('train4.jpg').convert('RGB'), question]},
# # {'role': 'assistant', 'content': '''Munday , Fraday , Tusedai , /nwednsedae , satuday /nGood Mrning Pencel /nKatlon studio is gve fre. /ntral for one manth. /nI wil tkae live Today /nbecase I am nat Feling wel'''}
# ]
# def preprocess_image(image: Image.Image, target_size=(1344, 1344)):
# """
# Preprocess the image by resizing and padding it to the target size (1344x1344).
# Ensures consistent padding and size for all images.
# """
# image.thumbnail(target_size, Image.Resampling.LANCZOS)
# delta_width = target_size[0] - image.size[0]
# delta_height = target_size[1] - image.size[1]
# padding = (delta_width // 2, delta_height // 2, delta_width - delta_width // 2, delta_height - delta_height // 2)
# padded_image = ImageOps.expand(image, padding, fill=(255, 255, 255))
# return padded_image
# @app.post("/OCR")
# async def extract_text(image: UploadFile = File(...)):
# if not image.content_type.startswith('image/'):
# raise HTTPException(status_code=400, detail="Invalid file type. Please upload an image.")
# try:
# clear_cuda_cache()
# # Load image from the uploaded file
# image_bytes = await image.read()
# img = Image.open(BytesIO(image_bytes)).convert('RGB')
# processed_img = preprocess_image(img, target_size=(1344, 1344))
# msgs.append({'role': 'user', 'content': [processed_img, question]})
# # msgs = [{'role': 'user', 'content': [processed_img, question]}]
# clear_cuda_cache()
# answer = model.chat(
# image=None,
# msgs=msgs,
# tokenizer=tokenizer,
# temperature=0.1
# )
# clear_cuda_cache()
# answer = clean_text(answer)
# return JSONResponse(content={"text": answer})
# except Exception as e:
# clear_cuda_cache()
# raise HTTPException(status_code=500, detail=f"Error processing the image: {str(e)}")
# if __name__ == "__main__":
# uvicorn.run(app, host="0.0.0.0", port=8000, workers=1)
##########################################################################
import torch
from PIL import Image, ImageOps
from transformers import AutoModel, AutoTokenizer
from fastapi import FastAPI, File, UploadFile, HTTPException
from fastapi.responses import JSONResponse
from io import BytesIO
import uvicorn
import gc
import re
def clear_cuda_cache():
    """Release cached CUDA memory and trigger garbage collection.

    Does nothing on machines without a CUDA device.
    """
    if not torch.cuda.is_available():
        return
    torch.cuda.empty_cache()
    torch.cuda.synchronize()
    gc.collect()
    print("CUDA cache cleared")
def clean_text(text):
    """Strip formatting labels (e.g. '**Title:**') the model sometimes emits.

    Matching is case-insensitive. Each label, together with the whitespace
    around it, is replaced by a single space so the words on either side are
    not glued together (the previous version substituted an empty string,
    which merged adjacent words: "Hello **Title:** World" -> "HelloWorld").
    The result is stripped of leading/trailing whitespace.

    Args:
        text: Raw text returned by the OCR model.

    Returns:
        The text with all known labels removed.
    """
    # List of terms to be removed
    removable_terms = [
        '**Title:**', '**Body Text:**', 'Title:', 'Body Text:', '**Sub**',
        '**Subsection**', '**Signature:**', '**Content:**', '**Footer:**',
        # Add more terms as needed
    ]
    for term in removable_terms:
        # \s* on both sides so stray blank runs around the label collapse too.
        pattern = r'\s*' + re.escape(term) + r'\s*'
        text = re.sub(pattern, ' ', text, flags=re.IGNORECASE)
    return text.strip()
# FastAPI application exposing a single OCR endpoint (POST /OCR, defined below).
app = FastAPI()
# Inference-only service: disable autograd globally to save memory and time.
torch.set_grad_enabled(False)
# Load model and tokenizer
# NOTE(review): trust_remote_code executes Python shipped inside the model
# repository; acceptable only because the checkpoint source is known/pinned.
model = AutoModel.from_pretrained('openbmb/MiniCPM-V-2_6-int4', trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained('openbmb/MiniCPM-V-2_6-int4', trust_remote_code=True)
# int4-quantized checkpoint; presumably its loading code handles device
# placement itself (the explicit .to('cuda') was removed) — confirm.
model.eval()
# Prompt sent alongside every uploaded image; instructs the model to return
# verbatim plain text only, or '0' when the image contains no text.
question = '''Extract the text from the provided image and return only plain text content.
Ensure that no additional formatting, metadata, or fields like title, subtitles, or table headers are included in the response.
Provide only the actual text from the image without explaining about the image or text in the response.
Do not autocorrect the text and do not insert extra characters to the words and do not apply contraction to the words.
Return the extracted text exactly as it appears, without any additional explanation.
If there is no text in the image, simply return '0' but do not miss any word in the image.
'''
def preprocess_image(image: Image.Image, target_size=(1344, 1344)) -> Image.Image:
    """Resize and letterbox an image to ``target_size`` without distortion.

    The image is scaled down (never up) to fit inside ``target_size`` with its
    aspect ratio preserved, then centred on a white canvas by padding each side.

    Args:
        image: Source image. Left unmodified — a copy is resized, fixing the
            previous in-place mutation of the caller's image by ``thumbnail``.
        target_size: (width, height) of the returned image.

    Returns:
        A new RGB-compatible image of exactly ``target_size``.
    """
    # Image.thumbnail resizes in place, so work on a copy.
    resized = image.copy()
    resized.thumbnail(target_size, Image.Resampling.LANCZOS)
    delta_width = target_size[0] - resized.size[0]
    delta_height = target_size[1] - resized.size[1]
    # Split the padding as evenly as possible between opposite sides
    # (left, top, right, bottom); odd remainders go to the right/bottom.
    padding = (delta_width // 2, delta_height // 2,
               delta_width - delta_width // 2, delta_height - delta_height // 2)
    return ImageOps.expand(resized, padding, fill=(255, 255, 255))
@app.post("/OCR")
async def extract_text(image: UploadFile = File(...)):
    """Run OCR on an uploaded image and return the extracted plain text.

    Args:
        image: Uploaded file; must carry an ``image/*`` content type.

    Returns:
        JSONResponse with a single ``"text"`` field containing the cleaned
        model output.

    Raises:
        HTTPException: 400 for a non-image upload, 500 for any processing
            failure.
    """
    # content_type can be None for malformed uploads — guard before startswith.
    if not (image.content_type or '').startswith('image/'):
        raise HTTPException(status_code=400, detail="Invalid file type. Please upload an image.")
    try:
        clear_cuda_cache()
        # Load image from the uploaded file
        image_bytes = await image.read()
        img = Image.open(BytesIO(image_bytes)).convert('RGB')
        processed_img = preprocess_image(img, target_size=(1344, 1344))
        # Each request gets its own message list so requests share no state.
        msgs = [
            {'role': 'user', 'content': [processed_img, question]},
        ]
        clear_cuda_cache()
        # Generate the response; low temperature keeps the transcription literal.
        answer = model.chat(
            image=None,
            msgs=msgs,
            tokenizer=tokenizer,
            temperature=0.1
        )
        clear_cuda_cache()
        # Clean the extracted text
        answer = clean_text(answer)
        return JSONResponse(content={"text": answer})
    except HTTPException:
        # Don't let deliberate HTTP errors be re-wrapped as a 500 below.
        raise
    except Exception as e:
        clear_cuda_cache()
        raise HTTPException(status_code=500, detail=f"Error processing the image: {str(e)}")
# Dev entry point: serve on all interfaces, port 8000. workers=1 — presumably
# because the model is loaded once at import and can't be forked across
# worker processes; confirm before scaling out.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000, workers=1)