Tony363 committed
Commit a64cb2d · 1 Parent(s): 98deac4

Add application file

Files changed (3)
  1. Dockerfile +16 -0
  2. app.py +141 -0
  3. requirements.txt +154 -0
Dockerfile ADDED
@@ -0,0 +1,16 @@
+ # read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
+ # you will also find guides on how best to write your Dockerfile
+
+ FROM python:3.11
+
+ RUN useradd -m -u 1000 user
+ USER user
+ ENV PATH="/home/user/.local/bin:$PATH"
+
+ WORKDIR /app
+
+ COPY --chown=user ./requirements.txt requirements.txt
+ RUN pip install --no-cache-dir --upgrade -r requirements.txt
+
+ COPY --chown=user . /app
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "5001"]
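The Dockerfile follows the standard Hugging Face Docker Space layout: a non-root user, dependencies installed from requirements.txt, and the FastAPI app served by uvicorn on port 5001. A minimal smoke test for a locally built container might look like the sketch below; it assumes the container was started with port 5001 published to the host (e.g. `-p 5001:5001`), which is not shown in this commit.

# Minimal smoke test for the containerized app (assumes the image has been
# built and started locally with port 5001 published to the host).
import requests

# The root route ("/") renders the upload form, so an HTML 200 response
# confirms that uvicorn is up and the templates directory was copied in.
resp = requests.get("http://localhost:5001/", timeout=10)
resp.raise_for_status()
assert "text/html" in resp.headers.get("content-type", "")
print("Space is serving the upload page")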
app.py ADDED
@@ -0,0 +1,141 @@
+ import openai
+ import logging
+ from openai import OpenAI
+ import uvicorn
+ import sys
+ import base64
+ import requests
+ import json
+ from fastapi import FastAPI, Request, Form, UploadFile, File
+ from fastapi.responses import HTMLResponse, RedirectResponse, JSONResponse
+ from fastapi.templating import Jinja2Templates
+ from fastapi.staticfiles import StaticFiles
+
+ from pydantic_settings import BaseSettings
+ import io
+ import torch
+ from PIL import Image
+ from torchvision import transforms
+ from torchmetrics.multimodal.clip_iqa import CLIPImageQualityAssessment
+
+
+ class Settings(BaseSettings):
+     OPENAI_API_KEY: str = 'OPENAI_API_KEY'
+     FLASK_APP: str = 'FLASK_APP'
+     FLASK_ENV: str = 'FLASK_ENV'
+
+     class Config:
+         env_file = '.env'
+
+ settings = Settings()
+ client = OpenAI(api_key=settings.OPENAI_API_KEY)
+ app = FastAPI()
+
+ templates = Jinja2Templates(directory="templates")
+ app.mount("/static", StaticFiles(directory="static"), name="static")
+
+ # Encode an image file on disk as a base64 string
+ def encode_image(image_path: str) -> str:
+     with open(image_path, "rb") as image_file:
+         return base64.b64encode(image_file.read()).decode('utf-8')
+
+ def get_json(
+     img_bytes: str
+ ) -> dict:
+     # Send the base64-encoded image to the OpenAI vision endpoint
+     # base64_image = encode_image(file_name)
+     headers = {
+         "Content-Type": "application/json",
+         "Authorization": f"Bearer {settings.OPENAI_API_KEY}"
+     }
+
+     payload = {
+         "model": "gpt-4o-mini",
+         "response_format": {"type": "json_object"},
+         "messages": [
+             {
+                 "role": "user",
+                 "content": [
+                     {
+                         "type": "text",
+                         "text": "You are an intelligent system API that reads the text in an image and outputs it as key-value pairs in JSON format. Given an image, output the text in JSON format."
+                     },
+                     {
+                         "type": "image_url",
+                         "image_url": {
+                             "url": f"data:image/jpeg;base64,{img_bytes}"
+                         }
+                     }
+                 ]
+             }
+         ],
+         "max_tokens": 1000
+     }
+
+     response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)
+     return response.json()
+     # return templates.TemplateResponse("json_out.html", {"request": request, "data": response.json()})
+
+
+ @app.get("/", response_class=HTMLResponse)
+ def index(request: Request):
+     return templates.TemplateResponse("upload_image.html", {"request": request})
+
+
+ @app.post("/output_iqa")
+ async def create_upload_file(
+     request: Request,
+     file: UploadFile = File(...),
+     choice: str = Form(...),
+ ) -> dict:
+     pairs = {
+         'quality': ("Good photo", "Bad photo"),
+         'sharpness': ("Sharp photo", "Blurry photo"),
+         'noisiness': ("Clean photo", "Noisy photo"),
+     }
+     classes = pairs[choice.lower()]
+
+     _ = torch.manual_seed(42)
+     transform = transforms.Compose([
+         transforms.ToTensor(),  # Converts the image to a PyTorch tensor
+         # transforms.Normalize(mean=0,std=255)
+         transforms.Normalize(mean=[0.485, 0.456, 0.406],  # Normalize for pre-trained models
+                              std=[0.229, 0.224, 0.225])
+     ])
+
+     # Read the uploaded file into a PIL Image
+     image_content = await file.read()
+     image = Image.open(io.BytesIO(image_content)).convert('RGB')
+     img = transform(image)
+     metric = CLIPImageQualityAssessment(
+         # model_name_or_path='openai/clip-vit-large-patch14',
+         prompts=(classes, choice)
+     )
+     out = {
+         'filename': file.filename,
+         'on': choice,
+         'prompts': classes,
+         'score': metric(img)[choice].item(),
+     }
+     if out['score'] > 0.75:
+         # return RedirectResponse(url=f'/get_json/?filename={file.filename}')
+         to_gpt = base64.b64encode(image_content).decode('utf-8')  # encode the uploaded file bytes, not raw pixel data
+         info = get_json(to_gpt)
+         if info.get('choices') is not None:
+             out.update(json.loads(info['choices'][0]['message']['content']))
+         else:
+             info['error']['mb'] = f"{sys.getsizeof(to_gpt)/1000000} MB?"
+             info['error']['refer_article'] = 'https://community.openai.com/t/400-errors-on-gpt-vision-api-since-today/534538/4'
+             out.update(info['error'])
+         return out
+     return out
+
+
+ if __name__ == "__main__":
+     # Disable the uvicorn access logger
+     uvicorn_access = logging.getLogger("uvicorn.access")
+     uvicorn_access.disabled = True
+
+     logger = logging.getLogger("uvicorn")
+     logger.setLevel(logging.DEBUG)
+     uvicorn.run('app:app', host="0.0.0.0", port=5001, reload=True)
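For reference, the /output_iqa route takes a multipart upload plus a `choice` form field that must be one of `quality`, `sharpness`, or `noisiness` (the keys of the `pairs` dict above). A client-side sketch is shown below; it assumes the app is running locally on port 5001 and uses a hypothetical sample.jpg on disk.

# Sketch of a client call to /output_iqa (assumes the server from app.py is
# running on localhost:5001 and that sample.jpg exists in the working dir).
import requests

with open("sample.jpg", "rb") as f:
    resp = requests.post(
        "http://localhost:5001/output_iqa",
        files={"file": ("sample.jpg", f, "image/jpeg")},
        data={"choice": "quality"},  # one of: quality, sharpness, noisiness
        timeout=60,
    )

result = resp.json()
# Always present: filename, on, prompts, score.
# When score > 0.75, the response also carries the key/value pairs that
# gpt-4o-mini extracted from the image (or the OpenAI error details).
print(result["score"], result)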
requirements.txt ADDED
@@ -0,0 +1,154 @@
+ annotated-types==0.7.0
+ anyio==4.4.0
+ aplus==0.11.0
+ astropy==6.1.1
+ astropy-iers-data==0.2024.7.8.0.31.19
+ asttokens==2.4.1
+ azure-core==1.30.2
+ azure-storage-blob==12.20.0
+ azure-storage-file-datalake==12.15.0
+ blake3==0.4.1
+ bqplot==0.12.43
+ branca==0.7.2
+ cachetools==5.3.3
+ certifi==2024.7.4
+ cffi==1.16.0
+ charset-normalizer==3.3.2
+ click==8.1.7
+ cloudpickle==3.0.0
+ colorama==0.4.6
+ comm==0.2.2
+ contourpy==1.2.1
+ cryptography==42.0.8
+ cycler==0.12.1
+ dask==2024.7.0
+ debugpy==1.8.2
+ decorator==5.1.1
+ distro==1.9.0
+ dnspython==2.6.1
+ email_validator==2.2.0
+ exceptiongroup==1.2.1
+ executing==2.0.1
+ fastapi==0.111.0
+ fastapi-cli==0.0.4
+ filelock==3.15.4
+ fonttools==4.53.1
+ frozendict==2.4.4
+ fsspec==2024.6.1
+ future==1.0.0
+ h11==0.14.0
+ h5py==3.11.0
+ httpcore==1.0.5
+ httptools==0.6.1
+ httpx==0.27.0
+ huggingface-hub==0.24.0
+ idna==3.7
+ # importlib
+ # importlib_metadata==8.0.0
+ intel-openmp==2021.4.0
+ ipydatawidgets==4.3.5
+ ipykernel==6.29.5
+ ipyleaflet==0.19.1
+ ipympl==0.9.4
+ ipython==8.26.0
+ ipython-genutils==0.2.0
+ ipyvolume==0.6.3
+ ipyvue==1.11.1
+ ipyvuetify==1.9.4
+ ipywebrtc==0.6.0
+ ipywidgets==8.1.3
+ isodate==0.6.1
+ jedi==0.19.1
+ Jinja2==3.1.4
+ jupyter-leaflet==0.19.1
+ jupyter_client==8.6.2
+ jupyter_core==5.7.2
+ jupyterlab_widgets==3.0.11
+ kiwisolver==1.4.5
+ lightning-utilities==0.11.5
+ llvmlite==0.43.0
+ locket==1.0.0
+ markdown-it-py==3.0.0
+ MarkupSafe==2.1.5
+ matplotlib==3.9.1
+ matplotlib-inline==0.1.7
+ mdurl==0.1.2
+ mkl==2021.4.0
+ mpmath==1.3.0
+ nest-asyncio==1.6.0
+ networkx==3.3
+ numba==0.60.0
+ numpy==1.26.4
+ openai==1.37.0
+ orjson==3.10.6
+ packaging==24.1
+ pandas==2.2.2
+ parso==0.8.4
+ partd==1.4.2
+ pillow==10.4.0
+ piq==0.8.0
+ platformdirs==4.2.2
+ progressbar2==4.4.2
+ prompt_toolkit==3.0.47
+ psutil==6.0.0
+ pure-eval==0.2.2
+ pyarrow==16.1.0
+ pycparser==2.22
+ pydantic==2.8.2
+ pydantic-settings==2.3.4
+ pydantic_core==2.20.1
+ pyerfa==2.0.1.4
+ Pygments==2.18.0
+ pyparsing==3.1.2
+ python-dateutil==2.9.0.post0
+ python-dotenv==1.0.1
+ python-multipart==0.0.9
+ python-utils==3.8.2
+ pythreejs==2.4.2
+ pytz==2024.1
+ # pywin32==306
+ PyYAML==6.0.1
+ pyzmq==26.0.3
+ regex==2024.5.15
+ requests==2.32.3
+ rich==13.7.1
+ safetensors==0.4.3
+ shellingham==1.5.4
+ six==1.16.0
+ sniffio==1.3.1
+ stack-data==0.6.3
+ starlette==0.37.2
+ sympy==1.13.1
+ tabulate==0.9.0
+ tbb==2021.13.0
+ tokenizers==0.19.1
+ toolz==0.12.1
+ torch==2.3.1
+ torchmetrics==1.4.0.post0
+ torchvision==0.18.1
+ tornado==6.4.1
+ tqdm==4.66.4
+ traitlets==5.14.3
+ traittypes==0.2.1
+ transformers==4.42.4
+ typer==0.12.3
+ typing_extensions==4.12.2
+ tzdata==2024.1
+ ujson==5.10.0
+ urllib3==2.2.2
+ uvicorn==0.30.1
+ vaex==4.17.0
+ vaex-astro==0.9.3
+ vaex-core==4.17.1
+ vaex-hdf5==0.14.1
+ vaex-jupyter==0.8.2
+ vaex-ml==0.18.3
+ vaex-server==0.9.0
+ vaex-viz==0.5.4
+ watchfiles==0.22.0
+ wcwidth==0.2.13
+ websockets==12.0
+ widgetsnbextension==4.0.11
+ xarray==2024.6.0
+ xyzservices==2024.6.0
+ zipp==3.19.2