Spaces: AIMedica

Commit 957df8a · Parent(s): 5dcad43
AIMedica committed: Update app configuration and add GitHub Pages setup

Files changed:
- .github/workflows/deploy.yml +33 -0
- .gitignore +88 -0
- .gradio/certificate.pem +31 -0
- QUICK_START.md +122 -0
- README.md +117 -11
- app.py +11 -7
- batch_process.py +260 -0
- config.py +123 -0
- index.html +264 -0
- requirements.txt +7 -6
- run_app.bat +43 -0
- run_app.ps1 +175 -0
- setup.py +191 -0
- test_model.py +123 -0
.github/workflows/deploy.yml
ADDED
@@ -0,0 +1,33 @@
name: Deploy to GitHub Pages

on:
  push:
    branches: [ main, master ]
  pull_request:
    branches: [ main, master ]

jobs:
  deploy:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Setup Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.9'

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt

      - name: Deploy to GitHub Pages
        uses: peaceiris/actions-gh-pages@v3
        if: github.ref == 'refs/heads/main' || github.ref == 'refs/heads/master'
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: ./
          publish_branch: gh-pages
.gitignore
ADDED
@@ -0,0 +1,88 @@
# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# Virtual environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# IDE
.vscode/
.idea/
*.swp
*.swo
*~

# OS
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db

# Project specific
saved_predictions/
batch_results/
*.log
*.pth
!resnet50_dr_classifier.pth

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# pipenv
Pipfile.lock

# PEP 582
__pypackages__/

# Celery
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json
.gradio/certificate.pem
ADDED
@@ -0,0 +1,31 @@
-----BEGIN CERTIFICATE-----
MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
-----END CERTIFICATE-----
QUICK_START.md
ADDED
@@ -0,0 +1,122 @@
# Quick Start Guide

## Get Running in 5 Minutes

### 1. **Prerequisites Check**
- Python 3.8+ installed
- Model file `resnet50_dr_classifier.pth` present
- Internet connection (for first-time package installation)

### 2. **Easy Setup (Windows)**
```bash
# Option A: Double-click the batch file
run_app.bat

# Option B: Use PowerShell
.\run_app.ps1

# Option C: Use PowerShell with setup
.\run_app.ps1 -Setup
```

### 3. **Manual Setup (All Platforms)**
```bash
# Create virtual environment (recommended)
python -m venv venv

# Activate virtual environment
# Windows:
venv\Scripts\activate
# macOS/Linux:
source venv/bin/activate

# Install dependencies
pip install -r requirements.txt

# Test the setup
python test_model.py

# Run the app
python app.py
```

### 4. **What Happens Next**
- Web interface opens at `http://127.0.0.1:7860`
- Upload OCT images for analysis
- AI classifies images as DR or NoDR
- Grad-CAM heatmap shows AI focus areas
- Results automatically saved to `saved_predictions/` folder

## Usage Examples

### **Single Image Analysis**
1. Open `http://127.0.0.1:7860` in your browser
2. Upload an OCT image
3. View results and Grad-CAM visualization

### **Batch Processing**
```bash
python batch_process.py
```
- Process multiple images at once
- Get CSV report with all results
- Grad-CAM images saved to `batch_results/` folder

### **Testing & Validation**
```bash
python test_model.py
```
- Verify model loading
- Test basic functionality
- Check all dependencies

## Troubleshooting

### **Common Issues**

| Problem | Solution |
|---------|----------|
| "Model file not found" | Ensure `resnet50_dr_classifier.pth` is in the project folder |
| "Package not found" | Run `pip install -r requirements.txt` |
| "CUDA errors" | App runs on CPU by default. GPU not required |
| "Port already in use" | Change port in `config.py` or kill existing process |

### **Get Help**
- Run `python setup.py` for comprehensive setup
- Check `README.md` for detailed documentation
- Use `python test_model.py` to diagnose issues

## Project Structure
```
Deep_Learning_for_Ophthalmologist/
├── app.py                       # Main web application
├── batch_process.py             # Batch processing script
├── test_model.py                # Testing and validation
├── setup.py                     # Automated setup
├── config.py                    # Configuration settings
├── requirements.txt             # Python dependencies
├── resnet50_dr_classifier.pth   # AI model weights
├── run_app.bat                  # Windows batch launcher
├── run_app.ps1                  # Windows PowerShell launcher
├── README.md                    # Complete documentation
├── QUICK_START.md               # This quick start guide
├── saved_predictions/           # Single image results
└── batch_results/               # Batch processing results
```

## You're Ready!

Your AI-powered diabetic retinopathy detection app is now ready to use!

**Next steps:**
1. Double-click `run_app.bat` (Windows) or run `python app.py`
2. Open your web browser to the displayed URL
3. Upload OCT images for analysis
4. Explore the Grad-CAM visualizations
5. Use batch processing for multiple images

**Remember:** This tool is for research and educational purposes. Always consult healthcare professionals for medical diagnosis.

---

*Need help? Check the full `README.md` or run `python setup.py` for detailed assistance.*
README.md
CHANGED
@@ -1,13 +1,119 @@
# AI Diabetic Retinopathy Detection

An AI-powered application for detecting diabetic retinopathy (DR) from Optical Coherence Tomography (OCT) images using deep learning and Grad-CAM visualization.

## What is Diabetic Retinopathy?

Diabetic retinopathy is a diabetes complication that affects the eyes. It's caused by damage to the blood vessels of the light-sensitive tissue at the back of the eye (retina). Early detection is crucial for preventing vision loss.

## Features

- **AI Classification**: Uses a pre-trained ResNet-50 model to classify OCT images as DR (Diabetic Retinopathy) or NoDR (No Diabetic Retinopathy)
- **Grad-CAM Visualization**: Shows which areas of the image the AI focuses on for diagnosis
- **Confidence Scoring**: Provides probability scores for predictions
- **Image Storage**: Automatically saves analyzed images with timestamps
- **Web Interface**: User-friendly Gradio web interface

## Requirements

- Python 3.8 or higher
- PyTorch 2.0+
- CUDA-compatible GPU (optional, for faster inference)

## Installation

1. **Clone or download this repository**
   ```bash
   git clone <repository-url>
   cd Deep_Learning_for_Ophthalmologist
   ```

2. **Create a virtual environment (recommended)**
   ```bash
   python -m venv venv

   # On Windows:
   venv\Scripts\activate

   # On macOS/Linux:
   source venv/bin/activate
   ```

3. **Install dependencies**
   ```bash
   pip install -r requirements.txt
   ```

## Usage

1. **Start the application**
   ```bash
   python app.py
   ```

2. **Open your web browser** and navigate to the URL shown in the terminal (usually `http://127.0.0.1:7860`)

3. **Upload an OCT image** by clicking the upload area or dragging and dropping an image file

4. **View results**:
   - The AI will analyze the image and show the classification result
   - A Grad-CAM heatmap will highlight areas of interest
   - The prediction and confidence score will be displayed
   - The analyzed image will be automatically saved to the `saved_predictions` folder

## File Structure

```
Deep_Learning_for_Ophthalmologist/
├── app.py                       # Main application file
├── requirements.txt             # Python dependencies
├── resnet50_dr_classifier.pth   # Pre-trained model weights
├── README.md                    # This file
└── saved_predictions/           # Folder for saved analyzed images
```

## How It Works

1. **Image Preprocessing**: OCT images are resized to 224x224 pixels and normalized
2. **AI Analysis**: A ResNet-50 model processes the image to classify DR vs NoDR
3. **Grad-CAM**: Generates a heatmap showing which image regions influenced the AI's decision
4. **Results**: Displays classification, confidence score, and visual heatmap

## Model Information

- **Architecture**: ResNet-50 with modified final layer for binary classification
- **Training**: Pre-trained on OCT image dataset
- **Classes**: 2 (DR - Diabetic Retinopathy, NoDR - No Diabetic Retinopathy)
- **Input**: 224x224 RGB images
- **Output**: Binary classification with confidence scores

## Important Notes

- **Medical Disclaimer**: This tool is for research and educational purposes only. It should not be used for actual medical diagnosis without proper validation and clinical oversight.
- **Image Quality**: For best results, use high-quality OCT images with good contrast and resolution
- **Model Limitations**: The model's accuracy depends on the quality and characteristics of the training data

## Troubleshooting

- **CUDA errors**: The app runs on CPU by default. If you have GPU issues, ensure PyTorch is installed correctly
- **Memory issues**: Large images may cause memory problems. The app automatically resizes images to 224x224
- **Model loading errors**: Ensure `resnet50_dr_classifier.pth` is in the same directory as `app.py`

## Contributing

Contributions are welcome! Please feel free to submit issues, feature requests, or pull requests.

## License

This project is for educational and research purposes. Please ensure compliance with relevant regulations when using medical imaging data.

## References

- [PyTorch](https://pytorch.org/)
- [Gradio](https://gradio.app/)
- [Grad-CAM](https://github.com/jacobgil/pytorch-grad-cam)
- [ResNet Paper](https://arxiv.org/abs/1512.03385)

---

**Note**: This application is designed for research and educational purposes in ophthalmology and medical AI. Always consult with qualified healthcare professionals for actual medical diagnosis and treatment decisions.
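As a side note on the "How It Works" preprocessing step above: it corresponds to a standard torchvision pipeline, and the sketch below mirrors the resize and ImageNet normalization used in `batch_process.py` later in this commit. The variable names and the `example_oct.jpg` file are illustrative assumptions, not code taken from `app.py`.

```python
from PIL import Image
from torchvision import transforms

# Resize to 224x224 and normalize with ImageNet statistics, as described in "How It Works".
preprocess = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])

img = Image.open("example_oct.jpg").convert("RGB")   # hypothetical input file
input_tensor = preprocess(img).unsqueeze(0)          # shape: (1, 3, 224, 224)
```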
app.py
CHANGED
@@ -13,7 +13,8 @@ import datetime
 
 # Setup
 device = torch.device("cpu")
-
+# Create save directory in current working directory for cross-platform compatibility
+save_dir = os.path.join(os.getcwd(), "saved_predictions")
 if not os.path.exists(save_dir):
     os.makedirs(save_dir)
     print("Folder created:", save_dir)
@@ -68,11 +69,14 @@ def predict_retinopathy(image):
 # Gradio app
 gr.Interface(
     fn=predict_retinopathy,
-    inputs=gr.Image(type="pil"),
+    inputs=gr.Image(type="pil", label="Upload OCT Image"),
     outputs=[
-        gr.Image(type="pil", label="
-        gr.Text(label="
+        gr.Image(type="pil", label="Grad-CAM Heatmap"),
+        gr.Text(label="Diabetic Retinopathy Prediction")
     ],
-    title="
-    description="
-
+    title="AI Diabetic Retinopathy Detection",
+    description="Upload an OCT image to analyze for diabetic retinopathy. The AI will show a Grad-CAM heatmap highlighting areas of interest.",
+    examples=[
+        ["example_oct.jpg"] if os.path.exists("example_oct.jpg") else None
+    ]
+).launch(server_name="0.0.0.0", server_port=7860)
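The hunks above only show the interface wiring; the body of `predict_retinopathy` lies outside the diff. Purely as an assumption about what app.py likely contains, a minimal sketch that returns the (image, text) pair these outputs expect, mirroring the logic of `BatchDRDetector` in `batch_process.py`, could look like this; `model`, `transform`, `cam`, and `device` are assumed to be globals set up earlier in the file.

```python
# Sketch only — not the actual app.py body. Assumes `model`, `transform`,
# `cam`, and `device` are the globals app.py sets up earlier.
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
from pytorch_grad_cam.utils.image import show_cam_on_image

def predict_retinopathy(image):
    """Return (Grad-CAM heatmap as a PIL image, prediction text) for one OCT image."""
    img_tensor = transform(image.convert("RGB")).unsqueeze(0).to(device)
    with torch.no_grad():
        probs = F.softmax(model(img_tensor), dim=1)
        pred = int(torch.argmax(probs, dim=1))
    label = "DR" if pred == 0 else "NoDR"   # index 0 = DR, matching batch_process.py

    # Overlay the class-activation map on the resized input image
    rgb = np.array(image.resize((224, 224))).astype(np.float32) / 255.0
    grayscale_cam = cam(input_tensor=img_tensor, targets=[ClassifierOutputTarget(pred)])[0]
    heatmap = show_cam_on_image(rgb, grayscale_cam, use_rgb=True)
    return Image.fromarray(heatmap), f"{label} ({probs[0][pred].item():.2%} confidence)"
```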
batch_process.py
ADDED
@@ -0,0 +1,260 @@
#!/usr/bin/env python3
"""
Batch processing script for diabetic retinopathy detection.
Processes multiple OCT images and saves results in a structured format.
"""

import os
import torch
import torch.nn.functional as F
import numpy as np
from PIL import Image
from torchvision import models, transforms
from pytorch_grad_cam import GradCAM
from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
from pytorch_grad_cam.utils.image import show_cam_on_image
import csv
import datetime
from pathlib import Path

class BatchDRDetector:
    def __init__(self, model_path="resnet50_dr_classifier.pth"):
        """Initialize the batch detector with the trained model."""
        self.device = torch.device("cpu")
        self.model_path = model_path
        self.model = None
        self.cam = None
        self.transform = None
        self.output_dir = "batch_results"

        # Create output directory
        os.makedirs(self.output_dir, exist_ok=True)

        self._load_model()
        self._setup_gradcam()
        self._setup_transforms()

    def _load_model(self):
        """Load the trained ResNet-50 model."""
        print("Loading model...")
        try:
            self.model = models.resnet50(weights=None)
            self.model.fc = torch.nn.Linear(self.model.fc.in_features, 2)
            self.model.load_state_dict(torch.load(self.model_path, map_location=self.device))
            self.model.to(self.device)
            self.model.eval()
            print("Model loaded successfully!")
        except Exception as e:
            print(f"Error loading model: {e}")
            raise

    def _setup_gradcam(self):
        """Setup Grad-CAM for visualization."""
        target_layer = self.model.layer4[-1]
        self.cam = GradCAM(model=self.model, target_layers=[target_layer])
        print("Grad-CAM setup complete!")

    def _setup_transforms(self):
        """Setup image preprocessing transforms."""
        self.transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])

    def process_single_image(self, image_path):
        """Process a single image and return results."""
        try:
            # Load and preprocess image
            img = Image.open(image_path).convert("RGB")
            img_tensor = self.transform(img).unsqueeze(0).to(self.device)

            # Get prediction
            with torch.no_grad():
                output = self.model(img_tensor)
                probs = F.softmax(output, dim=1)
                pred = torch.argmax(probs, dim=1).item()
                confidence = probs[0][pred].item()

            # Generate Grad-CAM
            rgb_img_np = np.array(img.resize((224, 224))).astype(np.float32) / 255.0
            rgb_img_np = np.ascontiguousarray(rgb_img_np)
            grayscale_cam = self.cam(input_tensor=img_tensor, targets=[ClassifierOutputTarget(pred)])[0]
            cam_image = show_cam_on_image(rgb_img_np, grayscale_cam, use_rgb=True)

            # Determine label
            label = "DR" if pred == 0 else "NoDR"

            return {
                'image_path': image_path,
                'prediction': label,
                'confidence': confidence,
                'dr_probability': 1 - confidence if pred == 1 else confidence,
                'cam_image': cam_image,
                'status': 'success'
            }

        except Exception as e:
            return {
                'image_path': image_path,
                'prediction': 'ERROR',
                'confidence': 0.0,
                'dr_probability': 0.0,
                'cam_image': None,
                'status': f'error: {str(e)}'
            }

    def process_directory(self, input_dir, extensions=['.jpg', '.jpeg', '.png', '.tiff', '.bmp']):
        """Process all images in a directory."""
        print(f"Scanning directory: {input_dir}")

        # Find all image files
        image_files = []
        for ext in extensions:
            image_files.extend(Path(input_dir).glob(f"*{ext}"))
            image_files.extend(Path(input_dir).glob(f"*{ext.upper()}"))

        if not image_files:
            print("No image files found in the directory!")
            return []

        print(f"Found {len(image_files)} image files")

        # Process each image
        results = []
        for i, image_path in enumerate(image_files, 1):
            print(f"Processing {i}/{len(image_files)}: {image_path.name}")
            result = self.process_single_image(str(image_path))
            results.append(result)

            # Save Grad-CAM image if successful
            if result['status'] == 'success' and result['cam_image'] is not None:
                cam_filename = f"cam_{Path(image_path).stem}_{result['prediction']}_{result['confidence']:.3f}.png"
                cam_path = os.path.join(self.output_dir, cam_filename)
                Image.fromarray(result['cam_image']).save(cam_path)
                result['cam_saved_path'] = cam_path

        return results

    def save_results_csv(self, results, filename=None):
        """Save results to a CSV file."""
        if not filename:
            timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"dr_results_{timestamp}.csv"

        csv_path = os.path.join(self.output_dir, filename)

        with open(csv_path, 'w', newline='', encoding='utf-8') as csvfile:
            fieldnames = ['image_path', 'prediction', 'confidence', 'dr_probability', 'status', 'cam_saved_path']
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)

            writer.writeheader()
            for result in results:
                # Clean up the result dict for CSV
                csv_result = {k: v for k, v in result.items() if k in fieldnames}
                writer.writerow(csv_result)

        print(f"Results saved to: {csv_path}")
        return csv_path

    def generate_summary(self, results):
        """Generate a summary of the batch processing results."""
        successful = [r for r in results if r['status'] == 'success']
        errors = [r for r in results if r['status'] != 'success']

        if successful:
            dr_count = len([r for r in successful if r['prediction'] == 'DR'])
            nodr_count = len([r for r in successful if r['prediction'] == 'NoDR'])

            avg_confidence = np.mean([r['confidence'] for r in successful])
            avg_dr_prob = np.mean([r['dr_probability'] for r in successful])

            summary = {
                'total_images': len(results),
                'successful': len(successful),
                'errors': len(errors),
                'dr_detected': dr_count,
                'no_dr_detected': nodr_count,
                'dr_percentage': (dr_count / len(successful)) * 100 if successful else 0,
                'average_confidence': avg_confidence,
                'average_dr_probability': avg_dr_prob
            }
        else:
            summary = {
                'total_images': len(results),
                'successful': 0,
                'errors': len(errors),
                'dr_detected': 0,
                'no_dr_detected': 0,
                'dr_percentage': 0,
                'average_confidence': 0,
                'average_dr_probability': 0
            }

        return summary

def main():
    """Main function for batch processing."""
    print("Diabetic Retinopathy Detection - Batch Processing")
    print("=" * 60)

    # Check if model exists
    if not os.path.exists("resnet50_dr_classifier.pth"):
        print("Model file 'resnet50_dr_classifier.pth' not found!")
        print("   Please ensure the model file is in the current directory.")
        return

    # Initialize detector
    try:
        detector = BatchDRDetector()
    except Exception as e:
        print(f"Failed to initialize detector: {e}")
        return

    # Get input directory from user
    print("\nEnter the path to the directory containing OCT images:")
    print("   (or press Enter to use current directory)")

    user_input = input("Directory path: ").strip()

    if user_input:
        input_dir = user_input
    else:
        input_dir = os.getcwd()

    if not os.path.exists(input_dir):
        print(f"Directory not found: {input_dir}")
        return

    print(f"\nProcessing images from: {input_dir}")

    # Process images
    results = detector.process_directory(input_dir)

    if not results:
        print("No results to process!")
        return

    # Save results
    csv_path = detector.save_results_csv(results)

    # Generate and display summary
    summary = detector.generate_summary(results)

    print("\nBatch Processing Summary")
    print("=" * 40)
    print(f"Total images: {summary['total_images']}")
    print(f"Successfully processed: {summary['successful']}")
    print(f"Errors: {summary['errors']}")

    if summary['successful'] > 0:
        print(f"DR detected: {summary['dr_detected']} ({summary['dr_percentage']:.1f}%)")
        print(f"No DR detected: {summary['no_dr_detected']}")
        print(f"Average confidence: {summary['average_confidence']:.3f}")
        print(f"Average DR probability: {summary['average_dr_probability']:.3f}")

    print(f"\nResults saved to: {detector.output_dir}/")
    print(f"CSV report: {csv_path}")

if __name__ == "__main__":
    main()
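For quick reference, the class above can also be driven programmatically instead of through the interactive `input()` prompt in `main()`. The wrapper below is an illustration, not part of the commit; the `"oct_images"` folder name is a hypothetical example.

```python
# Illustrative programmatic use of BatchDRDetector (not part of this commit)
from batch_process import BatchDRDetector

detector = BatchDRDetector()                          # loads resnet50_dr_classifier.pth on CPU
results = detector.process_directory("oct_images")    # hypothetical folder of .jpg/.png OCT scans
detector.save_results_csv(results)                    # writes dr_results_<timestamp>.csv to batch_results/
print(detector.generate_summary(results))             # counts, DR percentage, average confidence
```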
config.py
ADDED
@@ -0,0 +1,123 @@
#!/usr/bin/env python3
"""
Configuration file for Diabetic Retinopathy Detection App
Modify these settings to customize the application behavior
"""

import os

# Model Configuration
MODEL_CONFIG = {
    'model_path': 'resnet50_dr_classifier.pth',
    'model_architecture': 'resnet50',
    'num_classes': 2,
    'input_size': (224, 224),
    'device': 'cpu'  # Change to 'cuda' if you have a GPU
}

# Image Processing Configuration
IMAGE_CONFIG = {
    'supported_formats': ['.jpg', '.jpeg', '.png', '.tiff', '.bmp'],
    'max_file_size_mb': 50,  # Maximum file size in MB
    'normalization_mean': [0.485, 0.456, 0.406],
    'normalization_std': [0.229, 0.224, 0.225]
}

# Grad-CAM Configuration
GRADCAM_CONFIG = {
    'target_layer': 'layer4[-1]',  # Target layer for visualization
    'colormap': 'jet',             # Colormap for heatmap visualization
    'alpha': 0.4                   # Transparency of the heatmap overlay
}

# Application Configuration
APP_CONFIG = {
    'title': 'AI Diabetic Retinopathy Detection',
    'description': 'Upload an OCT image to analyze for diabetic retinopathy. The AI will show a Grad-CAM heatmap highlighting areas of interest.',
    'theme': 'default',  # Gradio theme
    'share': False,      # Whether to create a public link
    'server_name': '127.0.0.1',
    'server_port': 7860,
    'debug': False
}

# Output Configuration
OUTPUT_CONFIG = {
    'save_predictions': True,
    'save_gradcam': True,
    'output_dir': 'saved_predictions',
    'batch_output_dir': 'batch_results',
    'filename_format': '{timestamp}_{label}_{confidence:.3f}.png'
}

# Medical Disclaimer
MEDICAL_DISCLAIMER = """
MEDICAL DISCLAIMER

This tool is for research and educational purposes only.
It should not be used for actual medical diagnosis without proper validation and clinical oversight.

Always consult with qualified healthcare professionals for medical diagnosis and treatment decisions.
"""

# Class Labels
CLASS_LABELS = {
    0: 'DR',    # Diabetic Retinopathy
    1: 'NoDR'   # No Diabetic Retinopathy
}

# Confidence Thresholds
CONFIDENCE_THRESHOLDS = {
    'high_confidence': 0.9,    # High confidence threshold
    'medium_confidence': 0.7,  # Medium confidence threshold
    'low_confidence': 0.5      # Low confidence threshold
}

# Logging Configuration
LOGGING_CONFIG = {
    'log_level': 'INFO',
    'log_file': 'dr_detection.log',
    'log_format': '%(asctime)s - %(levelname)s - %(message)s'
}

# Performance Configuration
PERFORMANCE_CONFIG = {
    'batch_size': 1,       # Batch size for processing
    'num_workers': 0,      # Number of worker processes
    'pin_memory': False,   # Pin memory for faster data transfer
    'prefetch_factor': 2   # Prefetch factor for data loading
}

def get_model_path():
    """Get the model path, checking if it exists."""
    model_path = MODEL_CONFIG['model_path']
    if not os.path.exists(model_path):
        raise FileNotFoundError(f"Model file not found: {model_path}")
    return model_path

def get_device():
    """Get the device to use for inference."""
    device = MODEL_CONFIG['device']
    if device == 'cuda' and not torch.cuda.is_available():
        print("CUDA requested but not available. Falling back to CPU.")
        return 'cpu'
    return device

def get_output_directory():
    """Get the output directory, creating it if it doesn't exist."""
    output_dir = OUTPUT_CONFIG['output_dir']
    os.makedirs(output_dir, exist_ok=True)
    return output_dir

def get_batch_output_directory():
    """Get the batch output directory, creating it if it doesn't exist."""
    batch_dir = OUTPUT_CONFIG['batch_output_dir']
    os.makedirs(batch_dir, exist_ok=True)
    return batch_dir

# Import torch here to avoid circular imports
try:
    import torch
except ImportError:
    print("PyTorch not available. Some functions may not work.")
    torch = None
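As a usage note, a script in this repository could consume these settings roughly as follows. This is a hypothetical consumer written for illustration; it is not code contained in the commit.

```python
# Hypothetical consumer of config.py (not part of this commit)
import config

device = config.get_device()              # 'cpu' unless CUDA is configured and available
model_path = config.get_model_path()      # raises FileNotFoundError if the .pth file is missing
out_dir = config.get_output_directory()   # creates saved_predictions/ on first use

url = f"http://{config.APP_CONFIG['server_name']}:{config.APP_CONFIG['server_port']}"
print(config.APP_CONFIG['title'], '->', url)
print(config.CLASS_LABELS[0], config.CLASS_LABELS[1])   # 'DR', 'NoDR'
```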
index.html
ADDED
@@ -0,0 +1,264 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>AI Diabetic Retinopathy Detection</title>
    <style>
        * {
            margin: 0;
            padding: 0;
            box-sizing: border-box;
        }

        body {
            font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
            background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
            min-height: 100vh;
            padding: 20px;
        }

        .container {
            max-width: 1200px;
            margin: 0 auto;
            background: white;
            border-radius: 20px;
            box-shadow: 0 20px 40px rgba(0,0,0,0.1);
            overflow: hidden;
        }

        .header {
            background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
            color: white;
            padding: 40px;
            text-align: center;
        }

        .header h1 {
            font-size: 2.5rem;
            margin-bottom: 10px;
            font-weight: 300;
        }

        .header p {
            font-size: 1.1rem;
            opacity: 0.9;
        }

        .content {
            padding: 40px;
        }

        .demo-section {
            background: #f8f9fa;
            border-radius: 15px;
            padding: 30px;
            margin-bottom: 30px;
            text-align: center;
        }

        .demo-section h2 {
            color: #333;
            margin-bottom: 20px;
            font-size: 1.8rem;
        }

        .demo-section p {
            color: #666;
            margin-bottom: 25px;
            font-size: 1.1rem;
            line-height: 1.6;
        }

        .demo-button {
            display: inline-block;
            background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
            color: white;
            padding: 15px 30px;
            text-decoration: none;
            border-radius: 50px;
            font-size: 1.1rem;
            font-weight: 500;
            transition: all 0.3s ease;
            box-shadow: 0 5px 15px rgba(102, 126, 234, 0.4);
        }

        .demo-button:hover {
            transform: translateY(-2px);
            box-shadow: 0 8px 25px rgba(102, 126, 234, 0.6);
        }

        .features {
            display: grid;
            grid-template-columns: repeat(auto-fit, minmax(300px, 1fr));
            gap: 30px;
            margin-bottom: 40px;
        }

        .feature-card {
            background: white;
            border-radius: 15px;
            padding: 30px;
            text-align: center;
            box-shadow: 0 10px 30px rgba(0,0,0,0.1);
            transition: transform 0.3s ease;
        }

        .feature-card:hover {
            transform: translateY(-5px);
        }

        .feature-icon {
            font-size: 3rem;
            margin-bottom: 20px;
        }

        .feature-card h3 {
            color: #333;
            margin-bottom: 15px;
            font-size: 1.4rem;
        }

        .feature-card p {
            color: #666;
            line-height: 1.6;
        }

        .github-section {
            background: #24292e;
            color: white;
            padding: 40px;
            text-align: center;
            border-radius: 15px;
        }

        .github-section h2 {
            margin-bottom: 20px;
            font-size: 1.8rem;
        }

        .github-section p {
            margin-bottom: 25px;
            opacity: 0.9;
            line-height: 1.6;
        }

        .github-button {
            display: inline-block;
            background: #28a745;
            color: white;
            padding: 15px 30px;
            text-decoration: none;
            border-radius: 50px;
            font-size: 1.1rem;
            font-weight: 500;
            transition: all 0.3s ease;
            margin: 0 10px;
        }

        .github-button:hover {
            background: #218838;
            transform: translateY(-2px);
        }

        .medical-disclaimer {
            background: #fff3cd;
            border: 1px solid #ffeaa7;
            border-radius: 10px;
            padding: 20px;
            margin: 30px 0;
            text-align: center;
        }

        .medical-disclaimer h3 {
            color: #856404;
            margin-bottom: 10px;
        }

        .medical-disclaimer p {
            color: #856404;
            line-height: 1.6;
        }

        @media (max-width: 768px) {
            .header h1 {
                font-size: 2rem;
            }

            .content {
                padding: 20px;
            }

            .features {
                grid-template-columns: 1fr;
            }
        }
    </style>
</head>
<body>
    <div class="container">
        <div class="header">
            <h1>AI Diabetic Retinopathy Detection</h1>
            <p>Advanced deep learning system for early detection of diabetic retinopathy from OCT images</p>
        </div>

        <div class="content">
            <div class="demo-section">
                <h2>Live Demo</h2>
                <p>Experience our AI-powered diabetic retinopathy detection system in action. Upload OCT images and get instant analysis with Grad-CAM visualizations.</p>
                <a href="http://192.168.18.28:7860" class="demo-button" target="_blank">Launch Live Demo</a>
            </div>

            <div class="features">
                <div class="feature-card">
                    <div class="feature-icon"></div>
                    <h3>AI-Powered Analysis</h3>
                    <p>State-of-the-art ResNet-50 model trained on extensive OCT image datasets for accurate diabetic retinopathy detection.</p>
                </div>

                <div class="feature-card">
                    <div class="feature-icon"></div>
                    <h3>Grad-CAM Visualization</h3>
                    <p>Advanced explainable AI technology that highlights the specific areas of OCT images that influence the AI's diagnosis.</p>
                </div>

                <div class="feature-card">
                    <div class="feature-icon"></div>
                    <h3>Confidence Scoring</h3>
                    <p>Get detailed confidence scores and probability assessments for each diagnosis, ensuring reliable results.</p>
                </div>

                <div class="feature-card">
                    <div class="feature-icon"></div>
                    <h3>Result Storage</h3>
                    <p>Automatically save analyzed images with timestamps, predictions, and confidence scores for future reference.</p>
                </div>

                <div class="feature-card">
                    <div class="feature-icon"></div>
                    <h3>Batch Processing</h3>
                    <p>Process multiple OCT images simultaneously with comprehensive CSV reporting and batch analysis capabilities.</p>
                </div>

                <div class="feature-card">
                    <div class="feature-icon"></div>
                    <h3>Web Interface</h3>
                    <p>User-friendly Gradio web interface accessible from any device with a modern web browser.</p>
                </div>
            </div>

            <div class="medical-disclaimer">
                <h3>Medical Disclaimer</h3>
                <p>This tool is for research and educational purposes only. It should not be used for actual medical diagnosis without proper validation and clinical oversight. Always consult with qualified healthcare professionals for medical diagnosis and treatment decisions.</p>
            </div>

            <div class="github-section">
                <h2>Open Source Project</h2>
                <p>This project is completely open source and available on GitHub. Contribute, fork, or star the repository to support the development of AI-powered medical imaging tools.</p>
                <a href="https://github.com/yourusername/Deep_Learning_for_Ophthalmologist" class="github-button" target="_blank">View on GitHub</a>
                <a href="https://github.com/yourusername/Deep_Learning_for_Ophthalmologist/fork" class="github-button" target="_blank">Fork Project</a>
            </div>
        </div>
    </div>
</body>
</html>
requirements.txt
CHANGED
@@ -1,7 +1,8 @@
-torch
-torchvision
-pillow
-numpy
-gradio
-
+torch>=2.0.0
+torchvision>=0.15.0
+pillow>=9.0.0
+numpy>=1.26.0
+gradio>=4.0.0
+pytorch-grad-cam
+opencv-python>=4.8.0
 
run_app.bat
ADDED
@@ -0,0 +1,43 @@
@echo off
echo ========================================
echo Diabetic Retinopathy Detection App
echo ========================================
echo.

REM Check if Python is available
python --version >nul 2>&1
if errorlevel 1 (
    echo ERROR: Python is not installed or not in PATH
    echo Please install Python 3.8+ and try again
    pause
    exit /b 1
)

REM Check if virtual environment exists
if exist "venv\Scripts\activate.bat" (
    echo Activating virtual environment...
    call venv\Scripts\activate.bat
) else (
    echo No virtual environment found. Using system Python.
    echo.
)

REM Check if model file exists
if not exist "resnet50_dr_classifier.pth" (
    echo ERROR: Model file 'resnet50_dr_classifier.pth' not found!
    echo Please ensure the model file is in the current directory.
    pause
    exit /b 1
)

echo Starting the application...
echo.
echo The app will open in your web browser at: http://127.0.0.1:7860
echo.
echo Press Ctrl+C to stop the application
echo.

REM Start the application
python app.py

pause
run_app.ps1
ADDED
@@ -0,0 +1,175 @@
#!/usr/bin/env pwsh
<#
.SYNOPSIS
    Launcher script for Diabetic Retinopathy Detection App

.DESCRIPTION
    This script checks prerequisites and launches the AI-powered
    diabetic retinopathy detection application.

.PARAMETER Setup
    Run setup instead of launching the app

.PARAMETER Test
    Run tests instead of launching the app

.PARAMETER Batch
    Launch batch processing instead of the web app
#>

param(
    [switch]$Setup,
    [switch]$Test,
    [switch]$Batch
)

# Set console title
$Host.UI.RawUI.WindowTitle = "Diabetic Retinopathy Detection App"

# Function to write colored output
function Write-ColorOutput {
    param(
        [string]$Message,
        [string]$Color = "White"
    )
    Write-Host $Message -ForegroundColor $Color
}

# Function to check Python installation
function Test-PythonInstallation {
    try {
        $pythonVersion = python --version 2>&1
        if ($LASTEXITCODE -eq 0) {
            Write-ColorOutput "Python found: $pythonVersion" "Green"
            return $true
        }
    }
    catch {
        Write-ColorOutput "Python not found in PATH" "Red"
        return $false
    }
    return $false
}

# Function to check model file
function Test-ModelFile {
    $modelPath = "resnet50_dr_classifier.pth"
    if (Test-Path $modelPath) {
        $fileSize = (Get-Item $modelPath).Length / 1MB
        Write-ColorOutput "Model file found: $modelPath ($([math]::Round($fileSize, 2)) MB)" "Green"
        return $true
    }
    else {
        Write-ColorOutput "Model file not found: $modelPath" "Red"
        return $false
    }
}

# Function to activate virtual environment
function Activate-VirtualEnvironment {
    $venvPath = "venv\Scripts\Activate.ps1"
    if (Test-Path $venvPath) {
        Write-ColorOutput "Activating virtual environment..." "Yellow"
        & $venvPath
        return $true
    }
    else {
        Write-ColorOutput "No virtual environment found. Using system Python." "Cyan"
        return $false
    }
}

# Function to run setup
function Start-Setup {
    Write-ColorOutput "Running setup..." "Yellow"
    if (Test-PythonInstallation) {
        python setup.py
    }
    else {
        Write-ColorOutput "Cannot run setup without Python" "Red"
    }
}

# Function to run tests
function Start-Tests {
    Write-ColorOutput "Running tests..." "Yellow"
    if (Test-PythonInstallation) {
        python test_model.py
    }
    else {
        Write-ColorOutput "Cannot run tests without Python" "Red"
    }
}

# Function to start batch processing
function Start-BatchProcessing {
    Write-ColorOutput "Starting batch processing..." "Yellow"
    if (Test-PythonInstallation) {
        python batch_process.py
    }
    else {
        Write-ColorOutput "Cannot start batch processing without Python" "Red"
    }
}

# Function to start web app
function Start-WebApp {
    Write-ColorOutput "Starting web application..." "Yellow"
    if (Test-PythonInstallation) {
        python app.py
    }
    else {
        Write-ColorOutput "Cannot start web app without Python" "Red"
    }
}

# Main execution
Clear-Host
Write-ColorOutput "========================================" "Cyan"
Write-ColorOutput "  Diabetic Retinopathy Detection App" "Cyan"
Write-ColorOutput "========================================" "Cyan"
Write-Host ""

# Check Python installation
if (-not (Test-PythonInstallation)) {
    Write-ColorOutput "Python is not installed or not in PATH" "Red"
    Write-ColorOutput "Please install Python 3.8+ and try again" "Yellow"
    Read-Host "Press Enter to exit"
    exit 1
}

# Check model file
if (-not (Test-ModelFile)) {
    Write-ColorOutput "Model file is missing!" "Red"
    Write-ColorOutput "Please ensure the model file is in the current directory." "Yellow"
    Read-Host "Press Enter to exit"
    exit 1
}

# Activate virtual environment if available
Activate-VirtualEnvironment

Write-Host ""

# Determine what to run based on parameters
if ($Setup) {
    Start-Setup
}
elseif ($Test) {
    Start-Tests
}
elseif ($Batch) {
    Start-BatchProcessing
}
else {
    # Default: start web app
    Write-ColorOutput "Starting web application..." "Green"
    Write-ColorOutput "The app will open in your web browser at: http://127.0.0.1:7860" "Cyan"
    Write-ColorOutput "Press Ctrl+C to stop the application" "Yellow"
    Write-Host ""

    Start-WebApp
}

Write-Host ""
Read-Host "Press Enter to exit"
setup.py
ADDED
@@ -0,0 +1,191 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
|
2 |
+
"""
|
3 |
+
Setup script for Diabetic Retinopathy Detection App
|
4 |
+
Helps with installation and environment setup
|
5 |
+
"""
|
6 |
+
|
7 |
+
import subprocess
|
8 |
+
import sys
|
9 |
+
import os
|
import platform

def check_python_version():
    """Check if Python version is compatible."""
    print("Checking Python version...")
    version = sys.version_info
    if version.major < 3 or (version.major == 3 and version.minor < 8):
        print(f"❌ Python {version.major}.{version.minor} detected. Python 3.8+ is required.")
        return False
    print(f"✅ Python {version.major}.{version.minor}.{version.micro} - Compatible!")
    return True

def check_pip():
    """Check if pip is available."""
    print("Checking pip...")
    try:
        subprocess.run([sys.executable, "-m", "pip", "--version"],
                       check=True, capture_output=True)
        print("✅ pip is available!")
        return True
    except subprocess.CalledProcessError:
        print("❌ pip not found. Please install pip first.")
        return False

def upgrade_pip():
    """Upgrade pip to latest version."""
    print("Upgrading pip...")
    try:
        subprocess.run([sys.executable, "-m", "pip", "install", "--upgrade", "pip"],
                       check=True)
        print("✅ pip upgraded successfully!")
        return True
    except subprocess.CalledProcessError as e:
        print(f"⚠️ Warning: Could not upgrade pip: {e}")
        return False

def install_requirements():
    """Install required packages from requirements.txt."""
    print("Installing required packages...")

    if not os.path.exists("requirements.txt"):
        print("❌ requirements.txt not found!")
        return False

    try:
        subprocess.run([sys.executable, "-m", "pip", "install", "-r", "requirements.txt"],
                       check=True)
        print("✅ All packages installed successfully!")
        return True
    except subprocess.CalledProcessError as e:
        print(f"❌ Error installing packages: {e}")
        return False

def check_model_file():
    """Check if the model file exists."""
    print("Checking model file...")
    if os.path.exists("resnet50_dr_classifier.pth"):
        file_size = os.path.getsize("resnet50_dr_classifier.pth") / (1024 * 1024)
        print(f"✅ Model file found: resnet50_dr_classifier.pth ({file_size:.2f} MB)")
        return True
    else:
        print("❌ Model file 'resnet50_dr_classifier.pth' not found!")
        print("   Please ensure the model file is in the current directory.")
        return False

def create_directories():
    """Create necessary directories."""
    print("Creating directories...")

    directories = ["saved_predictions", "batch_results"]

    for directory in directories:
        if not os.path.exists(directory):
            os.makedirs(directory)
            print(f"  ✅ Created: {directory}/")
        else:
            print(f"  Exists: {directory}/")

def test_imports():
    """Test if all required packages can be imported."""
    print("Testing imports...")

    required_packages = [
        ("torch", "PyTorch"),
        ("torchvision", "TorchVision"),
        ("PIL", "Pillow"),
        ("numpy", "NumPy"),
        ("gradio", "Gradio"),
        ("pytorch_grad_cam", "PyTorch Grad-CAM")
    ]

    all_good = True

    for package, name in required_packages:
        try:
            __import__(package)
            print(f"  ✅ {name} imported successfully")
        except ImportError:
            print(f"  ❌ {name} import failed")
            all_good = False

    return all_good

def run_test():
    """Run the test script to verify everything works."""
    print("Running model test...")
    try:
        result = subprocess.run([sys.executable, "test_model.py"],
                                capture_output=True, text=True)
        if result.returncode == 0:
            print("✅ Model test passed!")
            return True
        else:
            print("❌ Model test failed!")
            print("Output:", result.stdout)
            print("Errors:", result.stderr)
            return False
    except Exception as e:
        print(f"❌ Could not run test: {e}")
        return False

def main():
    """Main setup function."""
    print("Diabetic Retinopathy Detection - Setup")
    print("=" * 50)

    # Check system info
    print(f"System: {platform.system()} {platform.release()}")
    print(f"Python: {sys.executable}")
    print()

    # Step 1: Check Python version
    if not check_python_version():
        return False

    # Step 2: Check pip
    if not check_pip():
        return False

    # Step 3: Upgrade pip
    upgrade_pip()

    # Step 4: Install requirements
    if not install_requirements():
        return False

    # Step 5: Check model file
    if not check_model_file():
        return False

    # Step 6: Create directories
    create_directories()

    # Step 7: Test imports
    if not test_imports():
        print("❌ Some packages failed to import. Please check the installation.")
        return False

    # Step 8: Run test
    if not run_test():
        print("❌ Model test failed. Please check the model file and installation.")
        return False

    print("\nSetup completed successfully!")
    print("\nNext steps:")
    print("  1. Run the web app: python app.py")
    print("  2. Open your browser to the URL shown in the terminal")
    print("  3. Upload OCT images for analysis")
    print("\nFor batch processing:")
    print("  python batch_process.py")

    return True

if __name__ == "__main__":
    success = main()
    if not success:
        print("\n❌ Setup failed. Please check the error messages above.")
        sys.exit(1)
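Because everything in setup.py apart from the module-level imports lives inside functions and the if __name__ == "__main__" guard, the individual checks above can also be reused from another script. Below is a minimal sketch under that assumption; the run_env_check.py file name is hypothetical and not part of this commit.

#!/usr/bin/env python3
# run_env_check.py (hypothetical) -- reuse setup.py's checks without installing anything.
# Assumes it runs from the repository root so that "import setup" resolves to setup.py.
import setup

def environment_ready() -> bool:
    # Interpreter and pip first, then directories, package imports, and the model weights.
    if not (setup.check_python_version() and setup.check_pip()):
        return False
    setup.create_directories()
    return setup.test_imports() and setup.check_model_file()

if __name__ == "__main__":
    if environment_ready():
        print("Environment looks ready - run: python app.py")
    else:
        print("Environment incomplete - run: python setup.py")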
test_model.py
ADDED
@@ -0,0 +1,123 @@
#!/usr/bin/env python3
"""
Test script to verify the diabetic retinopathy detection model can be loaded correctly.
Run this before starting the main app to check for any issues.
"""

import torch
import torch.nn as nn
from torchvision import models
import os

def test_model_loading():
    """Test if the model can be loaded successfully."""
    print("Testing model loading...")

    try:
        # Check if model file exists
        model_path = "resnet50_dr_classifier.pth"
        if not os.path.exists(model_path):
            print(f"❌ Error: Model file '{model_path}' not found!")
            print("   Please ensure the model file is in the current directory.")
            return False

        print(f"✅ Model file found: {model_path}")

        # Check file size
        file_size = os.path.getsize(model_path) / (1024 * 1024)  # MB
        print(f"Model file size: {file_size:.2f} MB")

        # Try to load the model
        device = torch.device("cpu")
        print("Loading model...")

        model = models.resnet50(weights=None)
        model.fc = nn.Linear(model.fc.in_features, 2)

        # Load state dict
        state_dict = torch.load(model_path, map_location=device)
        model.load_state_dict(state_dict)
        model.to(device)
        model.eval()

        print("✅ Model loaded successfully!")

        # Test with dummy input
        print("Testing with dummy input...")
        dummy_input = torch.randn(1, 3, 224, 224)

        with torch.no_grad():
            output = model(dummy_input)
            probs = torch.nn.functional.softmax(output, dim=1)
            pred = torch.argmax(probs, dim=1).item()
            confidence = probs[0][pred].item()

        print("✅ Model inference successful!")
        print(f"   Prediction: {'DR' if pred == 0 else 'NoDR'}")
        print(f"   Confidence: {confidence:.4f}")

        return True

    except Exception as e:
        print(f"❌ Error loading model: {str(e)}")
        return False

def test_dependencies():
    """Test if required packages are available."""
    print("\nTesting dependencies...")

    try:
        import gradio
        print("✅ Gradio imported successfully")
    except ImportError:
        print("❌ Gradio not found. Install with: pip install gradio")
        return False

    try:
        import PIL
        print("✅ PIL/Pillow imported successfully")
    except ImportError:
        print("❌ PIL/Pillow not found. Install with: pip install pillow")
        return False

    try:
        import numpy
        print("✅ NumPy imported successfully")
    except ImportError:
        print("❌ NumPy not found. Install with: pip install numpy")
        return False

    try:
        from pytorch_grad_cam import GradCAM
        print("✅ PyTorch Grad-CAM imported successfully")
    except ImportError:
        print("❌ PyTorch Grad-CAM not found. Install with: pip install pytorch-grad-cam")
        return False

    return True

def main():
    """Main test function."""
    print("Diabetic Retinopathy Detection - Model Test")
    print("=" * 50)

    # Test dependencies
    deps_ok = test_dependencies()

    if not deps_ok:
        print("\n❌ Dependency test failed. Please install missing packages.")
        return

    # Test model loading
    model_ok = test_model_loading()

    print("\n" + "=" * 50)
    if model_ok:
        print("All tests passed! The app should work correctly.")
        print("   You can now run: python app.py")
    else:
        print("❌ Model test failed. Please check the error messages above.")
        print("   Make sure the model file is correct and all dependencies are installed.")

if __name__ == "__main__":
    main()
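test_model.py only pushes random noise through the network, which is enough to catch loading errors but says nothing about real scans. For a quick end-to-end check on an actual image, the same model definition can be wrapped in a small inference helper. This is a sketch only: the 224x224 resize, the ImageNet normalization, the predict_one.py name, and the example image path are assumptions, since the preprocessing actually used by app.py is not shown in this part of the commit.

#!/usr/bin/env python3
# predict_one.py (hypothetical) -- single-image sanity check using the same
# ResNet-50 / 2-class head defined in test_model.py.
import torch
import torch.nn as nn
from PIL import Image
from torchvision import models, transforms

def load_classifier(path="resnet50_dr_classifier.pth", device="cpu"):
    model = models.resnet50(weights=None)
    model.fc = nn.Linear(model.fc.in_features, 2)
    model.load_state_dict(torch.load(path, map_location=device))
    return model.to(device).eval()

def predict(image_path, model):
    # Assumed preprocessing: 224x224 input with ImageNet statistics.
    preprocess = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    x = preprocess(Image.open(image_path).convert("RGB")).unsqueeze(0)
    with torch.no_grad():
        probs = torch.softmax(model(x), dim=1)[0]
    # Class order (0 = DR, 1 = NoDR) taken from test_model.py above.
    label = "DR" if probs.argmax().item() == 0 else "NoDR"
    return label, probs.max().item()

if __name__ == "__main__":
    model = load_classifier()
    label, confidence = predict("example_oct.jpg", model)  # hypothetical image path
    print(f"{label} ({confidence:.4f})")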