File size: 205,665 Bytes
082d9d1
 
 
 
 
ac157c8
 
 
a2a2a54
 
 
 
e2d3712
 
 
 
2872dcd
 
9fb4088
 
0182706
 
f695033
567736c
cd3e4fd
78fc423
f7c3e69
 
7eadac9
f168ae4
082d9d1
f4f6452
 
 
 
 
 
5d2d97e
 
 
586f5a7
 
f4f6452
 
50d1f17
 
 
 
 
082d9d1
148b186
082d9d1
e2d3712
8496c04
 
 
 
 
e2d3712
 
8496c04
 
e2d3712
e287280
 
082d9d1
 
d17644b
082d9d1
38d659c
 
 
8e42224
 
 
 
 
 
38d659c
8e42224
 
 
 
38d659c
 
 
 
 
4507a25
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e95ecad
3ccd7b9
e95ecad
3ccd7b9
 
 
 
 
 
 
 
 
e95ecad
3ccd7b9
e95ecad
3ccd7b9
 
 
e95ecad
3ccd7b9
 
 
 
 
 
 
 
e95ecad
3ccd7b9
e95ecad
 
 
 
3ccd7b9
 
 
e95ecad
3ccd7b9
 
 
 
 
 
 
 
 
 
 
 
e95ecad
3ccd7b9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e95ecad
3ccd7b9
4507a25
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d17644b
f4f6452
78fc423
38d659c
 
 
 
 
 
 
 
50d1f17
38d659c
78fc423
e2d3712
8496c04
 
 
 
e2d3712
8496c04
e2d3712
 
8496c04
 
e2d3712
78fc423
 
 
 
d17644b
78fc423
f4f6452
 
d17644b
f4f6452
50d1f17
e512265
50d1f17
 
 
e512265
 
 
 
 
 
 
 
 
 
 
 
50d1f17
 
 
 
 
 
 
 
 
 
e512265
 
50d1f17
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e512265
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50d1f17
 
 
 
 
 
 
 
 
de52671
 
 
 
 
 
e512265
 
 
 
 
 
de52671
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e512265
de52671
 
 
 
 
 
 
 
 
 
e512265
de52671
 
 
 
 
 
 
 
 
 
 
 
 
 
e512265
de52671
 
 
 
 
 
 
 
 
e512265
 
 
 
 
 
 
 
 
 
 
de52671
 
d142097
 
c3b334f
 
 
 
 
d142097
 
 
 
 
 
 
 
 
e287280
 
 
 
 
f4191a0
 
 
 
 
d347f16
 
 
 
 
82a5d4c
 
 
 
 
7564378
0182706
 
6d86572
0182706
 
38d659c
 
 
 
 
7564378
 
 
 
7eadac9
 
be9e853
948839e
 
6c04625
 
3b8474b
6c04625
 
e4bd6c5
 
 
 
 
b250a34
5d51637
 
 
 
 
 
 
 
 
 
b250a34
 
0e5a693
 
fb4401e
 
 
 
 
531521f
 
 
 
 
01fa717
f2cd5ba
 
 
 
 
55971a2
 
 
 
f168ae4
 
 
 
 
0ec71f5
f777467
 
 
 
 
 
 
 
 
 
0ec71f5
 
 
 
 
 
 
 
 
fe5a100
 
 
 
 
8f11b16
 
 
 
 
d142097
 
 
273e79c
328b6bb
273e79c
 
 
 
 
 
 
 
082d9d1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e287280
 
 
 
 
 
 
 
a2a2a54
 
 
 
e2d3712
 
 
 
50d1f17
 
 
 
 
 
 
 
4507a25
 
 
 
3ccd7b9
 
 
 
082d9d1
 
 
 
2872dcd
f012c97
 
71f1c5d
f012c97
 
fb4401e
 
 
 
 
 
531521f
 
 
 
 
 
f2cd5ba
 
 
 
 
 
fe5a100
80eb86a
fe5a100
80eb86a
 
fe5a100
8f11b16
 
 
 
 
 
55971a2
 
 
 
 
 
f168ae4
 
 
f777467
 
 
 
 
 
 
 
 
 
 
 
0ec71f5
85b3812
0ec71f5
4acb12d
fb4401e
bbf54e8
e4bd6c5
 
006e9c8
 
e4bd6c5
 
0e5a693
1c798d5
3b8474b
 
71f1c5d
 
 
 
 
082d9d1
2872dcd
 
 
 
78fc423
 
 
 
 
 
 
 
 
 
082d9d1
 
 
e287280
 
 
 
 
 
 
 
 
 
 
082d9d1
 
 
 
 
 
 
e287280
 
 
 
 
 
 
 
 
 
082d9d1
 
78fc423
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
082d9d1
 
 
 
 
 
 
 
 
 
 
f6f53e6
4b7d538
f6f53e6
80eb86a
 
 
 
 
 
 
 
082d9d1
 
80eb86a
 
 
 
 
 
 
 
722a65e
 
 
f6f53e6
 
4b7d538
f6f53e6
082d9d1
 
148b186
 
80eb86a
 
 
 
 
 
 
4b8c1ed
 
 
 
 
 
 
 
 
 
 
 
 
 
4507a25
 
 
 
 
 
 
 
de2e4ad
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4507a25
 
de2e4ad
 
 
 
 
4507a25
 
de2e4ad
 
 
 
 
4507a25
 
de2e4ad
 
 
 
 
783914a
 
 
 
de2e4ad
 
 
 
783914a
 
 
 
 
 
4507a25
de2e4ad
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4507a25
 
 
 
 
 
 
 
 
 
 
 
 
e2ce9d8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3ccd7b9
 
 
 
e95ecad
3ccd7b9
 
 
 
3ea1b01
 
 
 
e95ecad
 
3ea1b01
 
e95ecad
 
3ea1b01
 
 
 
 
 
 
 
 
 
 
 
 
3ccd7b9
 
 
 
 
 
 
 
 
 
 
 
082d9d1
e0eaf95
082d9d1
 
e2d3712
082d9d1
f38f0e9
 
 
7564378
38d659c
 
f38f0e9
e287280
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18dcd5e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d0811b3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18dcd5e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f8f29e0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d0811b3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f8f29e0
 
d0811b3
 
 
 
 
 
e287280
d0811b3
 
 
 
 
e287280
 
d0811b3
 
 
e287280
eb9b233
 
50d1f17
eb9b233
50d1f17
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
eb9b233
50d1f17
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
eb9b233
 
50d1f17
eb9b233
50d1f17
eb9b233
50d1f17
de52671
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
78fc423
87df3aa
78fc423
 
f9931c4
78fc423
 
 
 
f9931c4
78fc423
87df3aa
f9931c4
78fc423
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f9931c4
78fc423
f9931c4
78fc423
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
082d9d1
38d659c
 
 
082d9d1
 
 
 
5d2d97e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
586f5a7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
082d9d1
 
60cb489
 
 
 
 
 
 
 
 
 
 
 
082d9d1
 
60cb489
 
 
 
 
082d9d1
60cb489
082d9d1
 
 
a2a2a54
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ac157c8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a2a2a54
 
ac157c8
 
 
 
 
e2d3712
8496c04
e2d3712
 
 
 
 
 
 
 
 
 
a842cd5
e2d3712
a842cd5
 
 
 
 
 
 
 
 
 
 
 
e2d3712
 
a842cd5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e2d3712
8496c04
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e2d3712
 
 
 
 
 
 
 
 
8496c04
e2d3712
 
 
 
 
 
 
 
 
 
 
 
 
8496c04
e2d3712
 
 
 
 
 
 
 
 
 
8496c04
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e2d3712
 
 
 
 
 
8496c04
e2d3712
8496c04
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e2d3712
 
 
 
8496c04
 
 
 
 
e2d3712
8496c04
 
e2d3712
8496c04
 
 
 
 
e2d3712
8496c04
 
 
 
 
 
 
 
 
 
 
 
 
 
e2d3712
 
 
 
8496c04
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e2d3712
 
 
 
a842cd5
 
 
 
 
 
 
 
 
 
 
 
 
e2d3712
 
 
 
 
0182706
 
 
 
d0811b3
e0eaf95
 
 
 
f012c97
 
 
 
 
eb9b233
 
f012c97
 
 
eb9b233
 
 
 
 
 
de52671
 
 
3ccd7b9
 
eb9b233
f012c97
50d1f17
eb9b233
 
de52671
 
3ccd7b9
 
de52671
 
50d1f17
f4f6452
 
 
4507a25
 
3ccd7b9
 
f4f6452
 
f012c97
78fc423
f012c97
ac157c8
 
 
 
 
 
 
f012c97
e2d3712
 
 
 
 
 
 
 
a842cd5
 
 
 
 
 
 
 
 
 
 
f012c97
78fc423
 
f012c97
6d86572
 
0182706
 
 
 
 
 
6d86572
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5d2d97e
 
 
 
 
 
0182706
6d86572
0182706
5d2d97e
0182706
6d86572
0182706
6d86572
0182706
 
18dcd5e
d0811b3
 
 
 
 
 
 
 
 
18dcd5e
 
0182706
 
 
 
18dcd5e
 
 
 
 
 
 
 
0182706
 
 
 
e2ce9d8
0182706
 
 
 
 
 
 
 
 
 
 
 
18dcd5e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0182706
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18dcd5e
d0811b3
 
 
 
 
 
 
 
 
18dcd5e
0182706
 
 
 
 
 
 
d0811b3
 
 
 
 
 
 
 
 
 
18dcd5e
5d2d97e
 
 
 
 
0182706
18dcd5e
0182706
5d2d97e
0182706
 
 
 
38d659c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0182706
f012c97
71f1c5d
e0eaf95
78fc423
e0eaf95
78fc423
e0eaf95
f168ae4
 
 
 
 
 
 
9140b33
f168ae4
80eb86a
9140b33
 
80eb86a
9140b33
 
80eb86a
9140b33
8f11b16
 
 
 
 
 
 
9140b33
 
 
 
 
 
 
e0eaf95
80eb86a
 
 
e0eaf95
f168ae4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
80eb86a
4b8c1ed
 
 
 
 
 
 
 
 
80eb86a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
78fc423
4507a25
 
 
 
 
de52671
4507a25
4af6f39
4507a25
4af6f39
e2ce9d8
4af6f39
de52671
 
 
 
e2ce9d8
de52671
 
 
e2ce9d8
de52671
4af6f39
4507a25
4af6f39
4507a25
4af6f39
4507a25
4af6f39
3ccd7b9
 
 
 
 
 
 
 
4af6f39
4507a25
eb9b233
 
4507a25
eb9b233
5d2d97e
 
 
 
 
586f5a7
 
4507a25
 
 
5d2d97e
4507a25
 
eb9b233
 
 
 
5d2d97e
 
 
 
 
586f5a7
 
4507a25
eb9b233
4507a25
5d2d97e
4507a25
 
5d2d97e
 
 
 
 
586f5a7
 
4507a25
 
 
5d2d97e
4507a25
f012c97
 
50d1f17
4507a25
 
 
 
de52671
4507a25
 
 
 
 
e2ce9d8
4507a25
 
de52671
 
 
 
 
e2ce9d8
de52671
 
 
e2ce9d8
de52671
 
4507a25
 
 
 
 
 
 
 
 
3ccd7b9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
eb9b233
 
4af6f39
 
eb9b233
 
4af6f39
eb9b233
 
 
 
18dcd5e
d0811b3
 
 
 
 
 
 
 
 
 
18dcd5e
eb9b233
 
50d1f17
eb9b233
50d1f17
5d2d97e
50d1f17
 
 
 
18dcd5e
 
d0811b3
 
 
 
 
 
 
 
 
 
18dcd5e
 
5d2d97e
 
 
 
 
586f5a7
 
50d1f17
18dcd5e
50d1f17
5d2d97e
50d1f17
 
e0eaf95
 
 
 
78fc423
e0eaf95
 
9fb4088
 
cc7302a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bdf5606
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3b8474b
bdf5606
 
 
f1ddb3a
bdf5606
 
 
 
f1ddb3a
 
 
 
 
 
 
 
 
 
 
 
 
 
bdf5606
f1ddb3a
bdf5606
 
 
f1ddb3a
bdf5606
 
f1ddb3a
bdf5606
 
 
 
3b8474b
bdf5606
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9fb4088
3ca98cc
 
bdf5606
 
 
 
 
 
 
 
 
 
 
cd2bdda
bdf5606
3ca98cc
cd2bdda
3ca98cc
cd2bdda
 
 
 
9fb4088
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
eb9b233
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e3bee37
eb9b233
e3bee37
eb9b233
e3bee37
eb9b233
e3bee37
eb9b233
e3bee37
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
eb9b233
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e3bee37
 
 
 
 
 
 
 
eb9b233
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
85fc589
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c58b55c
85fc589
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c58b55c
85fc589
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e2d3712
85fc589
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e2d3712
082d9d1
 
f4f6452
082d9d1
273e79c
e0eaf95
d35fab9
e0eaf95
9dfa063
f7c3e69
eb9b233
85fc589
 
 
 
 
 
 
 
 
 
 
 
eb9b233
9f9ae1a
 
 
 
 
 
 
 
eb9b233
 
 
9dfa063
e2d3712
 
d35fab9
5644d41
9dfa063
586f5a7
f4f6452
586f5a7
f4f6452
 
 
 
 
5644d41
f4f6452
e2d3712
0e5d991
e2d3712
 
5644d41
9dfa063
ac157c8
9b0268d
a2a2a54
5644d41
ac157c8
e2d3712
 
5644d41
78fc423
d0811b3
 
 
 
 
 
e2d3712
5644d41
 
 
 
 
 
 
a6a4576
5644d41
 
 
 
4507a25
3ccd7b9
 
5644d41
 
 
 
 
a6a4576
5644d41
a6a4576
5644d41
 
e2d3712
 
d35fab9
5644d41
e2d3712
d0811b3
18dcd5e
d0811b3
18dcd5e
 
 
 
d0811b3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9dfa063
 
273e79c
d35fab9
5644d41
9dfa063
f012c97
37c890e
 
e2d3712
 
 
 
 
 
 
 
 
 
 
37c890e
0e5d991
9dfa063
 
 
0e5d991
5644d41
e2d3712
 
9dfa063
0e5d991
9dfa063
0e5d991
9dfa063
5644d41
 
d35fab9
9dfa063
 
e2d3712
9dfa063
 
 
de52671
e2d3712
9dfa063
e2d3712
 
b7e5bf3
 
 
 
 
 
e2d3712
eb9b233
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5d2d97e
 
 
 
 
 
 
586f5a7
 
 
5d2d97e
 
 
 
eb9b233
 
5d2d97e
 
eb9b233
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e0eaf95
f4f6452
 
 
4507a25
 
 
3ccd7b9
 
4507a25
 
5d2d97e
 
586f5a7
 
4507a25
 
 
f4f6452
4507a25
f4f6452
 
 
 
5d2d97e
 
586f5a7
 
5d2d97e
 
 
 
 
4507a25
 
e2ce9d8
f4f6452
5d2d97e
 
 
f4f6452
a6a4576
 
5644d41
a6a4576
 
de2e4ad
 
 
 
 
 
 
2923087
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5644d41
eb9b233
 
 
 
 
 
 
e0eaf95
 
d0811b3
e2d3712
a6a4576
 
 
 
2923087
 
 
 
e0eaf95
f4f6452
 
 
de2e4ad
 
e2d3712
a6a4576
eb9b233
 
 
 
 
c7dcb04
85fc589
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9fb4088
 
f7c3e69
 
 
e8d64af
7f3ae70
 
f7c3e69
4b7d538
f7c3e69
 
 
7f3ae70
eb9b233
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e8d64af
 
 
16ea7cc
4507a25
3ccd7b9
 
e8d64af
 
eb9b233
 
7f3ae70
cf34c7e
 
5d2566d
 
 
 
 
 
 
 
 
4b7d538
6039a02
4b7d538
6039a02
 
 
 
 
 
 
 
 
 
 
 
5d2566d
bdf5606
 
 
 
5d2566d
bdf5606
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
cc7302a
 
 
bdf5606
5d2566d
 
 
 
 
 
 
 
 
 
 
 
eb9b233
 
5d2566d
eb9b233
 
 
 
 
5d2566d
 
 
 
4b7d538
6039a02
 
4507a25
de2e4ad
4507a25
f26e8e5
de2e4ad
 
 
 
 
 
 
 
 
 
 
 
f26e8e5
 
 
 
 
 
 
 
4507a25
 
 
 
 
 
f26e8e5
4507a25
f26e8e5
4507a25
f26e8e5
 
 
 
 
 
4507a25
f26e8e5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
cc7302a
f26e8e5
cc7302a
 
f26e8e5
 
 
 
 
 
 
 
cc7302a
 
 
4507a25
 
cf34c7e
 
2923087
 
de2e4ad
2923087
 
 
 
 
 
 
 
 
 
 
de2e4ad
2923087
 
de2e4ad
2923087
 
 
de2e4ad
 
 
 
3ccd7b9
 
 
 
 
 
 
 
 
32ee128
3ccd7b9
 
 
 
cf34c7e
28c971b
32ee128
cf34c7e
 
 
 
 
 
 
 
 
32ee128
 
cf34c7e
 
32ee128
28c971b
 
3ccd7b9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28c971b
3ccd7b9
32ee128
3ccd7b9
 
 
32ee128
3ccd7b9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28c971b
3ccd7b9
 
 
 
 
32ee128
3ccd7b9
 
 
 
 
 
cc7302a
 
 
3ccd7b9
28c971b
3ccd7b9
 
 
 
cf34c7e
 
 
 
 
4b7d538
e8d64af
7eadac9
e8d64af
cc7302a
 
 
 
7eadac9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
eb9b233
 
7eadac9
eb9b233
 
 
 
7eadac9
 
eb9b233
7eadac9
 
 
a6a4576
bdf5606
 
 
 
7eadac9
bdf5606
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
cc7302a
 
 
bdf5606
 
7eadac9
 
 
 
 
 
 
 
 
 
 
eb9b233
 
7eadac9
eb9b233
 
 
 
 
7eadac9
 
 
f7c3e69
 
9fb4088
f7c3e69
a6a4576
f7c3e69
9fb4088
f7c3e69
 
9fb4088
0a632f8
18dcd5e
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587
2588
2589
2590
2591
2592
2593
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613
2614
2615
2616
2617
2618
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628
2629
2630
2631
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642
2643
2644
2645
2646
2647
2648
2649
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703
2704
2705
2706
2707
2708
2709
2710
2711
2712
2713
2714
2715
2716
2717
2718
2719
2720
2721
2722
2723
2724
2725
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761
2762
2763
2764
2765
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793
2794
2795
2796
2797
2798
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815
2816
2817
2818
2819
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831
2832
2833
2834
2835
2836
2837
2838
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875
2876
2877
2878
2879
2880
2881
2882
2883
2884
2885
2886
2887
2888
2889
2890
2891
2892
2893
2894
2895
2896
2897
2898
2899
2900
2901
2902
2903
2904
2905
2906
2907
2908
2909
2910
2911
2912
2913
2914
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927
2928
2929
2930
2931
2932
2933
2934
2935
2936
2937
2938
2939
2940
2941
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951
2952
2953
2954
2955
2956
2957
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967
2968
2969
2970
2971
2972
2973
2974
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
2990
2991
2992
2993
2994
2995
2996
2997
2998
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014
3015
3016
3017
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050
3051
3052
3053
3054
3055
3056
3057
3058
3059
3060
3061
3062
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
3089
3090
3091
3092
3093
3094
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109
3110
3111
3112
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141
3142
3143
3144
3145
3146
3147
3148
3149
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
3161
3162
3163
3164
3165
3166
3167
3168
3169
3170
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192
3193
3194
3195
3196
3197
3198
3199
3200
3201
3202
3203
3204
3205
3206
3207
3208
3209
3210
3211
3212
3213
3214
3215
3216
3217
3218
3219
3220
3221
3222
3223
3224
3225
3226
3227
3228
3229
3230
3231
3232
3233
3234
3235
3236
3237
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253
3254
3255
3256
3257
3258
3259
3260
3261
3262
3263
3264
3265
3266
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
3279
3280
3281
3282
3283
3284
3285
3286
3287
3288
3289
3290
3291
3292
3293
3294
3295
3296
3297
3298
3299
3300
3301
3302
3303
3304
3305
3306
3307
3308
3309
3310
3311
3312
3313
3314
3315
3316
3317
3318
3319
3320
3321
3322
3323
3324
3325
3326
3327
3328
3329
3330
3331
3332
3333
3334
3335
3336
3337
3338
3339
3340
3341
3342
3343
3344
3345
3346
3347
3348
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358
3359
3360
3361
3362
3363
3364
3365
3366
3367
3368
3369
3370
3371
3372
3373
3374
3375
3376
3377
3378
3379
3380
3381
3382
3383
3384
3385
3386
3387
3388
3389
3390
3391
3392
3393
3394
3395
3396
3397
3398
3399
3400
3401
3402
3403
3404
3405
3406
3407
3408
3409
3410
3411
3412
3413
3414
3415
3416
3417
3418
3419
3420
3421
3422
3423
3424
3425
3426
3427
3428
3429
3430
3431
3432
3433
3434
3435
3436
3437
3438
3439
3440
3441
3442
3443
3444
3445
3446
3447
3448
3449
3450
3451
3452
3453
3454
3455
3456
3457
3458
3459
3460
3461
3462
3463
3464
3465
3466
3467
3468
3469
3470
3471
3472
3473
3474
3475
3476
3477
3478
3479
3480
3481
3482
3483
3484
3485
3486
3487
3488
3489
3490
3491
3492
3493
3494
3495
3496
3497
3498
3499
3500
3501
3502
3503
3504
3505
3506
3507
3508
3509
3510
3511
3512
3513
3514
3515
3516
3517
3518
3519
3520
3521
3522
3523
3524
3525
3526
3527
3528
3529
3530
3531
3532
3533
3534
3535
3536
3537
3538
3539
3540
3541
3542
3543
3544
3545
3546
3547
3548
3549
3550
3551
3552
3553
3554
3555
3556
3557
3558
3559
3560
3561
3562
3563
3564
3565
3566
3567
3568
3569
3570
3571
3572
3573
3574
3575
3576
3577
3578
3579
3580
3581
3582
3583
3584
3585
3586
3587
3588
3589
3590
3591
3592
3593
3594
3595
3596
3597
3598
3599
3600
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638
3639
3640
3641
3642
3643
3644
3645
3646
3647
3648
3649
3650
3651
3652
3653
3654
3655
3656
3657
3658
3659
3660
3661
3662
3663
3664
3665
3666
3667
3668
3669
3670
3671
3672
3673
3674
3675
3676
3677
3678
3679
3680
3681
3682
3683
3684
3685
3686
3687
3688
3689
3690
3691
3692
3693
3694
3695
3696
3697
3698
3699
3700
3701
3702
3703
3704
3705
3706
3707
3708
3709
3710
3711
3712
3713
3714
3715
3716
3717
3718
3719
3720
3721
3722
3723
3724
3725
3726
3727
3728
3729
3730
3731
3732
3733
3734
3735
3736
3737
3738
3739
3740
3741
3742
3743
3744
3745
3746
3747
3748
3749
3750
3751
3752
3753
3754
3755
3756
3757
3758
3759
3760
3761
3762
3763
3764
3765
3766
3767
3768
3769
3770
3771
3772
3773
3774
3775
3776
3777
3778
3779
3780
3781
3782
3783
3784
3785
3786
3787
3788
3789
3790
3791
3792
3793
3794
3795
3796
3797
3798
3799
3800
3801
3802
3803
3804
3805
3806
3807
3808
3809
3810
3811
3812
3813
3814
3815
3816
3817
3818
3819
3820
3821
3822
3823
3824
3825
3826
3827
3828
3829
3830
3831
3832
3833
3834
3835
3836
3837
3838
3839
3840
3841
3842
3843
3844
3845
3846
3847
3848
3849
3850
3851
3852
3853
3854
3855
3856
3857
3858
3859
3860
3861
3862
3863
3864
3865
3866
3867
3868
3869
3870
3871
3872
3873
3874
3875
3876
3877
3878
3879
3880
3881
3882
3883
3884
3885
3886
3887
3888
3889
3890
3891
3892
3893
3894
3895
3896
3897
3898
3899
3900
3901
3902
3903
3904
3905
3906
3907
3908
3909
3910
3911
3912
3913
3914
3915
3916
3917
3918
3919
3920
3921
3922
3923
3924
3925
3926
3927
3928
3929
3930
3931
3932
3933
3934
3935
3936
3937
3938
3939
3940
3941
3942
3943
3944
3945
3946
3947
3948
3949
3950
3951
3952
3953
3954
3955
3956
3957
3958
3959
3960
3961
3962
3963
3964
3965
3966
3967
3968
3969
3970
3971
3972
3973
3974
3975
3976
3977
3978
3979
3980
3981
3982
3983
3984
3985
3986
3987
3988
3989
3990
3991
3992
3993
3994
3995
3996
3997
3998
3999
4000
4001
4002
4003
4004
4005
4006
4007
4008
4009
4010
4011
4012
4013
4014
4015
4016
4017
4018
4019
4020
4021
4022
4023
4024
4025
4026
4027
4028
4029
4030
4031
4032
4033
4034
4035
4036
4037
4038
4039
4040
4041
4042
4043
4044
4045
4046
4047
4048
4049
4050
4051
4052
4053
4054
4055
4056
4057
4058
4059
4060
4061
4062
4063
4064
4065
4066
4067
4068
4069
4070
4071
4072
4073
4074
4075
4076
4077
4078
4079
4080
4081
4082
4083
4084
4085
4086
4087
4088
4089
4090
4091
4092
4093
4094
4095
4096
4097
4098
4099
4100
4101
4102
4103
4104
4105
4106
4107
4108
4109
4110
4111
4112
4113
4114
4115
4116
4117
4118
4119
4120
4121
4122
4123
4124
4125
4126
4127
4128
4129
4130
4131
4132
4133
4134
4135
4136
4137
4138
4139
4140
4141
4142
4143
4144
4145
4146
4147
4148
4149
4150
4151
4152
4153
4154
4155
4156
4157
4158
4159
4160
4161
4162
4163
4164
4165
4166
4167
4168
4169
4170
4171
4172
4173
4174
4175
4176
4177
4178
4179
4180
4181
4182
4183
4184
4185
4186
4187
4188
4189
4190
4191
4192
4193
4194
4195
4196
4197
4198
4199
4200
4201
4202
4203
4204
4205
4206
4207
4208
4209
4210
4211
4212
4213
4214
4215
4216
4217
4218
4219
4220
4221
4222
4223
4224
4225
4226
4227
4228
4229
4230
4231
4232
4233
4234
4235
4236
4237
4238
4239
4240
4241
4242
4243
4244
4245
4246
4247
4248
4249
4250
4251
4252
4253
4254
4255
4256
4257
4258
4259
4260
4261
4262
4263
4264
4265
4266
4267
4268
4269
4270
4271
4272
4273
4274
4275
4276
4277
4278
4279
4280
4281
4282
4283
4284
4285
4286
4287
4288
4289
4290
4291
4292
4293
4294
4295
4296
4297
4298
4299
4300
4301
4302
4303
4304
4305
4306
4307
4308
4309
4310
4311
4312
4313
4314
4315
4316
4317
4318
4319
4320
4321
4322
4323
4324
4325
4326
4327
4328
4329
4330
4331
4332
4333
4334
4335
4336
4337
4338
4339
4340
4341
4342
4343
4344
4345
4346
4347
4348
4349
4350
4351
4352
4353
4354
4355
4356
4357
4358
4359
4360
4361
4362
4363
4364
4365
4366
4367
4368
4369
4370
4371
4372
4373
4374
4375
4376
4377
4378
4379
4380
4381
4382
4383
4384
4385
4386
4387
4388
4389
4390
4391
4392
4393
4394
4395
4396
4397
4398
4399
4400
4401
4402
4403
4404
4405
4406
4407
4408
4409
4410
4411
4412
4413
4414
4415
4416
4417
4418
4419
4420
4421
4422
4423
4424
4425
4426
4427
4428
4429
4430
4431
4432
4433
4434
4435
4436
4437
4438
4439
4440
4441
4442
4443
4444
4445
4446
4447
4448
4449
4450
4451
4452
4453
4454
4455
4456
4457
4458
4459
4460
4461
4462
4463
4464
4465
4466
4467
4468
4469
4470
4471
4472
4473
4474
4475
4476
4477
4478
4479
4480
4481
4482
4483
4484
4485
4486
4487
4488
4489
4490
4491
4492
4493
4494
4495
4496
4497
4498
4499
4500
4501
4502
4503
4504
4505
4506
4507
4508
4509
4510
4511
4512
4513
4514
4515
4516
4517
4518
4519
4520
4521
4522
4523
4524
4525
4526
4527
4528
4529
4530
4531
4532
4533
4534
4535
4536
4537
4538
4539
4540
4541
4542
4543
4544
4545
4546
4547
4548
4549
4550
4551
4552
4553
4554
4555
4556
4557
4558
4559
4560
4561
4562
4563
import os
import re
from http import HTTPStatus
from typing import Dict, List, Optional, Tuple
import base64
import mimetypes
import PyPDF2
import docx
import cv2
import numpy as np
from PIL import Image
import pytesseract
import requests
from urllib.parse import urlparse, urljoin
from bs4 import BeautifulSoup
import html2text
import json
import time
import webbrowser
import urllib.parse
import copy
import html

import gradio as gr
from huggingface_hub import InferenceClient
from tavily import TavilyClient
from huggingface_hub import HfApi
import tempfile
from openai import OpenAI
from mistralai import Mistral

# Gradio supported languages for syntax highlighting
GRADIO_SUPPORTED_LANGUAGES = [
    "python", "c", "cpp", "markdown", "latex", "json", "html", "css", "javascript", "jinja2", "typescript", "yaml", "dockerfile", "shell", "r", "sql", "sql-msSQL", "sql-mySQL", "sql-mariaDB", "sql-sqlite", "sql-cassandra", "sql-plSQL", "sql-hive", "sql-pgSQL", "sql-gql", "sql-gpSQL", "sql-sparkSQL", "sql-esper", None
]

def get_gradio_language(language):
    """Map a UI language selection to a value Gradio can syntax-highlight.

    Composite app targets ("streamlit"/"gradio") are authored in Python, so
    they highlight as Python. Anything not in GRADIO_SUPPORTED_LANGUAGES
    falls back to None (no highlighting).
    """
    if language in ("streamlit", "gradio"):
        return "python"
    return language if language in GRADIO_SUPPORTED_LANGUAGES else None

# Search/Replace Constants
# Markers delimiting the SEARCH/REPLACE blocks that the follow-up system
# prompts below instruct the model to emit; used when applying model-suggested
# patches to existing files.
SEARCH_START = "<<<<<<< SEARCH"
DIVIDER = "======="
REPLACE_END = ">>>>>>> REPLACE"

# Configuration
# System prompt for single-file HTML/CSS/JS generation (no web-search variant).
HTML_SYSTEM_PROMPT = """ONLY USE HTML, CSS AND JAVASCRIPT. If you want to use ICON make sure to import the library first. Try to create the best UI possible by using only HTML, CSS and JAVASCRIPT. MAKE IT RESPONSIVE USING MODERN CSS. Use as much as you can modern CSS for the styling, if you can't do something with modern CSS, then use custom CSS. Also, try to elaborate as much as you can, to create something unique. ALWAYS GIVE THE RESPONSE INTO A SINGLE HTML FILE

For website redesign tasks:
- Use the provided original HTML code as the starting point for redesign
- Preserve all original content, structure, and functionality
- Keep the same semantic HTML structure but enhance the styling
- Reuse all original images and their URLs from the HTML code
- Create a modern, responsive design with improved typography and spacing
- Use modern CSS frameworks and design patterns
- Ensure accessibility and mobile responsiveness
- Maintain the same navigation and user flow
- Enhance the visual design while keeping the original layout structure

If an image is provided, analyze it and use the visual information to better understand the user's requirements.

Always respond with code that can be executed or rendered directly.

Always output only the HTML code inside a ```html ... ``` code block, and do not include any explanations or extra text. Do NOT add the language name at the top of the code output."""

# Stricter prompt for GLM-4.5V to ensure a complete, runnable HTML document with no escaped characters
# NOTE(review): presumably substituted for HTML_SYSTEM_PROMPT when the GLM-4.5V
# model is selected — confirm at the call sites that choose the system prompt.
GLM45V_HTML_SYSTEM_PROMPT = """You are an expert front-end developer.

Output a COMPLETE, STANDALONE HTML document that renders directly in a browser.

Hard constraints:
- DO NOT use React, ReactDOM, JSX, Babel, Vue, Angular, Svelte, or any SPA framework.
- Use ONLY plain HTML, CSS, and vanilla JavaScript.
- Allowed external resources: Tailwind CSS CDN, Font Awesome CDN, Google Fonts.
- Do NOT escape characters (no \\n, \\t, or escaped quotes). Output raw HTML/JS/CSS.

Structural requirements:
- Include <!DOCTYPE html>, <html>, <head>, and <body> with proper nesting
- Include required <link> tags for any CSS you reference (e.g., Tailwind, Font Awesome, Google Fonts)
- Keep everything in ONE file; inline CSS/JS as needed

Return ONLY the code inside a single ```html ... ``` code block. No additional text before or after.
"""

# System prompt for three-file transformers.js apps (index.html / index.js / style.css),
# no web-search variant. The three fenced blocks are parsed out of the response.
TRANSFORMERS_JS_SYSTEM_PROMPT = """You are an expert web developer creating a transformers.js application. You will generate THREE separate files: index.html, index.js, and style.css.

IMPORTANT: You MUST output ALL THREE files in the following format:

```html
<!-- index.html content here -->
```

```javascript
// index.js content here
```

```css
/* style.css content here */
```

Requirements:
1. Create a modern, responsive web application using transformers.js
2. Use the transformers.js library for AI/ML functionality
3. Create a clean, professional UI with good user experience
4. Make the application fully responsive for mobile devices
5. Use modern CSS practices and JavaScript ES6+ features
6. Include proper error handling and loading states
7. Follow accessibility best practices

The index.html should contain the basic HTML structure and link to the CSS and JS files.
The index.js should contain all the JavaScript logic including transformers.js integration.
The style.css should contain all the styling for the application.

Always output only the three code blocks as shown above, and do not include any explanations or extra text."""

# System prompt for Svelte apps: the model emits only the user-specific files
# (src/App.svelte, src/app.css, optional src/lib components); the rest comes
# from a fixed Svelte template. No web-search variant.
SVELTE_SYSTEM_PROMPT = """You are an expert Svelte developer creating a modern Svelte application. You will generate ONLY the custom files that need user-specific content for the user's requested application.

IMPORTANT: You MUST output files in the following format. Generate ONLY the files needed for the user's specific request:

```svelte
<!-- src/App.svelte content here -->
```

```css
/* src/app.css content here */
```

If you need additional components for the user's specific app, add them like:
```svelte
<!-- src/lib/ComponentName.svelte content here -->
```

Requirements:
1. Create a modern, responsive Svelte application based on the user's specific request
2. Use TypeScript for better type safety
3. Create a clean, professional UI with good user experience
4. Make the application fully responsive for mobile devices
5. Use modern CSS practices and Svelte best practices
6. Include proper error handling and loading states
7. Follow accessibility best practices
8. Use Svelte's reactive features effectively
9. Include proper component structure and organization
10. Generate ONLY components that are actually needed for the user's requested application

Files you should generate:
- src/App.svelte: Main application component (ALWAYS required)
- src/app.css: Global styles (ALWAYS required)
- src/lib/[ComponentName].svelte: Additional components (ONLY if needed for the user's specific app)

The other files (index.html, package.json, vite.config.ts, tsconfig files, svelte.config.js, src/main.ts, src/vite-env.d.ts) are provided by the Svelte template and don't need to be generated.

Always output only the two code blocks as shown above, and do not include any explanations or extra text."""

# Web-search variant of SVELTE_SYSTEM_PROMPT (same two-file output contract).
# Fix: the CSS example previously closed its C-style comment with an HTML
# comment terminator ("/* ... -->"), a malformed example the model would copy.
SVELTE_SYSTEM_PROMPT_WITH_SEARCH = """You are an expert Svelte developer creating a modern Svelte application. You have access to real-time web search. When needed, use web search to find the latest information, best practices, or specific Svelte technologies.

You will generate ONLY the custom files that need user-specific content.

IMPORTANT: You MUST output ONLY the custom files in the following format:

```svelte
<!-- src/App.svelte content here -->
```

```css
/* src/app.css content here */
```

Requirements:
1. Create a modern, responsive Svelte application
2. Use TypeScript for better type safety
3. Create a clean, professional UI with good user experience
4. Make the application fully responsive for mobile devices
5. Use modern CSS practices and Svelte best practices
6. Include proper error handling and loading states
7. Follow accessibility best practices
8. Use Svelte's reactive features effectively
9. Include proper component structure and organization
10. Use web search to find the latest Svelte patterns, libraries, and best practices

The files you generate are:
- src/App.svelte: Main application component (your custom app logic)
- src/app.css: Global styles (your custom styling)

The other files (index.html, package.json, vite.config.ts, tsconfig files, svelte.config.js, src/main.ts, src/vite-env.d.ts) are provided by the Svelte template and don't need to be generated.

Always output only the two code blocks as shown above, and do not include any explanations or extra text."""

# Web-search variant of TRANSFORMERS_JS_SYSTEM_PROMPT (same three-file contract).
TRANSFORMERS_JS_SYSTEM_PROMPT_WITH_SEARCH = """You are an expert web developer creating a transformers.js application. You have access to real-time web search. When needed, use web search to find the latest information, best practices, or specific technologies for transformers.js.

You will generate THREE separate files: index.html, index.js, and style.css.

IMPORTANT: You MUST output ALL THREE files in the following format:

```html
<!-- index.html content here -->
```

```javascript
// index.js content here
```

```css
/* style.css content here */
```

Requirements:
1. Create a modern, responsive web application using transformers.js
2. Use the transformers.js library for AI/ML functionality
3. Use web search to find current best practices and latest transformers.js features
4. Create a clean, professional UI with good user experience
5. Make the application fully responsive for mobile devices
6. Use modern CSS practices and JavaScript ES6+ features
7. Include proper error handling and loading states
8. Follow accessibility best practices

The index.html should contain the basic HTML structure and link to the CSS and JS files.
The index.js should contain all the JavaScript logic including transformers.js integration.
The style.css should contain all the styling for the application.

Always output only the three code blocks as shown above, and do not include any explanations or extra text."""

GENERIC_SYSTEM_PROMPT = """You are an expert {language} developer. Write clean, idiomatic, and runnable {language} code for the user's request. If possible, include comments and best practices. Output ONLY the code inside a ``` code block, and do not include any explanations or extra text. If the user provides a file or other context, use it as a reference. If the code is for a script or app, make it as self-contained as possible. Do NOT add the language name at the top of the code output."""

# System prompt with search capability
# Web-search variant of HTML_SYSTEM_PROMPT (same single-file output contract).
HTML_SYSTEM_PROMPT_WITH_SEARCH = """You are an expert front-end developer. You have access to real-time web search.

Output a COMPLETE, STANDALONE HTML document that renders directly in a browser. Requirements:
- Include <!DOCTYPE html>, <html>, <head>, and <body> with proper nesting
- Include all required <link> and <script> tags for any libraries you use
- Do NOT escape characters (no \\n, \\t, or escaped quotes). Output raw HTML/JS/CSS.
- If you use React or Tailwind, include correct CDN tags
- Keep everything in ONE file; inline CSS/JS as needed

Use web search when needed to find the latest best practices or correct CDN links.

For website redesign tasks:
- Use the provided original HTML code as the starting point for redesign
- Preserve all original content, structure, and functionality
- Keep the same semantic HTML structure but enhance the styling
- Reuse all original images and their URLs from the HTML code
- Use web search to find current design trends and best practices for the specific type of website
- Create a modern, responsive design with improved typography and spacing
- Use modern CSS frameworks and design patterns
- Ensure accessibility and mobile responsiveness
- Maintain the same navigation and user flow
- Enhance the visual design while keeping the original layout structure

If an image is provided, analyze it and use the visual information to better understand the user's requirements.

Always respond with code that can be executed or rendered directly.

Always output only the HTML code inside a ```html ... ``` code block, and do not include any explanations or extra text. Do NOT add the language name at the top of the code output."""

# Web-search variant of GENERIC_SYSTEM_PROMPT; `{language}` filled via str.format.
GENERIC_SYSTEM_PROMPT_WITH_SEARCH = """You are an expert {language} developer. You have access to real-time web search. When needed, use web search to find the latest information, best practices, or specific technologies for {language}.

Write clean, idiomatic, and runnable {language} code for the user's request. If possible, include comments and best practices. Output ONLY the code inside a ``` code block, and do not include any explanations or extra text. If the user provides a file or other context, use it as a reference. If the code is for a script or app, make it as self-contained as possible. Do NOT add the language name at the top of the code output."""

# Follow-up system prompt for modifying existing HTML files
# f-string: embeds the SEARCH_START/DIVIDER/REPLACE_END markers so the model's
# patch format matches what the patch-applying parser expects.
FollowUpSystemPrompt = f"""You are an expert web developer modifying an existing project.
The user wants to apply changes based on their request.
You MUST output ONLY the changes required using the following SEARCH/REPLACE block format. Do NOT output the entire file.
Explain the changes briefly *before* the blocks if necessary, but the code changes THEMSELVES MUST be within the blocks.

IMPORTANT: When the user reports an ERROR MESSAGE, analyze it carefully to determine which file needs fixing:
- ImportError/ModuleNotFoundError β†’ Fix requirements.txt by adding missing packages
- Syntax errors in Python code β†’ Fix app.py or the main Python file
- HTML/CSS/JavaScript errors β†’ Fix the respective HTML/CSS/JS files
- Configuration errors β†’ Fix config files, Docker files, etc.

For Python applications (Gradio/Streamlit), the project structure typically includes:
- app.py (main application file)
- requirements.txt (dependencies)
- Other supporting files as needed

Format Rules:
1. Start with {SEARCH_START}
2. Provide the exact lines from the current code that need to be replaced.
3. Use {DIVIDER} to separate the search block from the replacement.
4. Provide the new lines that should replace the original lines.
5. End with {REPLACE_END}
6. You can use multiple SEARCH/REPLACE blocks if changes are needed in different parts of the file.
7. To insert code, use an empty SEARCH block (only {SEARCH_START} and {DIVIDER} on their lines) if inserting at the very beginning, otherwise provide the line *before* the insertion point in the SEARCH block and include that line plus the new lines in the REPLACE block.
8. To delete code, provide the lines to delete in the SEARCH block and leave the REPLACE block empty (only {DIVIDER} and {REPLACE_END} on their lines).
9. IMPORTANT: The SEARCH block must *exactly* match the current code, including indentation and whitespace.
10. For multi-file projects, specify which file you're modifying by starting with the filename before the search/replace block.

Example Modifying Code:
```
Some explanation...
{SEARCH_START}
    <h1>Old Title</h1>
{DIVIDER}
    <h1>New Title</h1>
{REPLACE_END}
{SEARCH_START}
  </body>
{DIVIDER}
    <script>console.log("Added script");</script>
  </body>
{REPLACE_END}
```

Example Fixing Dependencies (requirements.txt):
```
Adding missing dependency to fix ImportError...
=== requirements.txt ===
{SEARCH_START}
gradio
streamlit
{DIVIDER}
gradio
streamlit
mistral-common
{REPLACE_END}
```

Example Deleting Code:
```
Removing the paragraph...
{SEARCH_START}
  <p>This paragraph will be deleted.</p>
{DIVIDER}
{REPLACE_END}
```"""

# Follow-up system prompt for modifying existing transformers.js applications
# f-string: same SEARCH/REPLACE contract as FollowUpSystemPrompt, specialized
# for the three-file (index.html / index.js / style.css) layout.
TransformersJSFollowUpSystemPrompt = f"""You are an expert web developer modifying an existing transformers.js application.
The user wants to apply changes based on their request.
You MUST output ONLY the changes required using the following SEARCH/REPLACE block format. Do NOT output the entire file.
Explain the changes briefly *before* the blocks if necessary, but the code changes THEMSELVES MUST be within the blocks.

IMPORTANT: When the user reports an ERROR MESSAGE, analyze it carefully to determine which file needs fixing:
- JavaScript errors/module loading issues β†’ Fix index.js
- HTML rendering/DOM issues β†’ Fix index.html
- Styling/visual issues β†’ Fix style.css
- CDN/library loading errors β†’ Fix script tags in index.html

The transformers.js application consists of three files: index.html, index.js, and style.css.
When making changes, specify which file you're modifying by starting your search/replace blocks with the file name.

Format Rules:
1. Start with {SEARCH_START}
2. Provide the exact lines from the current code that need to be replaced.
3. Use {DIVIDER} to separate the search block from the replacement.
4. Provide the new lines that should replace the original lines.
5. End with {REPLACE_END}
6. You can use multiple SEARCH/REPLACE blocks if changes are needed in different parts of the file.
7. To insert code, use an empty SEARCH block (only {SEARCH_START} and {DIVIDER} on their lines) if inserting at the very beginning, otherwise provide the line *before* the insertion point in the SEARCH block and include that line plus the new lines in the REPLACE block.
8. To delete code, provide the lines to delete in the SEARCH block and leave the REPLACE block empty (only {DIVIDER} and {REPLACE_END} on their lines).
9. IMPORTANT: The SEARCH block must *exactly* match the current code, including indentation and whitespace.

Example Modifying HTML:
```
Changing the title in index.html...
=== index.html ===
{SEARCH_START}
    <title>Old Title</title>
{DIVIDER}
    <title>New Title</title>
{REPLACE_END}
```

Example Modifying JavaScript:
```
Adding a new function to index.js...
=== index.js ===
{SEARCH_START}
// Existing code
{DIVIDER}
// Existing code

function newFunction() {{
    console.log("New function added");
}}
{REPLACE_END}
```

Example Modifying CSS:
```
Changing background color in style.css...
=== style.css ===
{SEARCH_START}
body {{
    background-color: white;
}}
{DIVIDER}
body {{
    background-color: #f0f0f0;
}}
{REPLACE_END}
```

Example Fixing Library Loading Error:
```
Fixing transformers.js CDN loading error...
=== index.html ===
{SEARCH_START}
<script type="module" src="https://cdn.jsdelivr.net/npm/@xenova/transformers@2.6.0"></script>
{DIVIDER}
<script type="module" src="https://cdn.jsdelivr.net/npm/@xenova/transformers@2.17.2"></script>
{REPLACE_END}
```"""

# Available models
# Each entry: "name" (display label shown in the UI), "id" (backend model id —
# either a Hugging Face repo id or a vendor-specific id routed by
# get_inference_client), and "description" (shown to the user).
AVAILABLE_MODELS = [
    {
        "name": "Moonshot Kimi-K2",
        "id": "moonshotai/Kimi-K2-Instruct",
        "description": "Moonshot AI Kimi-K2-Instruct model for code generation and general tasks"
    },
    {
        "name": "DeepSeek V3",
        "id": "deepseek-ai/DeepSeek-V3-0324",
        "description": "DeepSeek V3 model for code generation"
    },
    {
        "name": "DeepSeek R1", 
        "id": "deepseek-ai/DeepSeek-R1-0528",
        "description": "DeepSeek R1 model for code generation"
    },
    {
        "name": "ERNIE-4.5-VL",
        "id": "baidu/ERNIE-4.5-VL-424B-A47B-Base-PT",
        "description": "ERNIE-4.5-VL model for multimodal code generation with image support"
    },
    {
        "name": "MiniMax M1",
        "id": "MiniMaxAI/MiniMax-M1-80k",
        "description": "MiniMax M1 model for code generation and general tasks"
    },
    {
        "name": "Qwen3-235B-A22B",
        "id": "Qwen/Qwen3-235B-A22B",
        "description": "Qwen3-235B-A22B model for code generation and general tasks"
    },
    {
        "name": "SmolLM3-3B",
        "id": "HuggingFaceTB/SmolLM3-3B",
        "description": "SmolLM3-3B model for code generation and general tasks"
    },
    {
        "name": "GLM-4.5",
        "id": "zai-org/GLM-4.5",
        "description": "GLM-4.5 model with thinking capabilities for advanced code generation"
    },
    {
        "name": "GLM-4.5V",
        "id": "zai-org/GLM-4.5V",
        "description": "GLM-4.5V multimodal model with image understanding for code generation"
    },
    {
        "name": "GLM-4.1V-9B-Thinking",
        "id": "THUDM/GLM-4.1V-9B-Thinking",
        "description": "GLM-4.1V-9B-Thinking model for multimodal code generation with image support"
    },
    {
        "name": "Qwen3-235B-A22B-Instruct-2507",
        "id": "Qwen/Qwen3-235B-A22B-Instruct-2507",
        "description": "Qwen3-235B-A22B-Instruct-2507 model for code generation and general tasks"
    },
    {
        "name": "Qwen3-Coder-480B-A35B-Instruct",
        "id": "Qwen/Qwen3-Coder-480B-A35B-Instruct",
        "description": "Qwen3-Coder-480B-A35B-Instruct model for advanced code generation and programming tasks"
    },
    {
        "name": "Qwen3-32B",
        "id": "Qwen/Qwen3-32B",
        "description": "Qwen3-32B model for code generation and general tasks"
    },
    {
        "name": "Qwen3-4B-Instruct-2507",
        "id": "Qwen/Qwen3-4B-Instruct-2507",
        "description": "Qwen3-4B-Instruct-2507 model for code generation and general tasks"
    },
    {
        "name": "Qwen3-4B-Thinking-2507",
        "id": "Qwen/Qwen3-4B-Thinking-2507",
        "description": "Qwen3-4B-Thinking-2507 model with advanced reasoning capabilities for code generation and general tasks"
    },
    {
        "name": "Qwen3-235B-A22B-Thinking",
        "id": "Qwen/Qwen3-235B-A22B-Thinking-2507",
        "description": "Qwen3-235B-A22B-Thinking model with advanced reasoning capabilities"
    },
    {
        "name": "Qwen3-30B-A3B-Instruct-2507",
        "id": "qwen3-30b-a3b-instruct-2507",
        "description": "Qwen3-30B-A3B-Instruct model via Alibaba Cloud DashScope API"
    },
    {
        "name": "Qwen3-30B-A3B-Thinking-2507",
        "id": "qwen3-30b-a3b-thinking-2507",
        "description": "Qwen3-30B-A3B-Thinking model with advanced reasoning via Alibaba Cloud DashScope API"
    },
    {
        "name": "Qwen3-Coder-30B-A3B-Instruct",
        "id": "qwen3-coder-30b-a3b-instruct",
        "description": "Qwen3-Coder-30B-A3B-Instruct model for advanced code generation via Alibaba Cloud DashScope API"
    },
    {
        "name": "StepFun Step-3",
        "id": "step-3",
        "description": "StepFun Step-3 model - AI chat assistant by ι˜Άθ·ƒζ˜ŸθΎ° with multilingual capabilities"
    },
    {
        "name": "Codestral 2508",
        "id": "codestral-2508",
        "description": "Mistral Codestral model - specialized for code generation and programming tasks"
    },
    {
        "name": "Gemini 2.5 Flash",
        "id": "gemini-2.5-flash",
        "description": "Google Gemini 2.5 Flash via OpenAI-compatible API"
    },
    {
        "name": "Gemini 2.5 Pro",
        "id": "gemini-2.5-pro",
        "description": "Google Gemini 2.5 Pro via OpenAI-compatible API"
    },
    {
        "name": "GPT-OSS-120B",
        "id": "openai/gpt-oss-120b",
        "description": "OpenAI GPT-OSS-120B model for advanced code generation and general tasks"
    },
    {
        "name": "GPT-OSS-20B",
        "id": "openai/gpt-oss-20b",
        "description": "OpenAI GPT-OSS-20B model for code generation and general tasks"
    },
    {
        "name": "GPT-5",
        "id": "gpt-5",
        "description": "OpenAI GPT-5 model for advanced code generation and general tasks"
    },
    {
        "name": "Grok-4",
        "id": "grok-4",
        "description": "Grok-4 model via Poe (OpenAI-compatible) for advanced tasks"
    }
]

# Default model selection
# Pick the entry named DEFAULT_MODEL_NAME; fall back to the first model when
# that name is absent (None only if AVAILABLE_MODELS is empty).
DEFAULT_MODEL_NAME = "Grok-4"
DEFAULT_MODEL = next(
    (model for model in AVAILABLE_MODELS if model.get("name") == DEFAULT_MODEL_NAME),
    AVAILABLE_MODELS[0] if AVAILABLE_MODELS else None,
)

# Example prompts surfaced in the UI; each entry is a {"title", "description"}
# pair describing a demo the user can start from.
DEMO_LIST = [
    {
        "title": "Todo App",
        "description": "Create a simple todo application with add, delete, and mark as complete functionality"
    },
    {
        "title": "Calculator",
        "description": "Build a basic calculator with addition, subtraction, multiplication, and division"
    },
    {
        "title": "Chat Interface",
        "description": "Build a chat interface with message history and user input"
    },
    {
        "title": "E-commerce Product Card",
        "description": "Create a product card component for an e-commerce website"
    },
    {
        "title": "Login Form",
        "description": "Build a responsive login form with validation"
    },
    {
        "title": "Dashboard Layout",
        "description": "Create a dashboard layout with sidebar navigation and main content area"
    },
    {
        "title": "Data Table",
        "description": "Build a data table with sorting and filtering capabilities"
    },
    {
        "title": "Image Gallery",
        "description": "Create an image gallery with lightbox functionality and responsive grid layout"
    },
    {
        "title": "UI from Image",
        "description": "Upload an image of a UI design and I'll generate the HTML/CSS code for it"
    },
    {
        "title": "Extract Text from Image",
        "description": "Upload an image containing text and I'll extract and process the text content"
    },
    {
        "title": "Website Redesign",
        "description": "Enter a website URL to extract its content and redesign it with a modern, responsive layout"
    },
    {
        "title": "Modify HTML",
        "description": "After generating HTML, ask me to modify it with specific changes using search/replace format"
    },
    {
        "title": "Search/Replace Example",
        "description": "Generate HTML first, then ask: 'Change the title to My New Title' or 'Add a blue background to the body'"
    },
    {
        "title": "Transformers.js App",
        "description": "Create a transformers.js application with AI/ML functionality using the transformers.js library"
    },
    {
        "title": "Svelte App",
        "description": "Create a modern Svelte application with TypeScript, Vite, and responsive design"
    }
]

# HF Inference Client
# Fail fast at import time: every InferenceClient constructed below requires
# this token, so a missing HF_TOKEN would otherwise surface much later.
HF_TOKEN = os.getenv('HF_TOKEN')
if not HF_TOKEN:
    raise RuntimeError("HF_TOKEN environment variable is not set. Please set it to your Hugging Face API token.")

def get_inference_client(model_id, provider="auto"):
    """Return an inference client appropriate for `model_id`.

    Routing, in order:
    1. Vendor models served through OpenAI-compatible endpoints (DashScope,
       Poe, StepFun, Gemini) -> `OpenAI` client with the vendor base URL and
       API key read from the matching environment variable.
    2. Codestral -> native `Mistral` client.
    3. Everything else -> Hugging Face `InferenceClient`; a few model ids
       force a specific provider (cerebras/groq), otherwise the caller's
       `provider` argument (default "auto") is used.

    Args:
        model_id: Backend model identifier (see AVAILABLE_MODELS "id" fields).
        provider: HF inference provider hint; overridden for the model ids
            listed in `provider_overrides` below.

    Returns:
        An `OpenAI`, `Mistral`, or `InferenceClient` instance.
    """
    # OpenAI-compatible endpoints, keyed by model id: (env var name, base URL).
    openai_compatible = {
        "qwen3-30b-a3b-instruct-2507": ("DASHSCOPE_API_KEY", "https://dashscope.aliyuncs.com/compatible-mode/v1"),
        "qwen3-30b-a3b-thinking-2507": ("DASHSCOPE_API_KEY", "https://dashscope.aliyuncs.com/compatible-mode/v1"),
        "qwen3-coder-30b-a3b-instruct": ("DASHSCOPE_API_KEY", "https://dashscope.aliyuncs.com/compatible-mode/v1"),
        "gpt-5": ("POE_API_KEY", "https://api.poe.com/v1"),
        "grok-4": ("POE_API_KEY", "https://api.poe.com/v1"),
        "step-3": ("STEP_API_KEY", "https://api.stepfun.com/v1"),
        "gemini-2.5-flash": ("GEMINI_API_KEY", "https://generativelanguage.googleapis.com/v1beta/openai/"),
        "gemini-2.5-pro": ("GEMINI_API_KEY", "https://generativelanguage.googleapis.com/v1beta/openai/"),
    }
    if model_id in openai_compatible:
        env_var, base_url = openai_compatible[model_id]
        return OpenAI(api_key=os.getenv(env_var), base_url=base_url)

    if model_id == "codestral-2508":
        # Codestral uses Mistral's own SDK rather than an OpenAI-compatible API.
        return Mistral(api_key=os.getenv("MISTRAL_API_KEY"))

    # Models that must run on a specific HF inference provider regardless of
    # the caller-supplied `provider` argument.
    provider_overrides = {
        "openai/gpt-oss-120b": "cerebras",
        "openai/gpt-oss-20b": "groq",
        "moonshotai/Kimi-K2-Instruct": "groq",
        "Qwen/Qwen3-235B-A22B": "cerebras",
        "Qwen/Qwen3-235B-A22B-Instruct-2507": "cerebras",
        "Qwen/Qwen3-32B": "cerebras",
        "Qwen/Qwen3-235B-A22B-Thinking-2507": "cerebras",
        "Qwen/Qwen3-Coder-480B-A35B-Instruct": "cerebras",
    }
    provider = provider_overrides.get(model_id, provider)
    return InferenceClient(
        provider=provider,
        api_key=HF_TOKEN,
        bill_to="huggingface"
    )

# Type definitions
# History: list of (user_message, assistant_message) pairs. Note: at runtime
# a user message may also be a multimodal content list (see history_to_messages),
# and Messages values may be non-str for multimodal turns.
History = List[Tuple[str, str]]
Messages = List[Dict[str, str]]

# Tavily Search Client
# Best-effort initialization: web search is an optional feature, so a missing
# key or a failing constructor leaves tavily_client as None (logged) instead
# of crashing the app at import time.
TAVILY_API_KEY = os.getenv('TAVILY_API_KEY')
tavily_client = None
if TAVILY_API_KEY:
    try:
        tavily_client = TavilyClient(api_key=TAVILY_API_KEY)
    except Exception as e:
        print(f"Failed to initialize Tavily client: {e}")
        tavily_client = None

def history_to_messages(history: History, system: str) -> Messages:
    """Build an OpenAI-style message list from history pairs, prefixed by a system prompt.

    Multimodal user turns (lists of typed parts) are flattened to their
    concatenated text before being appended; non-text parts are dropped.
    """
    messages = [{'role': 'system', 'content': system}]
    for user_turn, assistant_turn in history:
        if isinstance(user_turn, list):
            # Flatten multimodal content down to its text parts
            joined = "".join(
                part.get("text", "")
                for part in user_turn
                if isinstance(part, dict) and part.get("type") == "text"
            )
            user_turn = joined if joined else str(user_turn)
        messages.append({'role': 'user', 'content': user_turn})
        messages.append({'role': 'assistant', 'content': assistant_turn})
    return messages

def messages_to_history(messages: "Messages") -> "History":
    """Convert an OpenAI-style message list back into history pairs.

    Skips the leading system message and pairs each user message with the
    following assistant reply. Multimodal user content (a list of typed parts)
    is flattened to its concatenated text.

    Note: the previous annotation claimed ``Tuple[str, History]`` but the
    function has always returned only the History list; the annotation is
    corrected (and quoted so it resolves lazily against the module aliases).

    Raises:
        AssertionError: if the first message is not the system message.
    """
    assert messages[0]['role'] == 'system'
    history = []
    for q, r in zip(messages[1::2], messages[2::2]):
        # Extract text content from multimodal messages for history
        user_content = q['content']
        if isinstance(user_content, list):
            text_content = ""
            for item in user_content:
                if isinstance(item, dict) and item.get("type") == "text":
                    text_content += item.get("text", "")
            user_content = text_content if text_content else str(user_content)

        history.append([user_content, r['content']])
    return history

def history_to_chatbot_messages(history: History) -> List[Dict[str, str]]:
    """Convert history tuples to chatbot message format"""
    chat_messages = []
    for user_msg, assistant_msg in history:
        # Flatten multimodal user content to its concatenated text parts
        if isinstance(user_msg, list):
            text_parts = "".join(
                part.get("text", "")
                for part in user_msg
                if isinstance(part, dict) and part.get("type") == "text"
            )
            user_msg = text_parts if text_parts else str(user_msg)
        chat_messages.append({"role": "user", "content": user_msg})
        chat_messages.append({"role": "assistant", "content": assistant_msg})
    return chat_messages

def remove_code_block(text):
    # Try to match code blocks with language markers
    patterns = [
        r'```(?:html|HTML)\n([\s\S]+?)\n```',  # Match ```html or ```HTML
        r'```\n([\s\S]+?)\n```',               # Match code blocks without language markers
        r'```([\s\S]+?)```'                      # Match code blocks without line breaks
    ]
    for pattern in patterns:
        match = re.search(pattern, text, re.DOTALL)
        if match:
            extracted = match.group(1).strip()
            # Remove a leading language marker line (e.g., 'python') if present
            if extracted.split('\n', 1)[0].strip().lower() in ['python', 'html', 'css', 'javascript', 'json', 'c', 'cpp', 'markdown', 'latex', 'jinja2', 'typescript', 'yaml', 'dockerfile', 'shell', 'r', 'sql', 'sql-mssql', 'sql-mysql', 'sql-mariadb', 'sql-sqlite', 'sql-cassandra', 'sql-plSQL', 'sql-hive', 'sql-pgsql', 'sql-gql', 'sql-gpsql', 'sql-sparksql', 'sql-esper']:
                return extracted.split('\n', 1)[1] if '\n' in extracted else ''
            # If HTML markup starts later in the block (e.g., Poe injected preface), trim to first HTML root
            html_root_idx = None
            for tag in ['<!DOCTYPE html', '<html']:
                idx = extracted.find(tag)
                if idx != -1:
                    html_root_idx = idx if html_root_idx is None else min(html_root_idx, idx)
            if html_root_idx is not None and html_root_idx > 0:
                return extracted[html_root_idx:].strip()
            return extracted
    # If no code block is found, check if the entire text is HTML
    stripped = text.strip()
    if stripped.startswith('<!DOCTYPE html>') or stripped.startswith('<html') or stripped.startswith('<'):
        # If HTML root appears later (e.g., Poe preface), trim to first HTML root
        for tag in ['<!DOCTYPE html', '<html']:
            idx = stripped.find(tag)
            if idx > 0:
                return stripped[idx:].strip()
        return stripped
    # Special handling for python: remove python marker
    if text.strip().startswith('```python'):
        return text.strip()[9:-3].strip()
    # Remove a leading language marker line if present (fallback)
    lines = text.strip().split('\n', 1)
    if lines[0].strip().lower() in ['python', 'html', 'css', 'javascript', 'json', 'c', 'cpp', 'markdown', 'latex', 'jinja2', 'typescript', 'yaml', 'dockerfile', 'shell', 'r', 'sql', 'sql-mssql', 'sql-mysql', 'sql-mariadb', 'sql-sqlite', 'sql-cassandra', 'sql-plSQL', 'sql-hive', 'sql-pgsql', 'sql-gql', 'sql-gpsql', 'sql-sparksql', 'sql-esper']:
        return lines[1] if len(lines) > 1 else ''
    return text.strip()

## React CDN compatibility fixer removed per user preference

def strip_placeholder_thinking(text: str) -> str:
    """Drop any 'Thinking...' status lines (optionally with elapsed time) from streamed text."""
    if not text:
        return text
    placeholder_line = r"(?mi)^[\t ]*Thinking\.\.\.(?:\s*\(\d+s elapsed\))?[\t ]*$\n?"
    return re.sub(placeholder_line, "", text)

def is_placeholder_thinking_only(text: str) -> bool:
    """True when the text consists solely of 'Thinking...' placeholder lines (elapsed suffix allowed)."""
    if not text or not text.strip():
        return False
    placeholder_only = r"(?s)(?:\s*Thinking\.\.\.(?:\s*\(\d+s elapsed\))?\s*)+"
    return re.fullmatch(placeholder_only, text.strip()) is not None

def extract_last_thinking_line(text: str) -> str:
    """Return the most recent 'Thinking...' status in the text, or a bare default."""
    found = re.findall(r"Thinking\.\.\.(?:\s*\(\d+s elapsed\))?", text)
    return found[-1] if found else "Thinking..."

def parse_transformers_js_output(text):
    """Parse transformers.js output and extract the three files (index.html, index.js, style.css)"""
    # Fenced-code-block variants for each target file, tried in order
    pattern_sets = {
        'index.html': [
            r'```html\s*\n([\s\S]+?)\n```',
            r'```htm\s*\n([\s\S]+?)\n```',
            r'```\s*(?:index\.html|html)\s*\n([\s\S]+?)\n```',
        ],
        'index.js': [
            r'```javascript\s*\n([\s\S]+?)\n```',
            r'```js\s*\n([\s\S]+?)\n```',
            r'```\s*(?:index\.js|javascript)\s*\n([\s\S]+?)\n```',
        ],
        'style.css': [
            r'```css\s*\n([\s\S]+?)\n```',
            r'```\s*(?:style\.css|css)\s*\n([\s\S]+?)\n```',
        ],
    }

    # Primary extraction: first matching fenced-block variant wins per file
    files = {name: '' for name in pattern_sets}
    for name, patterns in pattern_sets.items():
        for pattern in patterns:
            found = re.search(pattern, text, re.IGNORECASE)
            if found:
                files[name] = found.group(1).strip()
                break

    # Fallback: "=== filename ===" sections when any file is still missing
    if not all(files.values()):
        section_patterns = {
            'index.html': r'===\s*index\.html\s*===\s*\n([\s\S]+?)(?=\n===|$)',
            'index.js': r'===\s*index\.js\s*===\s*\n([\s\S]+?)(?=\n===|$)',
            'style.css': r'===\s*style\.css\s*===\s*\n([\s\S]+?)(?=\n===|$)',
        }
        for name, pattern in section_patterns.items():
            found = re.search(pattern, text, re.IGNORECASE)
            if found:
                files[name] = found.group(1).strip()

    # Last resort: numbered or markdown file headers like "1. index.html:" or "**index.html**"
    if not all(files.values()):
        header_patterns = [
            (r'(?:^\d+\.\s*|^##\s*|^\*\*\s*)index\.html(?:\s*:|\*\*:?)\s*\n([\s\S]+?)(?=\n(?:\d+\.|##|\*\*|===)|$)', 'index.html'),
            (r'(?:^\d+\.\s*|^##\s*|^\*\*\s*)index\.js(?:\s*:|\*\*:?)\s*\n([\s\S]+?)(?=\n(?:\d+\.|##|\*\*|===)|$)', 'index.js'),
            (r'(?:^\d+\.\s*|^##\s*|^\*\*\s*)style\.css(?:\s*:|\*\*:?)\s*\n([\s\S]+?)(?=\n(?:\d+\.|##|\*\*|===)|$)', 'style.css'),
        ]
        for pattern, name in header_patterns:
            if files[name]:
                continue
            found = re.search(pattern, text, re.IGNORECASE | re.MULTILINE)
            if found:
                # Strip any stray code-fence markers from the captured body
                body = found.group(1).strip()
                body = re.sub(r'^```\w*\s*\n', '', body)
                body = re.sub(r'\n```\s*$', '', body)
                files[name] = body.strip()

    return files

def format_transformers_js_output(files):
    """Render the three transformers.js files as one '=== name ===' delimited string."""
    sections = [
        "=== index.html ===\n" + files['index.html'],
        "\n=== index.js ===\n" + files['index.js'],
        "\n=== style.css ===\n" + files['style.css'],
    ]
    return '\n'.join(sections)

def build_transformers_inline_html(files: dict) -> str:
    """Merge transformers.js three-file output into a single self-contained HTML document.

    - Inlines style.css into a <style> tag
    - Inlines index.js into a <script type="module"> tag
    - Rewrites ESM imports for transformers.js to a stable CDN URL so it works in data: iframes

    Args:
        files: dict that may contain 'index.html', 'index.js' and 'style.css' strings.

    Returns:
        A complete HTML document string; a minimal shell is synthesized when
        'index.html' is missing or does not contain an <html> tag.
    """
    import re as _re

    html = files.get('index.html') or ''
    js = files.get('index.js') or ''
    css = files.get('style.css') or ''

    # Normalize JS imports to CDN (handle both @huggingface/transformers and legacy @xenova/transformers)
    cdn_url = "https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.7.1"
    js = _re.sub(r"from\s+['\"]@huggingface/transformers['\"]", f"from '{cdn_url}'", js)
    js = _re.sub(r"from\s+['\"]@xenova/transformers['\"]", f"from '{cdn_url}'", js)

    # Prepend a small prelude to reduce persistent caching during preview
    # Note: importing env alongside user's own imports is fine in ESM
    if js.strip():
        prelude = (
            f"import {{ env }} from '{cdn_url}';\n"
            "try { env.useBrowserCache = false; } catch (e) {}\n"
        )
        js = prelude + js

    # If index.html missing or doesn't look like a full document, create a minimal shell
    doc = html.strip()
    if not doc or ('<html' not in doc.lower()):
        doc = (
            "<!DOCTYPE html>\n"
            "<html>\n<head>\n<meta charset=\"UTF-8\">\n<meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n<title>Transformers.js App</title>\n</head>\n"
            "<body>\n<div id=\"app\"></div>\n</body>\n</html>"
        )

    # Remove local references to style.css and index.js to avoid duplicates when inlining
    doc = _re.sub(r"<link[^>]+href=\"[^\"]*style\.css\"[^>]*>\s*", "", doc, flags=_re.IGNORECASE)
    doc = _re.sub(r"<script[^>]+src=\"[^\"]*index\.js\"[^>]*>\s*</script>\s*", "", doc, flags=_re.IGNORECASE)

    # Inline CSS: insert before </head> or create a <head>
    style_tag = f"<style>\n{css}\n</style>" if css else ""
    if style_tag:
        if '</head>' in doc.lower():
            # Preserve original casing by finding closing head case-insensitively
            match = _re.search(r"</head>", doc, flags=_re.IGNORECASE)
            if match:
                idx = match.start()
                doc = doc[:idx] + style_tag + doc[idx:]
        else:
            # No head; insert at top of body
            match = _re.search(r"<body[^>]*>", doc, flags=_re.IGNORECASE)
            if match:
                idx = match.end()
                doc = doc[:idx] + "\n" + style_tag + doc[idx:]
            else:
                # Append at beginning
                doc = style_tag + doc

    # Inline JS: insert before </body>
    script_tag = f"<script type=\"module\">\n{js}\n</script>" if js else ""
    # Cleanup script to clear Cache Storage and IndexedDB on unload to free model weights
    cleanup_tag = (
        "<script>\n"
        "(function(){\n"
        "  function cleanup(){\n"
        "    try { if (window.caches && caches.keys) { caches.keys().then(keys => keys.forEach(k => caches.delete(k))); } } catch(e){}\n"
        "    try { if (window.indexedDB && indexedDB.databases) { indexedDB.databases().then(dbs => dbs.forEach(db => db && db.name && indexedDB.deleteDatabase(db.name))); } } catch(e){}\n"
        "  }\n"
        "  window.addEventListener('pagehide', cleanup, { once: true });\n"
        "  window.addEventListener('beforeunload', cleanup, { once: true });\n"
        "})();\n"
        "</script>"
    )
    if script_tag:
        match = _re.search(r"</body>", doc, flags=_re.IGNORECASE)
        if match:
            idx = match.start()
            doc = doc[:idx] + script_tag + cleanup_tag + doc[idx:]
        else:
            # Append at end
            doc = doc + script_tag + cleanup_tag

    return doc

def send_transformers_to_sandbox(files: dict) -> str:
    """Merge the transformers.js files into one HTML document and wrap it for iframe preview."""
    return send_to_sandbox(build_transformers_inline_html(files))

def parse_svelte_output(text):
    """Parse Svelte output to extract individual files"""
    import re

    files = {'src/App.svelte': '', 'src/app.css': ''}

    # Prefer fenced code blocks (```svelte / ```css)
    block_specs = [
        ('src/App.svelte', r'```svelte\s*\n([\s\S]+?)\n```'),
        ('src/app.css', r'```css\s*\n([\s\S]+?)\n```'),
    ]
    for key, pattern in block_specs:
        found = re.search(pattern, text, re.IGNORECASE)
        if found:
            files[key] = found.group(1).strip()

    # Fallback: "=== filename ===" sections when either file is missing
    if not (files['src/App.svelte'] and files['src/app.css']):
        section_specs = [
            ('src/App.svelte', r'===\s*src/App\.svelte\s*===\n([\s\S]+?)(?=\n===|$)'),
            ('src/app.css', r'===\s*src/app\.css\s*===\n([\s\S]+?)(?=\n===|$)'),
        ]
        for key, pattern in section_specs:
            found = re.search(pattern, text, re.IGNORECASE)
            if found:
                files[key] = found.group(1).strip()

    return files

def format_svelte_output(files):
    """Render the Svelte files as one '=== name ===' delimited string."""
    sections = [
        "=== src/App.svelte ===\n" + files['src/App.svelte'],
        "\n=== src/app.css ===\n" + files['src/app.css'],
    ]
    return '\n'.join(sections)

def history_render(history: History):
    """Gradio handler: make the history panel visible and pass the history through unchanged."""
    return gr.update(visible=True), history

def clear_history():
    """Reset chat state: tuple-format history, chatbot messages, uploaded file, and website URL."""
    fresh_history = []
    fresh_chatbot_messages = []
    return fresh_history, fresh_chatbot_messages, None, ""

def update_image_input_visibility(model):
    """Show the image input only for the vision-capable models."""
    vision_model_ids = {
        "baidu/ERNIE-4.5-VL-424B-A47B-Base-PT",
        "THUDM/GLM-4.1V-9B-Thinking",
        "zai-org/GLM-4.5V",
    }
    return gr.update(visible=model.get("id") in vision_model_ids)

def process_image_for_model(image):
    """Encode an uploaded image as a base64 PNG data URL for model input."""
    if image is None:
        return None

    import base64
    import io

    import numpy as np
    from PIL import Image

    # Gradio hands images over as numpy arrays; normalize to PIL first
    if isinstance(image, np.ndarray):
        image = Image.fromarray(image)

    png_buffer = io.BytesIO()
    image.save(png_buffer, format='PNG')
    encoded = base64.b64encode(png_buffer.getvalue()).decode()
    return f"data:image/png;base64,{encoded}"

def generate_image_with_qwen(prompt: str, image_index: int = 0) -> str:
    """Generate an image with Qwen/Qwen-Image via Hugging Face InferenceClient.

    Returns an <img> tag with an optimized base64 JPEG data URL, or a string
    starting with "Error" on failure (callers check that prefix).
    """
    try:
        # Bail out early when no Hugging Face token is configured
        if not os.getenv('HF_TOKEN'):
            return "Error: HF_TOKEN environment variable is not set. Please set it to your Hugging Face API token."

        # Route the request through HF inference with automatic provider selection
        client = InferenceClient(
            provider="auto",
            api_key=os.getenv('HF_TOKEN'),
            bill_to="huggingface",
        )

        image = client.text_to_image(
            prompt,
            model="Qwen/Qwen-Image",
        )

        # Downscale large outputs so the embedded data URL stays small
        max_size = 512
        if image.width > max_size or image.height > max_size:
            image.thumbnail((max_size, max_size), Image.Resampling.LANCZOS)

        import base64
        import io

        # JPEG at quality 85 keeps the base64 payload compact
        jpeg_buffer = io.BytesIO()
        image.convert('RGB').save(jpeg_buffer, format='JPEG', quality=85, optimize=True)
        encoded = base64.b64encode(jpeg_buffer.getvalue()).decode()

        return f'<img src="data:image/jpeg;base64,{encoded}" alt="{prompt}" style="max-width: 100%; height: auto; border-radius: 8px; margin: 10px 0;" loading="lazy" />'

    except Exception as e:
        print(f"Image generation error: {str(e)}")
        return f"Error generating image: {str(e)}"

def generate_image_to_image(input_image_data, prompt: str) -> str:
    """Generate an image using image-to-image with FLUX.1-Kontext-dev via Hugging Face InferenceClient.

    Args:
        input_image_data: source image as a file-like object, a PIL Image,
            a numpy array, or raw bytes/bytearray (anything else is coerced
            via bytes()).
        prompt: text prompt guiding the transformation.

    Returns an HTML <img> tag with optimized base64 JPEG data, similar to text-to-image output.
    On any failure, returns a string starting with "Error" (callers check this prefix).
    """
    try:
        # Check token
        if not os.getenv('HF_TOKEN'):
            return "Error: HF_TOKEN environment variable is not set. Please set it to your Hugging Face API token."

        # Prepare client
        client = InferenceClient(
            provider="auto",
            api_key=os.getenv('HF_TOKEN'),
            bill_to="huggingface",
        )

        # Normalize input image to bytes
        import io
        from PIL import Image
        try:
            import numpy as np
        except Exception:
            # numpy is optional here; the ndarray branch is simply skipped
            np = None

        if hasattr(input_image_data, 'read'):
            # File-like object
            raw = input_image_data.read()
            pil_image = Image.open(io.BytesIO(raw))
        elif hasattr(input_image_data, 'mode') and hasattr(input_image_data, 'size'):
            # PIL Image
            pil_image = input_image_data
        elif np is not None and isinstance(input_image_data, np.ndarray):
            pil_image = Image.fromarray(input_image_data)
        elif isinstance(input_image_data, (bytes, bytearray)):
            pil_image = Image.open(io.BytesIO(input_image_data))
        else:
            # Fallback: try to convert via bytes
            pil_image = Image.open(io.BytesIO(bytes(input_image_data)))

        # Ensure RGB
        if pil_image.mode != 'RGB':
            pil_image = pil_image.convert('RGB')

        buf = io.BytesIO()
        pil_image.save(buf, format='PNG')
        input_bytes = buf.getvalue()

        # Call image-to-image
        image = client.image_to_image(
            input_bytes,
            prompt=prompt,
            model="black-forest-labs/FLUX.1-Kontext-dev",
        )

        # Resize/optimize
        max_size = 512
        if image.width > max_size or image.height > max_size:
            image.thumbnail((max_size, max_size), Image.Resampling.LANCZOS)

        out_buf = io.BytesIO()
        image.convert('RGB').save(out_buf, format='JPEG', quality=85, optimize=True)

        import base64
        img_str = base64.b64encode(out_buf.getvalue()).decode()
        return f"<img src=\"data:image/jpeg;base64,{img_str}\" alt=\"{prompt}\" style=\"max-width: 100%; height: auto; border-radius: 8px; margin: 10px 0;\" loading=\"lazy\" />"
    except Exception as e:
        print(f"Image-to-image generation error: {str(e)}")
        return f"Error generating image (image-to-image): {str(e)}"

def extract_image_prompts_from_text(text: str, num_images_needed: int = 1) -> list:
    """Derive image-generation prompts from the user's text.

    Uses the whole text as the base prompt and produces styled variations so
    that exactly ``num_images_needed`` distinct prompts come back. Returns an
    empty list for blank input.
    """
    base = text.strip()
    if not base:
        return []

    # First three prompts have fixed phrasings; later ones cycle through styles
    leading = [
        base,
        f"Visual representation of {base}",
        f"Illustration of {base}",
    ]
    rotating = [
        f"Digital art of {base}",
        f"Modern design of {base}",
        f"Professional illustration of {base}",
        f"Clean design of {base}",
        f"Beautiful visualization of {base}",
        f"Stylish representation of {base}",
        f"Contemporary design of {base}",
        f"Elegant illustration of {base}",
    ]

    prompts = []
    for i in range(num_images_needed):
        if i < len(leading):
            prompts.append(leading[i])
        else:
            prompts.append(rotating[(i - 3) % len(rotating)])
    return prompts

def create_image_replacement_blocks(html_content: str, user_prompt: str) -> str:
    """Create search/replace blocks to replace placeholder images with generated Qwen images

    Scans the HTML for placeholder <img>/<div> elements, generates one image
    per placeholder via generate_image_with_qwen, and returns
    SEARCH_START/DIVIDER/REPLACE_END blocks for apply_search_replace_changes.
    Returns an empty string when there is nothing to replace or generation
    fails for every prompt.
    """
    if not user_prompt:
        return ""
    
    # Find existing image placeholders in the HTML first
    import re
    
    # Common patterns for placeholder images
    placeholder_patterns = [
        r'<img[^>]*src=["\'](?:placeholder|dummy|sample|example)[^"\']*["\'][^>]*>',
        r'<img[^>]*src=["\']https?://via\.placeholder\.com[^"\']*["\'][^>]*>',
        r'<img[^>]*src=["\']https?://picsum\.photos[^"\']*["\'][^>]*>',
        r'<img[^>]*src=["\']https?://dummyimage\.com[^"\']*["\'][^>]*>',
        r'<img[^>]*alt=["\'][^"\']*placeholder[^"\']*["\'][^>]*>',
        r'<img[^>]*class=["\'][^"\']*placeholder[^"\']*["\'][^>]*>',
        r'<img[^>]*id=["\'][^"\']*placeholder[^"\']*["\'][^>]*>',
        r'<img[^>]*src=["\']data:image[^"\']*["\'][^>]*>',  # Base64 images
        r'<img[^>]*src=["\']#["\'][^>]*>',  # Empty src
        r'<img[^>]*src=["\']about:blank["\'][^>]*>',  # About blank
    ]
    
    # Find all placeholder images
    placeholder_images = []
    for pattern in placeholder_patterns:
        matches = re.findall(pattern, html_content, re.IGNORECASE)
        placeholder_images.extend(matches)
    
    # If no placeholder images found, look for any img tags
    if not placeholder_images:
        img_pattern = r'<img[^>]*>'
        placeholder_images = re.findall(img_pattern, html_content)
    
    # Also look for div elements that might be image placeholders
    div_placeholder_patterns = [
        r'<div[^>]*class=["\'][^"\']*(?:image|img|photo|picture)[^"\']*["\'][^>]*>.*?</div>',
        r'<div[^>]*id=["\'][^"\']*(?:image|img|photo|picture)[^"\']*["\'][^>]*>.*?</div>',
    ]
    
    for pattern in div_placeholder_patterns:
        matches = re.findall(pattern, html_content, re.IGNORECASE | re.DOTALL)
        placeholder_images.extend(matches)
    
    # Count how many images we need to generate
    num_images_needed = len(placeholder_images)
    
    if num_images_needed == 0:
        return ""
    
    # Generate image prompts based on the number of images found
    image_prompts = extract_image_prompts_from_text(user_prompt, num_images_needed)
    
    # Generate images for each prompt
    # NOTE(review): generate_image_with_qwen signals failure via an "Error"
    # string prefix, hence the startswith check instead of exception handling.
    generated_images = []
    for i, prompt in enumerate(image_prompts):
        image_html = generate_image_with_qwen(prompt, i)
        if not image_html.startswith("Error"):
            generated_images.append((i, image_html))
    
    if not generated_images:
        return ""
    
    # Create search/replace blocks
    replacement_blocks = []
    
    for i, (prompt_index, generated_image) in enumerate(generated_images):
        if i < len(placeholder_images):
            # Replace existing placeholder
            placeholder = placeholder_images[i]
            # Clean up the placeholder for better matching
            placeholder_clean = re.sub(r'\s+', ' ', placeholder.strip())
            
            # Try multiple variations of the placeholder for better matching
            placeholder_variations = [
                placeholder_clean,
                placeholder_clean.replace('"', "'"),
                placeholder_clean.replace("'", '"'),
                re.sub(r'\s+', ' ', placeholder_clean),
                placeholder_clean.replace('  ', ' '),
            ]
            
            # Create a replacement block for each variation
            for variation in placeholder_variations:
                replacement_blocks.append(f"""{SEARCH_START}
{variation}
{DIVIDER}
{generated_image}
{REPLACE_END}""")
        else:
            # Add new image if we have more generated images than placeholders
            # Find a good insertion point (after body tag or main content)
            # NOTE(review): this uses the entire document prefix up to <body...>
            # as the search text — assumes the downstream matcher handles large
            # search blocks verbatim; confirm against apply_search_replace_changes.
            if '<body' in html_content:
                body_end = html_content.find('>', html_content.find('<body')) + 1
                insertion_point = html_content[:body_end] + '\n    '
                replacement_blocks.append(f"""{SEARCH_START}
{insertion_point}
{DIVIDER}
{insertion_point}
    {generated_image}
{REPLACE_END}""")
    
    return '\n\n'.join(replacement_blocks)

def create_image_replacement_blocks_text_to_image_single(html_content: str, prompt: str) -> str:
    """Create search/replace blocks that generate and insert ONLY ONE text-to-image result.

    Replaces the first detected placeholder; if none found, inserts one image near the top of <body>.
    Returns an empty string for a blank prompt or when image generation fails.
    """
    if not prompt or not prompt.strip():
        return ""

    import re

    # Detect placeholders similarly to the multi-image version
    placeholder_patterns = [
        r'<img[^>]*src=["\'](?:placeholder|dummy|sample|example)[^"\']*["\'][^>]*>',
        r'<img[^>]*src=["\']https?://via\.placeholder\.com[^"\']*["\'][^>]*>',
        r'<img[^>]*src=["\']https?://picsum\.photos[^"\']*["\'][^>]*>',
        r'<img[^>]*src=["\']https?://dummyimage\.com[^"\']*["\'][^>]*>',
        r'<img[^>]*alt=["\'][^"\']*placeholder[^"\']*["\'][^>]*>',
        r'<img[^>]*class=["\'][^"\']*placeholder[^"\']*["\'][^>]*>',
        r'<img[^>]*id=["\'][^"\']*placeholder[^"\']*["\'][^>]*>',
        r'<img[^>]*src=["\']data:image[^"\']*["\'][^>]*>',
        r'<img[^>]*src=["\']#["\'][^>]*>',
        r'<img[^>]*src=["\']about:blank["\'][^>]*>',
    ]

    placeholder_images = []
    for pattern in placeholder_patterns:
        matches = re.findall(pattern, html_content, re.IGNORECASE)
        if matches:
            placeholder_images.extend(matches)

    # Fallback to any <img> if no placeholders
    if not placeholder_images:
        img_pattern = r'<img[^>]*>'
        placeholder_images = re.findall(img_pattern, html_content)

    # Generate a single image
    # NOTE(review): failure is signalled by an "Error" string prefix from
    # generate_image_with_qwen, not by an exception.
    image_html = generate_image_with_qwen(prompt, 0)
    if image_html.startswith("Error"):
        return ""

    # Replace first placeholder if present
    if placeholder_images:
        placeholder = placeholder_images[0]
        placeholder_clean = re.sub(r'\s+', ' ', placeholder.strip())
        # Quote-swapped and whitespace-normalized variants improve match odds
        placeholder_variations = [
            placeholder_clean,
            placeholder_clean.replace('"', "'"),
            placeholder_clean.replace("'", '"'),
            re.sub(r'\s+', ' ', placeholder_clean),
            placeholder_clean.replace('  ', ' '),
        ]
        blocks = []
        for variation in placeholder_variations:
            blocks.append(f"""{SEARCH_START}
{variation}
{DIVIDER}
{image_html}
{REPLACE_END}""")
        return '\n\n'.join(blocks)

    # Otherwise insert after <body>
    if '<body' in html_content:
        body_end = html_content.find('>', html_content.find('<body')) + 1
        insertion_point = html_content[:body_end] + '\n    '
        return f"""{SEARCH_START}
{insertion_point}
{DIVIDER}
{insertion_point}
    {image_html}
{REPLACE_END}"""

    # If no <body>, just append
    return f"{SEARCH_START}\n\n{DIVIDER}\n{image_html}\n{REPLACE_END}"

def create_image_replacement_blocks_from_input_image(html_content: str, user_prompt: str, input_image_data, max_images: int = 1) -> str:
    """Create search/replace blocks using image-to-image generation with a provided input image.

    Mirrors placeholder detection from create_image_replacement_blocks but uses generate_image_to_image.

    Args:
        html_content: document to scan for placeholder images.
        user_prompt: text used to derive the generation prompts.
        input_image_data: source image forwarded to generate_image_to_image.
        max_images: cap on how many placeholders get replaced.
    """
    if not user_prompt:
        return ""

    import re

    placeholder_patterns = [
        r'<img[^>]*src=["\'](?:placeholder|dummy|sample|example)[^"\']*["\'][^>]*>',
        r'<img[^>]*src=["\']https?://via\.placeholder\.com[^"\']*["\'][^>]*>',
        r'<img[^>]*src=["\']https?://picsum\.photos[^"\']*["\'][^>]*>',
        r'<img[^>]*src=["\']https?://dummyimage\.com[^"\']*["\'][^>]*>',
        r'<img[^>]*alt=["\'][^"\']*placeholder[^"\']*["\'][^>]*>',
        r'<img[^>]*class=["\'][^"\']*placeholder[^"\']*["\'][^>]*>',
        r'<img[^>]*id=["\'][^"\']*placeholder[^"\']*["\'][^>]*>',
        r'<img[^>]*src=["\']data:image[^"\']*["\'][^>]*>',
        r'<img[^>]*src=["\']#["\'][^>]*>',
        r'<img[^>]*src=["\']about:blank["\'][^>]*>',
    ]

    placeholder_images = []
    for pattern in placeholder_patterns:
        matches = re.findall(pattern, html_content, re.IGNORECASE)
        placeholder_images.extend(matches)

    # Fall back to any <img> tag when no explicit placeholders were found
    if not placeholder_images:
        img_pattern = r'<img[^>]*>'
        placeholder_images = re.findall(img_pattern, html_content)

    # Divs named like image containers also count as placeholders
    div_placeholder_patterns = [
        r'<div[^>]*class=["\'][^"\']*(?:image|img|photo|picture)[^"\']*["\'][^>]*>.*?</div>',
        r'<div[^>]*id=["\'][^"\']*(?:image|img|photo|picture)[^"\']*["\'][^>]*>.*?</div>',
    ]
    for pattern in div_placeholder_patterns:
        matches = re.findall(pattern, html_content, re.IGNORECASE | re.DOTALL)
        placeholder_images.extend(matches)

    num_images_needed = len(placeholder_images)
    num_to_replace = min(num_images_needed, max(0, int(max_images)))
    if num_images_needed == 0:
        # No placeholders; generate one image to append (only if at least one upload is present)
        if num_to_replace <= 0:
            return ""
        prompts = extract_image_prompts_from_text(user_prompt, 1)
        if not prompts:
            return ""
        image_html = generate_image_to_image(input_image_data, prompts[0])
        if image_html.startswith("Error"):
            return ""
        return f"{SEARCH_START}\n\n{DIVIDER}\n<div class=\"generated-images\">{image_html}</div>\n{REPLACE_END}"

    if num_to_replace <= 0:
        return ""
    image_prompts = extract_image_prompts_from_text(user_prompt, num_to_replace)

    # generate_image_to_image signals failure via an "Error" string prefix
    generated_images = []
    for i, prompt in enumerate(image_prompts):
        image_html = generate_image_to_image(input_image_data, prompt)
        if not image_html.startswith("Error"):
            generated_images.append((i, image_html))

    if not generated_images:
        return ""

    replacement_blocks = []
    for i, (prompt_index, generated_image) in enumerate(generated_images):
        if i < num_to_replace and i < len(placeholder_images):
            placeholder = placeholder_images[i]
            placeholder_clean = re.sub(r'\s+', ' ', placeholder.strip())
            # Quote-swapped and whitespace-normalized variants improve match odds
            placeholder_variations = [
                placeholder_clean,
                placeholder_clean.replace('"', "'"),
                placeholder_clean.replace("'", '"'),
                re.sub(r'\s+', ' ', placeholder_clean),
                placeholder_clean.replace('  ', ' '),
            ]
            for variation in placeholder_variations:
                replacement_blocks.append(f"""{SEARCH_START}
{variation}
{DIVIDER}
{generated_image}
{REPLACE_END}""")
        # Do not insert additional images beyond the uploaded count

    return '\n\n'.join(replacement_blocks)

def apply_generated_images_to_html(html_content: str, user_prompt: str, enable_text_to_image: bool, enable_image_to_image: bool, input_image_data, image_to_image_prompt: str | None = None, text_to_image_prompt: str | None = None) -> str:
    """Apply text-to-image and/or image-to-image replacements to HTML content.

    If both toggles are enabled, text-to-image replacements run first, then image-to-image.
    """
    result = html_content
    try:
        # If an input image is provided and image-to-image is enabled, we only replace one image
        # and skip text-to-image to satisfy the requirement to replace exactly the number of uploaded images.
        if enable_image_to_image and input_image_data is not None and (result.strip().startswith('<!DOCTYPE html>') or result.strip().startswith('<html')):
            # Prefer the dedicated image-to-image prompt if provided
            i2i_prompt = (image_to_image_prompt or user_prompt or "").strip()
            blocks2 = create_image_replacement_blocks_from_input_image(result, i2i_prompt, input_image_data, max_images=1)
            if blocks2:
                result = apply_search_replace_changes(result, blocks2)
            return result

        if enable_text_to_image and (result.strip().startswith('<!DOCTYPE html>') or result.strip().startswith('<html')):
            t2i_prompt = (text_to_image_prompt or user_prompt or "").strip()
            # Single-image flow for text-to-image
            blocks = create_image_replacement_blocks_text_to_image_single(result, t2i_prompt)
            if blocks:
                result = apply_search_replace_changes(result, blocks)
    except Exception:
        return html_content
    return result

def create_multimodal_message(text, image=None):
    """Build a user chat message whose content is always a plain string.

    Some providers (e.g. Hugging Face router endpoints like Cerebras) reject
    list-style multimodal content with 422 validation errors, so instead of a
    structured payload we append a short textual note when an image is given.
    """
    if image is None:
        content = text
    else:
        # Keep providers happy: inline a note rather than a typed image part.
        content = f"{text}\n\n[An image was provided as reference.]"
    return {"role": "user", "content": content}

def apply_search_replace_changes(original_content: str, changes_text: str) -> str:
    """Apply search/replace changes to content (HTML, Python, etc.)"""
    if not changes_text.strip():
        return original_content

    def _split_blocks(text: str) -> list:
        # Group the change script into chunks delimited by the block markers.
        collected = []
        buffer = ""
        for raw_line in text.split('\n'):
            marker = raw_line.strip()
            if marker == SEARCH_START:
                if buffer.strip():
                    collected.append(buffer.strip())
                buffer = raw_line + '\n'
            elif marker == REPLACE_END:
                buffer += raw_line + '\n'
                collected.append(buffer.strip())
                buffer = ""
            else:
                buffer += raw_line + '\n'
        if buffer.strip():
            collected.append(buffer.strip())
        return collected

    def _parse_block(block_text: str):
        # Separate one block into its search half and its replace half.
        search_part, replace_part = [], []
        mode = None  # None = outside, 's' = search section, 'r' = replace section
        for raw_line in block_text.split('\n'):
            marker = raw_line.strip()
            if marker == SEARCH_START:
                mode = 's'
            elif marker == DIVIDER:
                mode = 'r'
            elif marker == REPLACE_END:
                mode = None
            elif mode == 's':
                search_part.append(raw_line)
            elif mode == 'r':
                replace_part.append(raw_line)
        return search_part, replace_part

    modified_content = original_content
    for block in _split_blocks(changes_text):
        if not block.strip():
            continue
        search_lines, replace_lines = _parse_block(block)
        if not search_lines:
            continue
        search_text = '\n'.join(search_lines).strip()
        replace_text = '\n'.join(replace_lines).strip()
        if search_text in modified_content:
            # Note: replaces every occurrence of the search text.
            modified_content = modified_content.replace(search_text, replace_text)
        else:
            print(f"Warning: Search text not found in content: {search_text[:100]}...")

    return modified_content

def apply_transformers_js_search_replace_changes(original_formatted_content: str, changes_text: str) -> str:
    """Apply search/replace changes to transformers.js formatted content (three files)"""
    if not changes_text.strip():
        return original_formatted_content

    # Recover the three project files from the formatted bundle.
    files = parse_transformers_js_output(original_formatted_content)

    # Split the change script into marker-delimited blocks.
    blocks = []
    pending = ""
    for raw_line in changes_text.split('\n'):
        marker = raw_line.strip()
        if marker == SEARCH_START:
            if pending.strip():
                blocks.append(pending.strip())
            pending = raw_line + '\n'
        elif marker == REPLACE_END:
            pending += raw_line + '\n'
            blocks.append(pending.strip())
            pending = ""
        else:
            pending += raw_line + '\n'
    if pending.strip():
        blocks.append(pending.strip())

    for block in blocks:
        if not block.strip():
            continue

        # Separate the block into its search and replace sections.
        search_section, replace_section = [], []
        section = None
        for raw_line in block.split('\n'):
            marker = raw_line.strip()
            if marker == SEARCH_START:
                section = 'search'
            elif marker == DIVIDER:
                section = 'replace'
            elif marker == REPLACE_END:
                section = None
            elif section == 'search':
                search_section.append(raw_line)
            elif section == 'replace':
                replace_section.append(raw_line)

        if not search_section:
            continue
        search_text = '\n'.join(search_section).strip()
        replace_text = '\n'.join(replace_section).strip()

        # Route the change to whichever file actually contains the search text.
        target_file = None
        for candidate in ('index.html', 'index.js', 'style.css'):
            if search_text in files[candidate]:
                target_file = candidate
                break

        if target_file and search_text in files[target_file]:
            files[target_file] = files[target_file].replace(search_text, replace_text)
        else:
            print(f"Warning: Search text not found in any transformers.js file: {search_text[:100]}...")

    # Re-serialize the (possibly modified) files into the formatted bundle.
    return format_transformers_js_output(files)

# Updated for faster Tavily search and closer prompt usage
# Uses 'advanced' search_depth for better, more relevant results

def perform_web_search(query: str, max_results: int = 5, include_domains=None, exclude_domains=None) -> str:
    """Perform web search using Tavily with default parameters"""
    if not tavily_client:
        return "Web search is not available. Please set the TAVILY_API_KEY environment variable."

    try:
        # Advanced search depth gives better results; clamp the result count
        # to Tavily's accepted 1..20 range.
        params = {
            "search_depth": "advanced",
            "max_results": min(max(1, max_results), 20),
        }
        if include_domains is not None:
            params["include_domains"] = include_domains
        if exclude_domains is not None:
            params["exclude_domains"] = exclude_domains

        response = tavily_client.search(query, **params)

        # Render each hit as a small titled section.
        formatted = [
            "Title: {}\nURL: {}\nContent: {}\n".format(
                item.get('title', 'No title'),
                item.get('url', 'No URL'),
                item.get('content', 'No content'),
            )
            for item in response.get('results', [])
        ]

        if formatted:
            return "Web Search Results:\n\n" + "\n---\n".join(formatted)
        return "No search results found."

    except Exception as e:
        return f"Search error: {str(e)}"

def enhance_query_with_search(query: str, enable_search: bool) -> str:
    """Enhance the query with web search results if search is enabled"""
    if not enable_search or not tavily_client:
        return query

    # Prepend the raw query, then the Tavily results, then usage guidance.
    search_results = perform_web_search(query)
    return (
        f"Original Query: {query}\n\n{search_results}\n\n"
        "Please use the search results above to help create the requested application with the most up-to-date information and best practices."
    )

def send_to_sandbox(code):
    """Render HTML in a sandboxed iframe. Assumes full HTML is provided by prompts."""
    document = (code or "").strip()
    # Inline the document as a base64 data: URI so no server round-trip is needed.
    payload = base64.b64encode(document.encode('utf-8')).decode('utf-8')
    src = f"data:text/html;charset=utf-8;base64,{payload}"
    return (
        f'<iframe src="{src}" width="100%" height="920px" '
        'sandbox="allow-scripts allow-same-origin allow-forms allow-popups allow-modals allow-presentation" '
        'allow="display-capture"></iframe>'
    )

def is_streamlit_code(code: str) -> bool:
    """Heuristic check to determine if Python code is a Streamlit app."""
    if not code:
        return False
    text_lower = code.lower()
    if "import streamlit" in text_lower or "from streamlit" in text_lower:
        return True
    # Weaker signal: `st.` calls alongside any mention of streamlit.
    return "st." in code and "streamlit" in text_lower

def send_streamlit_to_stlite(code: str) -> str:
    """Render Streamlit code using stlite inside a sandboxed iframe for preview."""
    # Assemble a self-contained page that mounts the inline app via stlite.
    page = (
        '<!doctype html>\n'
        '<html>\n'
        '  <head>\n'
        '    <meta charset="UTF-8" />\n'
        '    <meta http-equiv="X-UA-Compatible" content="IE=edge" />\n'
        '    <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no" />\n'
        '    <title>Streamlit Preview</title>\n'
        '    <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@stlite/browser@0.86.0/build/stlite.css" />\n'
        '    <style>html,body{margin:0;padding:0;height:100%;} streamlit-app{display:block;height:100%;}</style>\n'
        '    <script type="module" src="https://cdn.jsdelivr.net/npm/@stlite/browser@0.86.0/build/stlite.js"></script>\n'
        '  </head>\n'
        '  <body>\n'
        '    <streamlit-app>\n'
        + (code or "")
        + '\n    </streamlit-app>\n'
        '  </body>\n'
        '</html>\n'
    )
    # Ship the page as a base64 data: URI inside a sandboxed iframe.
    payload = base64.b64encode(page.encode('utf-8')).decode('utf-8')
    return (
        f'<iframe src="data:text/html;charset=utf-8;base64,{payload}" width="100%" height="920px" '
        'sandbox="allow-scripts allow-same-origin allow-forms allow-popups allow-modals allow-presentation" '
        'allow="display-capture"></iframe>'
    )

def is_gradio_code(code: str) -> bool:
    """Heuristic check to determine if Python code is a Gradio app."""
    if not code:
        return False
    lowered = code.lower()
    if "import gradio" in lowered or "from gradio" in lowered:
        return True
    # Fall back to spotting the common Gradio entry points (case-sensitive).
    return "gr.Interface(" in code or "gr.Blocks(" in code

def send_gradio_to_lite(code: str) -> str:
    """Render Gradio code using gradio-lite inside a sandboxed iframe for preview."""
    # Assemble a self-contained page that mounts the inline app via gradio-lite.
    page = (
        '<!doctype html>\n'
        '<html>\n'
        '  <head>\n'
        '    <meta charset="UTF-8" />\n'
        '    <meta http-equiv="X-UA-Compatible" content="IE=edge" />\n'
        '    <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no" />\n'
        '    <title>Gradio Preview</title>\n'
        '    <script type="module" crossorigin src="https://cdn.jsdelivr.net/npm/@gradio/lite/dist/lite.js"></script>\n'
        '    <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@gradio/lite/dist/lite.css" />\n'
        '    <style>html,body{margin:0;padding:0;height:100%;} gradio-lite{display:block;height:100%;}</style>\n'
        '  </head>\n'
        '  <body>\n'
        '    <gradio-lite>\n'
        + (code or "")
        + '\n    </gradio-lite>\n'
        '  </body>\n'
        '</html>\n'
    )
    # Ship the page as a base64 data: URI inside a sandboxed iframe.
    payload = base64.b64encode(page.encode('utf-8')).decode('utf-8')
    return (
        f'<iframe src="data:text/html;charset=utf-8;base64,{payload}" width="100%" height="920px" '
        'sandbox="allow-scripts allow-same-origin allow-forms allow-popups allow-modals allow-presentation" '
        'allow="display-capture"></iframe>'
    )

def demo_card_click(e: gr.EventData):
    """Return the description of the clicked demo card.

    The card index is looked up in several places inside the Gradio event
    payload (its location varies across Gradio versions). Any malformed
    payload or out-of-range index falls back to the first demo's description.
    """
    try:
        # Fixed: the original bound the caught exception to `e`, shadowing the
        # event parameter; we no longer need the exception object at all.
        data = getattr(e, '_data', None) or {}
        # The index may live at different payload locations depending on version.
        if 'index' in data:
            index = data['index']
        elif 'index' in data.get('component', {}):
            index = data['component']['index']
        elif 'index' in data.get('target', {}):
            index = data['target']['index']
        else:
            index = 0
        # Clamp out-of-range indices (including negative ones) to the first demo.
        if not 0 <= index < len(DEMO_LIST):
            index = 0
        return DEMO_LIST[index]['description']
    except (KeyError, IndexError, AttributeError, TypeError):
        # TypeError covers non-dict payload sub-objects and non-int indices.
        return DEMO_LIST[0]['description']

def extract_text_from_image(image_path):
    """Extract text from image using OCR"""
    try:
        # Bail out early with a helpful message when Tesseract is missing.
        try:
            pytesseract.get_tesseract_version()
        except Exception:
            return "Error: Tesseract OCR is not installed. Please install Tesseract to extract text from images. See install_tesseract.md for instructions."

        frame = cv2.imread(image_path)
        if frame is None:
            return "Error: Could not read image file"

        # OpenCV loads BGR; convert to RGB, then grayscale, then Otsu-binarize
        # to give the OCR engine a clean high-contrast input.
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)
        _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

        # --psm 6: assume a single uniform block of text.
        extracted = pytesseract.image_to_string(binary, config='--psm 6')
        return extracted.strip() if extracted.strip() else "No text found in image"

    except Exception as e:
        return f"Error extracting text from image: {e}"

def extract_text_from_file(file_path):
    """Extract plain text from a local file based on its extension.

    Supports PDF (.pdf), plain-text formats (.txt/.md/.csv), Word documents
    (.docx) and common image formats (via OCR). Returns an empty string for
    unsupported types or when no path is given; extraction failures are
    reported as a human-readable "Error ..." string rather than raised.
    """
    if not file_path:
        return ""
    # Fixed: dropped an unused `mimetypes.guess_type` call whose result was
    # never read; dispatch is purely extension-based.
    ext = os.path.splitext(file_path)[1].lower()
    try:
        if ext == ".pdf":
            with open(file_path, "rb") as f:
                reader = PyPDF2.PdfReader(f)
                # extract_text() may return None for image-only pages.
                return "\n".join(page.extract_text() or "" for page in reader.pages)
        elif ext in (".txt", ".md", ".csv"):
            # All plain-text formats are read verbatim (previously duplicated
            # across two identical branches).
            with open(file_path, "r", encoding="utf-8") as f:
                return f.read()
        elif ext == ".docx":
            doc = docx.Document(file_path)
            return "\n".join(para.text for para in doc.paragraphs)
        elif ext in (".jpg", ".jpeg", ".png", ".bmp", ".tiff", ".tif", ".gif", ".webp"):
            # `ext` is already lower-cased above (the original re-lowered it here).
            return extract_text_from_image(file_path)
        else:
            return ""
    except Exception as e:
        return f"Error extracting text: {e}"

def extract_website_content(url: str) -> str:
    """Extract HTML code and content from a website URL.

    Fetches the page with browser-like headers (retrying with an alternate
    User-Agent on a 403), rewrites relative <img> and background-image URLs
    to absolute ones, truncates the cleaned HTML to ~15k characters, and
    returns a large prompt string embedding that HTML plus redesign
    instructions. All failures are reported as a returned string starting
    with "Error"; this function does not raise.
    """
    try:
        # Validate URL; default to https:// when no scheme was given.
        parsed_url = urlparse(url)
        if not parsed_url.scheme:
            url = "https://" + url
            parsed_url = urlparse(url)

        if not parsed_url.netloc:
            return "Error: Invalid URL provided"

        # Set comprehensive headers to mimic a real browser request
        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.9',
            'Accept-Encoding': 'gzip, deflate, br',
            'DNT': '1',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'none',
            'Sec-Fetch-User': '?1',
            'Cache-Control': 'max-age=0'
        }

        # Create a session to maintain cookies and handle redirects
        session = requests.Session()
        session.headers.update(headers)

        # Make the request with retry logic: on a 403, switch to a Windows
        # User-Agent and retry; any other HTTP error propagates immediately.
        max_retries = 3
        for attempt in range(max_retries):
            try:
                response = session.get(url, timeout=15, allow_redirects=True)
                response.raise_for_status()
                break
            except requests.exceptions.HTTPError as e:
                if e.response.status_code == 403 and attempt < max_retries - 1:
                    # Try with different User-Agent on 403
                    session.headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
                    continue
                else:
                    raise

        # Get the raw HTML content with proper encoding
        try:
            # Try to get the content with automatic encoding detection
            response.encoding = response.apparent_encoding
            raw_html = response.text
        except:
            # Fallback to UTF-8 if encoding detection fails
            raw_html = response.content.decode('utf-8', errors='ignore')

        # Debug: Check if we got valid HTML
        if not raw_html.strip().startswith('<!DOCTYPE') and not raw_html.strip().startswith('<html'):
            print(f"Warning: Response doesn't look like HTML. First 200 chars: {raw_html[:200]}")
            print(f"Response headers: {dict(response.headers)}")
            print(f"Response encoding: {response.encoding}")
            print(f"Apparent encoding: {response.apparent_encoding}")

            # Try alternative decodings in order: latin-1, utf-8, cp1252.
            try:
                raw_html = response.content.decode('latin-1', errors='ignore')
                print("Tried latin-1 decoding")
            except:
                try:
                    raw_html = response.content.decode('utf-8', errors='ignore')
                    print("Tried UTF-8 decoding")
                except:
                    raw_html = response.content.decode('cp1252', errors='ignore')
                    print("Tried cp1252 decoding")

        # Parse HTML content for analysis
        soup = BeautifulSoup(raw_html, 'html.parser')

        # Check if this is a JavaScript-heavy site
        script_tags = soup.find_all('script')
        if len(script_tags) > 10:
            print(f"Warning: This site has {len(script_tags)} script tags - it may be a JavaScript-heavy site")
            print("The content might be loaded dynamically and not available in the initial HTML")

        # Extract title
        title = soup.find('title')
        title_text = title.get_text().strip() if title else "No title found"

        # Extract meta description
        meta_desc = soup.find('meta', attrs={'name': 'description'})
        description = meta_desc.get('content', '') if meta_desc else ""

        # Extract main content areas for analysis
        content_sections = []
        main_selectors = [
            'main', 'article', '.content', '.main-content', '.post-content',
            '#content', '#main', '.entry-content', '.post-body'
        ]

        for selector in main_selectors:
            elements = soup.select(selector)
            for element in elements:
                text = element.get_text().strip()
                if len(text) > 100:  # Only include substantial content
                    content_sections.append(text)

        # Extract navigation links for analysis
        nav_links = []
        nav_elements = soup.find_all(['nav', 'header'])
        for nav in nav_elements:
            links = nav.find_all('a')
            for link in links:
                link_text = link.get_text().strip()
                link_href = link.get('href', '')
                if link_text and link_href:
                    nav_links.append(f"{link_text}: {link_href}")

        # Extract and fix image URLs in the HTML (mutates the soup in place so
        # the HTML emitted later carries absolute URLs).
        img_elements = soup.find_all('img')
        for img in img_elements:
            src = img.get('src', '')
            if src:
                # Handle different URL formats
                if src.startswith('//'):
                    # Protocol-relative URL
                    absolute_src = 'https:' + src
                    img['src'] = absolute_src
                elif src.startswith('/'):
                    # Root-relative URL
                    absolute_src = urljoin(url, src)
                    img['src'] = absolute_src
                elif not src.startswith(('http://', 'https://')):
                    # Relative URL
                    absolute_src = urljoin(url, src)
                    img['src'] = absolute_src
                # If it's already absolute, keep it as is

                # Also check for data-src (lazy loading) and other common attributes
                data_src = img.get('data-src', '')
                if data_src and not src:
                    # Use data-src if src is empty
                    if data_src.startswith('//'):
                        absolute_data_src = 'https:' + data_src
                        img['src'] = absolute_data_src
                    elif data_src.startswith('/'):
                        absolute_data_src = urljoin(url, data_src)
                        img['src'] = absolute_data_src
                    elif not data_src.startswith(('http://', 'https://')):
                        absolute_data_src = urljoin(url, data_src)
                        img['src'] = absolute_data_src
                    else:
                        img['src'] = data_src

        # Also fix background image URLs in style attributes
        elements_with_style = soup.find_all(attrs={'style': True})
        for element in elements_with_style:
            style_attr = element.get('style', '')
            # Find and replace relative URLs in background-image
            # NOTE(review): local import — `re` is presumably already available
            # at module level (it is used again below without re-importing).
            import re
            bg_pattern = r'background-image:\s*url\(["\']?([^"\']+)["\']?\)'
            matches = re.findall(bg_pattern, style_attr, re.IGNORECASE)
            for match in matches:
                if match:
                    if match.startswith('//'):
                        absolute_bg = 'https:' + match
                        style_attr = style_attr.replace(match, absolute_bg)
                    elif match.startswith('/'):
                        absolute_bg = urljoin(url, match)
                        style_attr = style_attr.replace(match, absolute_bg)
                    elif not match.startswith(('http://', 'https://')):
                        absolute_bg = urljoin(url, match)
                        style_attr = style_attr.replace(match, absolute_bg)
            element['style'] = style_attr

        # Fix background images in <style> tags
        style_elements = soup.find_all('style')
        for style in style_elements:
            if style.string:
                style_content = style.string
                # Find and replace relative URLs in background-image
                bg_pattern = r'background-image:\s*url\(["\']?([^"\']+)["\']?\)'
                matches = re.findall(bg_pattern, style_content, re.IGNORECASE)
                for match in matches:
                    if match:
                        if match.startswith('//'):
                            absolute_bg = 'https:' + match
                            style_content = style_content.replace(match, absolute_bg)
                        elif match.startswith('/'):
                            absolute_bg = urljoin(url, match)
                            style_content = style_content.replace(match, absolute_bg)
                        elif not match.startswith(('http://', 'https://')):
                            absolute_bg = urljoin(url, match)
                            style_content = style_content.replace(match, absolute_bg)
                style.string = style_content

        # Extract images for analysis (after fixing URLs)
        images = []
        img_elements = soup.find_all('img')
        for img in img_elements:
            src = img.get('src', '')
            alt = img.get('alt', '')
            if src:
                images.append({'src': src, 'alt': alt})

        # Debug: Print some image URLs to see what we're getting
        print(f"Found {len(images)} images:")
        for i, img in enumerate(images[:5]):  # Show first 5 images
            print(f"  {i+1}. {img['alt'] or 'No alt'} - {img['src']}")

        # Test a few image URLs to see if they're accessible
        def test_image_url(img_url):
            # Returns True when a HEAD request answers 200; any network error
            # counts as broken.
            try:
                test_response = requests.head(img_url, timeout=5, allow_redirects=True)
                return test_response.status_code == 200
            except:
                return False

        # Test first few images
        working_images = []
        for img in images[:10]:  # Test first 10 images
            if test_image_url(img['src']):
                working_images.append(img)
            else:
                print(f"  ❌ Broken image: {img['src']}")

        print(f"Working images: {len(working_images)} out of {len(images)}")

        # Get the modified HTML with absolute URLs
        modified_html = str(soup)

        # Clean and format the HTML for better readability
        # Remove unnecessary whitespace and comments
        import re
        cleaned_html = re.sub(r'<!--.*?-->', '', modified_html, flags=re.DOTALL)  # Remove HTML comments
        cleaned_html = re.sub(r'\s+', ' ', cleaned_html)  # Normalize whitespace
        cleaned_html = re.sub(r'>\s+<', '><', cleaned_html)  # Remove whitespace between tags

        # Limit HTML size to avoid token limits (keep first 15000 chars)
        if len(cleaned_html) > 15000:
            cleaned_html = cleaned_html[:15000] + "\n<!-- ... HTML truncated for length ... -->"

        # Check if we got any meaningful content; derive a title from the URL
        # path when the page had none.
        if not title_text or title_text == "No title found":
            title_text = url.split('/')[-1] or url.split('/')[-2] or "Website"

        # If we couldn't extract any meaningful content, provide a fallback
        if len(cleaned_html.strip()) < 100:
            website_content = f"""
WEBSITE REDESIGN - EXTRACTION FAILED
====================================

URL: {url}
Title: {title_text}

ERROR: Could not extract meaningful HTML content from this website. This could be due to:
1. The website uses heavy JavaScript to load content dynamically
2. The website has anti-bot protection
3. The website requires authentication
4. The website is using advanced compression or encoding

FALLBACK APPROACH:
Please create a modern, responsive website design for a {title_text.lower()} website. Since I couldn't extract the original content, you can:

1. Create a typical layout for this type of website
2. Use placeholder content that would be appropriate
3. Include modern design elements and responsive features
4. Use a clean, professional design with good typography
5. Make it mobile-friendly and accessible

The website appears to be: {title_text}
"""
            return website_content.strip()

        # Compile the extracted content with the actual HTML code
        website_content = f"""
WEBSITE REDESIGN - ORIGINAL HTML CODE
=====================================

URL: {url}
Title: {title_text}
Description: {description}

PAGE ANALYSIS:
- This appears to be a {title_text.lower()} website
- Contains {len(content_sections)} main content sections
- Has {len(nav_links)} navigation links
- Includes {len(images)} images

IMAGES FOUND (use these exact URLs in your redesign):
{chr(10).join([f"β€’ {img['alt'] or 'Image'} - {img['src']}" for img in working_images[:20]]) if working_images else "No working images found"}

ALL IMAGES (including potentially broken ones):
{chr(10).join([f"β€’ {img['alt'] or 'Image'} - {img['src']}" for img in images[:20]]) if images else "No images found"}

ORIGINAL HTML CODE (use this as the base for redesign):
```html
{cleaned_html}
```

REDESIGN INSTRUCTIONS:
Please redesign this website with a modern, responsive layout while:
1. Preserving all the original content and structure
2. Maintaining the same navigation and functionality
3. Using the original images and their URLs (listed above)
4. Creating a modern, clean design with improved typography and spacing
5. Making it fully responsive for mobile devices
6. Using modern CSS frameworks and best practices
7. Keeping the same semantic structure but with enhanced styling

IMPORTANT: All image URLs in the HTML code above have been converted to absolute URLs and are ready to use. Make sure to preserve these exact image URLs in your redesigned version.

The HTML code above contains the complete original website structure with all images properly linked. Use it as your starting point and create a modernized version.
"""

        return website_content.strip()

    except requests.exceptions.HTTPError as e:
        if e.response.status_code == 403:
            return f"Error: Website blocked access (403 Forbidden). This website may have anti-bot protection. Try a different website or provide a description of what you want to build instead."
        elif e.response.status_code == 404:
            return f"Error: Website not found (404). Please check the URL and try again."
        elif e.response.status_code >= 500:
            return f"Error: Website server error ({e.response.status_code}). Please try again later."
        else:
            return f"Error accessing website: HTTP {e.response.status_code} - {str(e)}"
    except requests.exceptions.Timeout:
        return "Error: Request timed out. The website may be slow or unavailable."
    except requests.exceptions.ConnectionError:
        return "Error: Could not connect to the website. Please check your internet connection and the URL."
    except requests.exceptions.RequestException as e:
        return f"Error accessing website: {str(e)}"
    except Exception as e:
        return f"Error extracting website content: {str(e)}"


# Module-level cancellation flag; presumably toggled by a UI "stop" handler
# elsewhere in the file to abort an in-flight generation — TODO confirm usage.
stop_generation = False


def generation_code(query: Optional[str], image: Optional[gr.Image], file: Optional[str], website_url: Optional[str], _setting: Dict[str, str], _history: Optional[History], _current_model: Dict, enable_search: bool = False, language: str = "html", provider: str = "auto", enable_image_generation: bool = False, enable_image_to_image: bool = False, image_to_image_prompt: Optional[str] = None, text_to_image_prompt: Optional[str] = None):
    if query is None:
        query = ''
    if _history is None:
        _history = []
    # Ensure _history is always a list of lists with at least 2 elements per item
    if not isinstance(_history, list):
        _history = []
    _history = [h for h in _history if isinstance(h, list) and len(h) == 2]

    # Check if there's existing content in history to determine if this is a modification request
    has_existing_content = False
    last_assistant_msg = ""
    if _history and len(_history[-1]) > 1:
        last_assistant_msg = _history[-1][1]
        # Check for various content types that indicate an existing project
        if ('<!DOCTYPE html>' in last_assistant_msg or 
            '<html' in last_assistant_msg or
            'import gradio' in last_assistant_msg or
            'import streamlit' in last_assistant_msg or
            'def ' in last_assistant_msg and 'app' in last_assistant_msg or
            'IMPORTED PROJECT FROM HUGGING FACE SPACE' in last_assistant_msg or
            '=== index.html ===' in last_assistant_msg or
            '=== index.js ===' in last_assistant_msg or
            '=== style.css ===' in last_assistant_msg or
            '=== src/App.svelte ===' in last_assistant_msg):
            has_existing_content = True

    # Choose system prompt based on context
    if has_existing_content:
        # Use follow-up prompt for modifying existing content
        if language == "transformers.js":
            system_prompt = TransformersJSFollowUpSystemPrompt
        elif language == "svelte":
            system_prompt = FollowUpSystemPrompt  # Use generic follow-up for Svelte
        else:
            system_prompt = FollowUpSystemPrompt
    else:
        # Use language-specific prompt
        if language == "html":
            system_prompt = HTML_SYSTEM_PROMPT_WITH_SEARCH if enable_search else HTML_SYSTEM_PROMPT
        elif language == "transformers.js":
            system_prompt = TRANSFORMERS_JS_SYSTEM_PROMPT_WITH_SEARCH if enable_search else TRANSFORMERS_JS_SYSTEM_PROMPT
        elif language == "svelte":
            system_prompt = SVELTE_SYSTEM_PROMPT_WITH_SEARCH if enable_search else SVELTE_SYSTEM_PROMPT
        else:
            system_prompt = GENERIC_SYSTEM_PROMPT_WITH_SEARCH.format(language=language) if enable_search else GENERIC_SYSTEM_PROMPT.format(language=language)

    messages = history_to_messages(_history, system_prompt)

    # Extract file text and append to query if file is present
    file_text = ""
    if file:
        file_text = extract_text_from_file(file)
        if file_text:
            file_text = file_text[:5000]  # Limit to 5000 chars for prompt size
            query = f"{query}\n\n[Reference file content below]\n{file_text}"

    # Extract website content and append to query if website URL is present
    website_text = ""
    if website_url and website_url.strip():
        website_text = extract_website_content(website_url.strip())
        if website_text and not website_text.startswith("Error"):
            website_text = website_text[:8000]  # Limit to 8000 chars for prompt size
            query = f"{query}\n\n[Website content to redesign below]\n{website_text}"
        elif website_text.startswith("Error"):
            # Provide helpful guidance when website extraction fails
            fallback_guidance = """
Since I couldn't extract the website content, please provide additional details about what you'd like to build:

1. What type of website is this? (e.g., e-commerce, blog, portfolio, dashboard)
2. What are the main features you want?
3. What's the target audience?
4. Any specific design preferences? (colors, style, layout)

This will help me create a better design for you."""
            query = f"{query}\n\n[Error extracting website: {website_text}]{fallback_guidance}"

    # Enhance query with search if enabled
    enhanced_query = enhance_query_with_search(query, enable_search)

    # Check if this is GLM-4.5 model and handle with simple HuggingFace InferenceClient
    if _current_model["id"] == "zai-org/GLM-4.5":
        if image is not None:
            messages.append(create_multimodal_message(enhanced_query, image))
        else:
            messages.append({'role': 'user', 'content': enhanced_query})
        
        try:
            client = InferenceClient(
                provider="auto",
                api_key=os.environ["HF_TOKEN"],
                bill_to="huggingface",
            )
            
            stream = client.chat.completions.create(
                model="zai-org/GLM-4.5",
                messages=messages,
                stream=True,
            )
            
            content = ""
            for chunk in stream:
                if chunk.choices[0].delta.content:
                    content += chunk.choices[0].delta.content
                    clean_code = remove_code_block(content)
                    # Live streaming preview
                    preview_val = None
                    if language == "html":
                        preview_val = send_to_sandbox(clean_code)
                    elif language == "python" and is_streamlit_code(clean_code):
                        preview_val = send_streamlit_to_stlite(clean_code)
                    yield {
                        code_output: gr.update(value=clean_code, language=get_gradio_language(language)),
                        history_output: history_to_chatbot_messages(_history),
                        sandbox: preview_val or "<div style='padding:1em;color:#888;text-align:center;'>Preview is only available for HTML or Streamlit-in-Python.</div>",
                    }
            
        except Exception as e:
            content = f"Error with GLM-4.5: {str(e)}\n\nPlease make sure HF_TOKEN environment variable is set."
        
        clean_code = remove_code_block(content)
        
        # Apply image generation (text→image and/or image→image)
        final_content = apply_generated_images_to_html(
            content,
            query,
            enable_text_to_image=enable_image_generation,
            enable_image_to_image=enable_image_to_image,
            input_image_data=image,
            image_to_image_prompt=image_to_image_prompt,
        )
        
        _history.append([query, final_content])
        
        if language == "transformers.js":
            files = parse_transformers_js_output(clean_code)
            if files['index.html'] and files['index.js'] and files['style.css']:
                # Apply image generation if enabled
                if enable_image_generation:
                    # Create search/replace blocks for image replacement based on images found in code
                    image_replacement_blocks = create_image_replacement_blocks(files['index.html'], query)
                    if image_replacement_blocks:
                        # Apply the image replacements using existing search/replace logic
                        files['index.html'] = apply_search_replace_changes(files['index.html'], image_replacement_blocks)
                
                formatted_output = format_transformers_js_output(files)
                yield {
                    code_output: formatted_output,
                    history: _history,
                    sandbox: send_transformers_to_sandbox(files),
                    history_output: history_to_chatbot_messages(_history),
                }
            else:
                yield {
                    code_output: clean_code,
                    history: _history,
                    sandbox: "<div style='padding:1em;color:#888;text-align:center;'>Error parsing transformers.js output. Please try again.</div>",
                    history_output: history_to_chatbot_messages(_history),
                }
        elif language == "svelte":
            files = parse_svelte_output(clean_code)
            if files['src/App.svelte'] and files['src/app.css']:
                # Apply image generation if enabled (add image generation logic to Svelte)
                if enable_image_generation:
                    # For Svelte, we'll add a script section that generates images dynamically
                    # This is more appropriate for Svelte than trying to inject static images
                    image_generation_script = """
<script>
    import { onMount } from 'svelte';
    
    let generatedImages = [];
    
    onMount(async () => {
        // Generate images using Qwen API based on the user prompt
        const userPrompt = """ + repr(query) + """;
        
        // Create variations for multiple images
        const imagePrompts = [
            userPrompt,
            `Visual representation of ${userPrompt}`,
            `Illustration of ${userPrompt}`
        ];
        
        for (const prompt of imagePrompts) {
            try {
                // This would need to be implemented with actual API calls
                // For now, we'll create placeholder elements
                generatedImages = [...generatedImages, {
                    prompt: prompt,
                    src: `data:image/svg+xml;base64,${btoa('<svg xmlns="http://www.w3.org/2000/svg" width="300" height="200"><rect width="100%" height="100%" fill="#f0f0f0"/><text x="50%" y="50%" text-anchor="middle" dy=".3em" fill="#666">Generated: ${prompt}</text></svg>')}`,
                    alt: prompt
                }];
            } catch (error) {
                console.error('Error generating image:', error);
            }
        }
    });
</script>

<!-- Generated Images Section -->
{#if generatedImages.length > 0}
    <div class="generated-images">
        <h3>Generated Images</h3>
        <div class="image-grid">
            {#each generatedImages as image}
                <img src={image.src} alt={image.alt} style="max-width: 100%; height: auto; border-radius: 8px; margin: 10px 0;" />
            {/each}
        </div>
    </div>
{/if}"""
                    
                    # Add the image generation script to App.svelte
                    if '<script>' in files['src/App.svelte']:
                        # Find the end of the script section and add after it
                        script_end = files['src/App.svelte'].find('</script>') + 8
                        files['src/App.svelte'] = files['src/App.svelte'][:script_end] + '\n' + image_generation_script + files['src/App.svelte'][script_end:]
                    else:
                        # Add script section at the beginning
                        files['src/App.svelte'] = image_generation_script + '\n\n' + files['src/App.svelte']
                    
                    # Add CSS for generated images
                    image_css = """
/* Generated Images Styling */
.generated-images {
    margin: 20px 0;
    padding: 20px;
    background: #f8f9fa;
    border-radius: 8px;
    border: 1px solid #e9ecef;
}

.generated-images h3 {
    margin: 0 0 15px 0;
    color: #495057;
    font-size: 1.2em;
}

.image-grid {
    display: grid;
    grid-template-columns: repeat(auto-fit, minmax(250px, 1fr));
    gap: 15px;
    align-items: start;
}

.image-grid img {
    width: 100%;
    height: auto;
    border-radius: 8px;
    box-shadow: 0 2px 8px rgba(0,0,0,0.1);
    transition: transform 0.2s ease;
}

.image-grid img:hover {
    transform: scale(1.02);
}
"""
                    
                    # Add CSS to app.css
                    if files['src/app.css']:
                        files['src/app.css'] += '\n' + image_css
                    else:
                        files['src/app.css'] = image_css
                
                formatted_output = format_svelte_output(files)
                yield {
                    code_output: formatted_output,
                    history: _history,
                    sandbox: "<div style='padding:1em;color:#888;text-align:center;'>Preview is only available for HTML. Please download your Svelte code using the download button above.</div>",
                    history_output: history_to_chatbot_messages(_history),
                }
            else:
                yield {
                    code_output: clean_code,
                    history: _history,
                    sandbox: "<div style='padding:1em;color:#888;text-align:center;'>Preview is only available for HTML. Please download your Svelte code using the download button above.</div>",
                    history_output: history_to_chatbot_messages(_history),
                }
        else:
            if has_existing_content and not (clean_code.strip().startswith("<!DOCTYPE html>") or clean_code.strip().startswith("<html")):
                last_content = _history[-1][1] if _history and len(_history[-1]) > 1 else ""
                modified_content = apply_search_replace_changes(last_content, clean_code)
                clean_content = remove_code_block(modified_content)
                
                # Apply image generation (text→image and/or image→image)
                clean_content = apply_generated_images_to_html(
                    clean_content,
                    query,
                    enable_text_to_image=enable_image_generation,
                    enable_image_to_image=enable_image_to_image,
                    input_image_data=image,
                    image_to_image_prompt=image_to_image_prompt,
                )
                
                yield {
                    code_output: clean_content,
                    history: _history,
                    sandbox: send_to_sandbox(clean_content) if language == "html" else "<div style='padding:1em;color:#888;text-align:center;'>Preview is only available for HTML. Please download your code using the download button above.</div>",
                    history_output: history_to_chatbot_messages(_history),
                }
            else:
                # Apply image generation (text→image and/or image→image)
                final_content = apply_generated_images_to_html(
                    clean_code,
                    query,
                    enable_text_to_image=enable_image_generation,
                    enable_image_to_image=enable_image_to_image,
                    input_image_data=image,
                    image_to_image_prompt=image_to_image_prompt,
                    text_to_image_prompt=text_to_image_prompt,
                )
                
                preview_val = None
                if language == "html":
                    preview_val = send_to_sandbox(final_content)
                elif language == "python" and is_streamlit_code(final_content):
                    preview_val = send_streamlit_to_stlite(final_content)
                yield {
                    code_output: final_content,
                    history: _history,
                    sandbox: preview_val or "<div style='padding:1em;color:#888;text-align:center;'>Preview is only available for HTML or Streamlit-in-Python.</div>",
                    history_output: history_to_chatbot_messages(_history),
                }
        return
    
    # Handle GLM-4.5V (multimodal vision)
    if _current_model["id"] == "zai-org/GLM-4.5V":
        # Build structured messages with a strong system prompt to enforce full HTML output
        structured = [
            {"role": "system", "content": GLM45V_HTML_SYSTEM_PROMPT}
        ]
        if image is not None:
            user_msg = {
                "role": "user",
                "content": [
                    {"type": "text", "text": enhanced_query},
                ],
            }
            try:
                import io, base64
                from PIL import Image
                import numpy as np
                if isinstance(image, np.ndarray):
                    image = Image.fromarray(image)
                buf = io.BytesIO()
                image.save(buf, format="PNG")
                b64 = base64.b64encode(buf.getvalue()).decode()
                user_msg["content"].append({
                    "type": "image_url",
                    "image_url": {"url": f"data:image/png;base64,{b64}"}
                })
                structured.append(user_msg)
            except Exception:
                structured.append({"role": "user", "content": enhanced_query})
        else:
            structured.append({"role": "user", "content": enhanced_query})

        try:
            client = InferenceClient(
                provider="auto",
                api_key=os.environ["HF_TOKEN"],
                bill_to="huggingface",
            )
            stream = client.chat.completions.create(
                model="zai-org/GLM-4.5V",
                messages=structured,
                stream=True,
            )
            content = ""
            for chunk in stream:
                if getattr(chunk, "choices", None) and chunk.choices and getattr(chunk.choices[0], "delta", None) and getattr(chunk.choices[0].delta, "content", None):
                    content += chunk.choices[0].delta.content
                    clean_code = remove_code_block(content)
                    # Ensure escaped newlines/tabs from model are rendered correctly
                    if "\\n" in clean_code:
                        clean_code = clean_code.replace("\\n", "\n")
                    if "\\t" in clean_code:
                        clean_code = clean_code.replace("\\t", "\t")
                    preview_val = None
                    if language == "html":
                        preview_val = send_to_sandbox(clean_code)
                    elif language == "python" and is_streamlit_code(clean_code):
                        preview_val = send_streamlit_to_stlite(clean_code)
                    yield {
                        code_output: gr.update(value=clean_code, language=get_gradio_language(language)),
                        history_output: history_to_chatbot_messages(_history),
                        sandbox: preview_val or "<div style='padding:1em;color:#888;text-align:center;'>Preview is only available for HTML or Streamlit-in-Python.</div>",
                    }
        except Exception as e:
            content = f"Error with GLM-4.5V: {str(e)}\n\nPlease make sure HF_TOKEN environment variable is set."

        clean_code = remove_code_block(content)
        if "\\n" in clean_code:
            clean_code = clean_code.replace("\\n", "\n")
        if "\\t" in clean_code:
            clean_code = clean_code.replace("\\t", "\t")
        _history.append([query, clean_code])
        preview_val = None
        if language == "html":
            preview_val = send_to_sandbox(clean_code)
        elif language == "python" and is_streamlit_code(clean_code):
            preview_val = send_streamlit_to_stlite(clean_code)
        yield {
            code_output: clean_code,
            history: _history,
            sandbox: preview_val or "<div style='padding:1em;color:#888;text-align:center;'>Preview is only available for HTML or Streamlit-in-Python.</div>",
            history_output: history_to_chatbot_messages(_history),
        }
        return

    # Use dynamic client based on selected model (for non-GLM-4.5 models)
    client = get_inference_client(_current_model["id"], provider)

    if image is not None:
        messages.append(create_multimodal_message(enhanced_query, image))
    else:
        messages.append({'role': 'user', 'content': enhanced_query})
    try:
        # Handle Mistral API method difference
        if _current_model["id"] == "codestral-2508":
            completion = client.chat.stream(
                model=_current_model["id"],
                messages=messages,
                max_tokens=16384
            )

        else:
            # Poe expects model id "GPT-5" and uses max_tokens
            if _current_model["id"] == "gpt-5":
                completion = client.chat.completions.create(
                    model="GPT-5",
                    messages=messages,
                    stream=True,
                    max_tokens=16384
                )
            elif _current_model["id"] == "grok-4":
                completion = client.chat.completions.create(
                    model="Grok-4",
                    messages=messages,
                    stream=True,
                    max_tokens=16384
                )
            else:
                completion = client.chat.completions.create(
                    model=_current_model["id"],
                    messages=messages,
                    stream=True,
                    max_tokens=16384
                )
        content = ""
        # For Poe/GPT-5, maintain a simple code-fence state machine to only accumulate code
        poe_inside_code_block = False
        poe_partial_buffer = ""
        for chunk in completion:
            # Handle different response formats for Mistral vs others
            chunk_content = None
            if _current_model["id"] == "codestral-2508":
                # Mistral format: chunk.data.choices[0].delta.content
                if (
                    hasattr(chunk, "data") and chunk.data and
                    hasattr(chunk.data, "choices") and chunk.data.choices and 
                    hasattr(chunk.data.choices[0], "delta") and 
                    hasattr(chunk.data.choices[0].delta, "content") and 
                    chunk.data.choices[0].delta.content is not None
                ):
                    chunk_content = chunk.data.choices[0].delta.content
            else:
                # OpenAI format: chunk.choices[0].delta.content
                if (
                    hasattr(chunk, "choices") and chunk.choices and 
                    hasattr(chunk.choices[0], "delta") and 
                    hasattr(chunk.choices[0].delta, "content") and 
                    chunk.choices[0].delta.content is not None
                ):
                    chunk_content = chunk.choices[0].delta.content
            
            if chunk_content:
                if _current_model["id"] == "gpt-5":
                    # If this chunk is only placeholder thinking, surface a status update without polluting content
                    if is_placeholder_thinking_only(chunk_content):
                        status_line = extract_last_thinking_line(chunk_content)
                        yield {
                            code_output: gr.update(value=(content or "") + "\n<!-- " + status_line + " -->", language="html"),
                            history_output: history_to_chatbot_messages(_history),
                            sandbox: "<div style='padding:1em;color:#888;text-align:center;'>" + status_line + "</div>",
                        }
                        continue
                    # Filter placeholders
                    incoming = strip_placeholder_thinking(chunk_content)
                    # Process code fences incrementally, only keep content inside fences
                    s = poe_partial_buffer + incoming
                    append_text = ""
                    i = 0
                    # Find all triple backticks positions
                    for m in re.finditer(r"```", s):
                        if not poe_inside_code_block:
                            # Opening fence. Require a newline to confirm full opener so we can skip optional language line
                            nl = s.find("\n", m.end())
                            if nl == -1:
                                # Incomplete opener; buffer from this fence and wait for more
                                poe_partial_buffer = s[m.start():]
                                s = None
                                break
                            # Enter code, skip past newline after optional language token
                            poe_inside_code_block = True
                            i = nl + 1
                        else:
                            # Closing fence, append content inside and exit code
                            append_text += s[i:m.start()]
                            poe_inside_code_block = False
                            i = m.end()
                    if s is not None:
                        if poe_inside_code_block:
                            append_text += s[i:]
                            poe_partial_buffer = ""
                        else:
                            poe_partial_buffer = s[i:]
                    if append_text:
                        content += append_text
                else:
                    # Append content, filtering out placeholder thinking lines
                    content += strip_placeholder_thinking(chunk_content)
                search_status = " (with web search)" if enable_search and tavily_client else ""
                
                # Handle transformers.js output differently
                if language == "transformers.js":
                    files = parse_transformers_js_output(content)
                    if files['index.html'] and files['index.js'] and files['style.css']:
                        # Model returned complete transformers.js output
                        formatted_output = format_transformers_js_output(files)
                        yield {
                            code_output: gr.update(value=formatted_output, language="html"),
                            history_output: history_to_chatbot_messages(_history),
                            sandbox: send_transformers_to_sandbox(files) if files['index.html'] else "<div style='padding:1em;color:#888;text-align:center;'>Preview is only available for HTML. Please download your code using the download button above.</div>",
                        }
                    elif has_existing_content:
                        # Model is returning search/replace changes for transformers.js - apply them
                        last_content = _history[-1][1] if _history and len(_history[-1]) > 1 else ""
                        modified_content = apply_transformers_js_search_replace_changes(last_content, content)
                        _mf = parse_transformers_js_output(modified_content)
                        yield {
                            code_output: gr.update(value=modified_content, language="html"),
                            history_output: history_to_chatbot_messages(_history),
                            sandbox: send_transformers_to_sandbox(_mf) if _mf['index.html'] else "<div style='padding:1em;color:#888;text-align:center;'>Preview is only available for HTML. Please download your code using the download button above.</div>",
                        }
                    else:
                        # Still streaming, show partial content
                        yield {
                            code_output: gr.update(value=content, language="html"),
                            history_output: history_to_chatbot_messages(_history),
                            sandbox: "<div style='padding:1em;color:#888;text-align:center;'>Generating transformers.js app...</div>",
                        }
                elif language == "svelte":
                    # For Svelte, just show the content as it streams
                    # We'll parse it properly in the final response
                    yield {
                        code_output: gr.update(value=content, language="html"),
                        history_output: history_to_chatbot_messages(_history),
                        sandbox: "<div style='padding:1em;color:#888;text-align:center;'>Generating Svelte app...</div>",
                    }
                else:
                    clean_code = remove_code_block(content)
                    if has_existing_content:
                        # Handle modification of existing content
                        if clean_code.strip().startswith("<!DOCTYPE html>") or clean_code.strip().startswith("<html"):
                            # Model returned a complete HTML file
                            preview_val = None
                            if language == "html":
                                preview_val = send_to_sandbox(clean_code)
                            elif language == "python" and is_streamlit_code(clean_code):
                                preview_val = send_streamlit_to_stlite(clean_code)
                            elif language == "gradio" or (language == "python" and is_gradio_code(clean_code)):
                                preview_val = send_gradio_to_lite(clean_code)
                            yield {
                                code_output: gr.update(value=clean_code, language=get_gradio_language(language)),
                                history_output: history_to_chatbot_messages(_history),
                                sandbox: preview_val or "<div style='padding:1em;color:#888;text-align:center;'>Preview is only available for HTML or Streamlit-in-Python.</div>",
                            }
                        else:
                            # Model returned search/replace changes - apply them
                            last_content = _history[-1][1] if _history and len(_history[-1]) > 1 else ""
                            modified_content = apply_search_replace_changes(last_content, clean_code)
                            clean_content = remove_code_block(modified_content)
                            preview_val = None
                            if language == "html":
                                preview_val = send_to_sandbox(clean_content)
                            elif language == "python" and is_streamlit_code(clean_content):
                                preview_val = send_streamlit_to_stlite(clean_content)
                            elif language == "gradio" or (language == "python" and is_gradio_code(clean_content)):
                                preview_val = send_gradio_to_lite(clean_content)
                            yield {
                                code_output: gr.update(value=clean_content, language=get_gradio_language(language)),
                                history_output: history_to_chatbot_messages(_history),
                                sandbox: preview_val or "<div style='padding:1em;color:#888;text-align:center;'>Preview is only available for HTML or Streamlit-in-Python.</div>",
                            }
                    else:
                        preview_val = None
                        if language == "html":
                            preview_val = send_to_sandbox(clean_code)
                        elif language == "python" and is_streamlit_code(clean_code):
                            preview_val = send_streamlit_to_stlite(clean_code)
                        elif language == "gradio" or (language == "python" and is_gradio_code(clean_code)):
                            preview_val = send_gradio_to_lite(clean_code)
                        yield {
                            code_output: gr.update(value=clean_code, language=get_gradio_language(language)),
                            history_output: history_to_chatbot_messages(_history),
                            sandbox: preview_val or "<div style='padding:1em;color:#888;text-align:center;'>Preview is only available for HTML or Streamlit-in-Python.</div>",
                        }
            # Skip chunks with empty choices (end of stream)
            # Do not treat as error
        # Handle response based on whether this is a modification or new generation
        if language == "transformers.js":
            # Handle transformers.js output
            files = parse_transformers_js_output(content)
            if files['index.html'] and files['index.js'] and files['style.css']:
                # Model returned complete transformers.js output
                formatted_output = format_transformers_js_output(files)
                _history.append([query, formatted_output])
                yield {
                    code_output: formatted_output,
                    history: _history,
                    sandbox: send_transformers_to_sandbox(files),
                    history_output: history_to_chatbot_messages(_history),
                }
            elif has_existing_content:
                # Model returned search/replace changes for transformers.js - apply them
                last_content = _history[-1][1] if _history and len(_history[-1]) > 1 else ""
                modified_content = apply_transformers_js_search_replace_changes(last_content, content)
                _history.append([query, modified_content])
                _mf = parse_transformers_js_output(modified_content)
                yield {
                    code_output: modified_content,
                    history: _history,
                    sandbox: send_transformers_to_sandbox(_mf),
                    history_output: history_to_chatbot_messages(_history),
                }
            else:
                # Fallback if parsing failed
                _history.append([query, content])
                yield {
                    code_output: content,
                    history: _history,
                    sandbox: "<div style='padding:1em;color:#888;text-align:center;'>Error parsing transformers.js output. Please try again.</div>",
                    history_output: history_to_chatbot_messages(_history),
                }
        elif language == "svelte":
            # Handle Svelte output
            files = parse_svelte_output(content)
            if files['src/App.svelte'] and files['src/app.css']:
                # Model returned complete Svelte output
                formatted_output = format_svelte_output(files)
                _history.append([query, formatted_output])
                yield {
                    code_output: formatted_output,
                    history: _history,
                    sandbox: "<div style='padding:1em;color:#888;text-align:center;'>Preview is only available for HTML. Please download your Svelte code using the download button above.</div>",
                    history_output: history_to_chatbot_messages(_history),
                }
            elif has_existing_content:
                # Model returned search/replace changes for Svelte - apply them
                last_content = _history[-1][1] if _history and len(_history[-1]) > 1 else ""
                modified_content = apply_search_replace_changes(last_content, content)
                _history.append([query, modified_content])
                yield {
                    code_output: modified_content,
                    history: _history,
                    sandbox: "<div style='padding:1em;color:#888;text-align:center;'>Preview is only available for HTML. Please download your Svelte code using the download button above.</div>",
                    history_output: history_to_chatbot_messages(_history),
                }
            else:
                # Fallback if parsing failed - just use the raw content
                _history.append([query, content])
                yield {
                    code_output: content,
                    history: _history,
                    sandbox: "<div style='padding:1em;color:#888;text-align:center;'>Preview is only available for HTML. Please download your Svelte code using the download button above.</div>",
                    history_output: history_to_chatbot_messages(_history),
                }
        elif has_existing_content:
            # Handle modification of existing content
            final_code = remove_code_block(content)
            if final_code.strip().startswith("<!DOCTYPE html>") or final_code.strip().startswith("<html"):
                # Model returned a complete HTML file
                clean_content = final_code
            else:
                # Model returned search/replace changes - apply them
                last_content = _history[-1][1] if _history and len(_history[-1]) > 1 else ""
                modified_content = apply_search_replace_changes(last_content, final_code)
                clean_content = remove_code_block(modified_content)
            
            # Apply image generation (text→image and/or image→image)
            clean_content = apply_generated_images_to_html(
                clean_content,
                query,
                enable_text_to_image=enable_image_generation,
                enable_image_to_image=enable_image_to_image,
                input_image_data=image,
                image_to_image_prompt=image_to_image_prompt,
                text_to_image_prompt=text_to_image_prompt,
            )
            
            # Update history with the cleaned content
            _history.append([query, clean_content])
            yield {
                code_output: clean_content,
                history: _history,
                sandbox: (send_to_sandbox(clean_content) if language == "html" else (send_streamlit_to_stlite(clean_content) if (language == "python" and is_streamlit_code(clean_content)) else "<div style='padding:1em;color:#888;text-align:center;'>Preview is only available for HTML or Streamlit-in-Python.</div>")),
                history_output: history_to_chatbot_messages(_history),
            }
        else:
            # Regular generation - use the content as is
            final_content = remove_code_block(content)
            
            # Apply image generation (text→image and/or image→image)
            final_content = apply_generated_images_to_html(
                final_content,
                query,
                enable_text_to_image=enable_image_generation,
                enable_image_to_image=enable_image_to_image,
                input_image_data=image,
                image_to_image_prompt=image_to_image_prompt,
                text_to_image_prompt=text_to_image_prompt,
            )
            
            _history.append([query, final_content])
            preview_val = None
            if language == "html":
                preview_val = send_to_sandbox(final_content)
            elif language == "python" and is_streamlit_code(final_content):
                preview_val = send_streamlit_to_stlite(final_content)
            elif language == "gradio" or (language == "python" and is_gradio_code(final_content)):
                preview_val = send_gradio_to_lite(final_content)
            yield {
                code_output: final_content,
                history: _history,
                sandbox: preview_val or "<div style='padding:1em;color:#888;text-align:center;'>Preview is only available for HTML or Streamlit-in-Python.</div>",
                history_output: history_to_chatbot_messages(_history),
            }
    except Exception as e:
        error_message = f"Error: {str(e)}"
        yield {
            code_output: error_message,
            history_output: history_to_chatbot_messages(_history),
        }

# Deploy to Spaces logic

def add_anycoder_tag_to_readme(api, repo_id):
    """Ensure the ``anycoder`` tag is present in a Space's README frontmatter.

    Downloads the Space's README.md, inserts ``- anycoder`` into the YAML
    frontmatter ``tags:`` list (creating the frontmatter or tags section if
    missing), and uploads the modified file back.

    Best-effort: any failure is printed as a warning and swallowed so that
    deployment can continue even when the README cannot be updated.

    Args:
        api: huggingface_hub ``HfApi``-like client providing
            ``hf_hub_download`` and ``upload_file``.
        repo_id: Full Space repo id, e.g. ``"username/space-name"``.
    """
    try:
        import os
        import re
        import tempfile

        # Fetch the current README from the Space repo.
        readme_path = api.hf_hub_download(
            repo_id=repo_id,
            filename="README.md",
            repo_type="space"
        )
        with open(readme_path, 'r', encoding='utf-8') as f:
            content = f.read()

        if content.startswith('---'):
            # Split "---<frontmatter>---<body>" into at most three parts.
            parts = content.split('---', 2)
            if len(parts) >= 3:
                frontmatter = parts[1].strip()
                body = parts[2]

                if 'tags:' in frontmatter:
                    # Append to the existing tag list only if not already there.
                    if '- anycoder' not in frontmatter:
                        frontmatter = re.sub(
                            r'(tags:\s*\n(?:\s*-\s*[^\n]+\n)*)',
                            r'\1- anycoder\n',
                            frontmatter,
                        )
                else:
                    # No tags section yet - create one.
                    frontmatter += '\ntags:\n- anycoder'

                new_content = f"---\n{frontmatter}\n---{body}"
            else:
                # Malformed frontmatter: inject a tags section right after the
                # opening marker.
                new_content = content.replace('---', '---\ntags:\n- anycoder\n---', 1)
        else:
            # No frontmatter at all - prepend one.
            new_content = f"---\ntags:\n- anycoder\n---\n\n{content}"

        # Write the modified README to a temp file and upload it. The temp file
        # is removed even when the upload fails (the original code leaked it on
        # upload errors because cleanup only ran on success).
        with tempfile.NamedTemporaryFile("w", suffix=".md", delete=False, encoding='utf-8') as f:
            f.write(new_content)
            temp_path = f.name
        try:
            api.upload_file(
                path_or_fileobj=temp_path,
                path_in_repo="README.md",
                repo_id=repo_id,
                repo_type="space"
            )
        finally:
            os.unlink(temp_path)

    except Exception as e:
        print(f"Warning: Could not modify README.md to add anycoder tag: {e}")

def extract_import_statements(code):
    """Extract third-party import statements from *code*.

    Parses the code with :mod:`ast` when possible and falls back to a
    line-based scan when the code does not parse. Imports of Python
    standard-library modules (``builtin_modules`` below) and of
    underscore-prefixed modules are skipped.

    Args:
        code: Source text to scan.

    Returns:
        list[str]: de-duplicated import statements, first-seen order.
    """
    import ast

    import_statements = []

    # Standard-library modules we never want to report as dependencies.
    builtin_modules = {
        'os', 'sys', 'json', 'time', 'datetime', 'random', 'math', 're', 'collections',
        'itertools', 'functools', 'pathlib', 'urllib', 'http', 'email', 'html', 'xml',
        'csv', 'tempfile', 'shutil', 'subprocess', 'threading', 'multiprocessing',
        'asyncio', 'logging', 'typing', 'base64', 'hashlib', 'secrets', 'uuid',
        'copy', 'pickle', 'io', 'contextlib', 'warnings', 'sqlite3', 'gzip', 'zipfile',
        'tarfile', 'socket', 'ssl', 'platform', 'getpass', 'pwd', 'grp', 'stat',
        'glob', 'fnmatch', 'linecache', 'traceback', 'inspect', 'keyword', 'token',
        'tokenize', 'ast', 'code', 'codeop', 'dis', 'py_compile', 'compileall',
        'importlib', 'pkgutil', 'modulefinder', 'runpy', 'site', 'sysconfig'
    }

    def _wanted(module_name):
        # Top-level package name only; skip stdlib and private modules.
        return module_name not in builtin_modules and not module_name.startswith('_')

    try:
        tree = ast.parse(code)

        for node in ast.walk(tree):
            if isinstance(node, ast.Import):
                for alias in node.names:
                    if _wanted(alias.name.split('.')[0]):
                        import_statements.append(f"import {alias.name}")
            elif isinstance(node, ast.ImportFrom):
                # node.module is None for relative imports like "from . import x".
                if node.module and _wanted(node.module.split('.')[0]):
                    names = [alias.name for alias in node.names]
                    import_statements.append(f"from {node.module} import {', '.join(names)}")

    except SyntaxError:
        # Fallback for code that does not parse: scan line by line.
        for line in code.split('\n'):
            line = line.strip()
            if line.startswith(('import ', 'from ')):
                parts = line.split()
                # Guard against malformed lines with no module token (the
                # original indexed split()[1] unconditionally).
                if len(parts) < 2:
                    continue
                if _wanted(parts[1].split('.')[0]):
                    import_statements.append(line)

    # De-duplicate while preserving first-seen order (list(set(...)) gave a
    # hash-randomized, nondeterministic order).
    return list(dict.fromkeys(import_statements))

def generate_requirements_txt_with_llm(import_statements):
    """Generate requirements.txt content for the given import statements.

    Asks a code LLM for a comprehensive dependency list. If the LLM call fails
    for any reason (missing credentials, network error, etc.), a deterministic
    fallback maps module names to PyPI package names directly.

    Args:
        import_statements: list of ``"import x"`` / ``"from x import y"``
            strings, e.g. from :func:`extract_import_statements`.

    Returns:
        str: requirements.txt content, always newline-terminated.
    """
    if not import_statements:
        return "# No additional dependencies required\n"

    try:
        client = get_inference_client("Qwen/Qwen3-Coder-480B-A35B-Instruct", "auto")

        imports_text = '\n'.join(import_statements)

        prompt = f"""Based on the following Python import statements, generate a comprehensive requirements.txt file with all necessary and commonly used related packages:

{imports_text}

Instructions:
- Include the direct packages needed for the imports
- Include commonly used companion packages and dependencies for better functionality
- Use correct PyPI package names (e.g., cv2 -> opencv-python, PIL -> Pillow, sklearn -> scikit-learn)
- Examples of comprehensive dependencies:
  * transformers often needs: accelerate, torch, tokenizers, datasets
  * gradio often needs: requests, Pillow for image handling
  * pandas often needs: numpy, openpyxl for Excel files
  * matplotlib often needs: numpy, pillow for image saving
  * sklearn often needs: numpy, scipy, joblib
  * streamlit often needs: pandas, numpy, requests
  * opencv-python often needs: numpy, pillow
  * fastapi often needs: uvicorn, pydantic
  * torch often needs: torchvision, torchaudio (if doing computer vision/audio)
- Include packages for common file formats if relevant (openpyxl, python-docx, PyPDF2)
- Do not include Python built-in modules
- Do not specify versions unless there are known compatibility issues
- One package per line
- If no external packages are needed, return "# No additional dependencies required"

Generate a comprehensive requirements.txt that ensures the application will work smoothly:"""

        messages = [
            {"role": "system", "content": "You are a Python packaging expert specializing in creating comprehensive, production-ready requirements.txt files. Your goal is to ensure applications work smoothly by including not just direct dependencies but also commonly needed companion packages, popular extensions, and supporting libraries that developers typically need together."},
            {"role": "user", "content": prompt}
        ]

        response = client.chat.completions.create(
            model="Qwen/Qwen3-Coder-480B-A35B-Instruct",
            messages=messages,
            max_tokens=1024,
            temperature=0.1
        )

        requirements_content = response.choices[0].message.content.strip()

        # Strip Markdown code fences if the model wrapped its answer in them.
        if '```' in requirements_content:
            in_code_block = False
            clean_lines = []
            for line in requirements_content.split('\n'):
                if line.strip().startswith('```'):
                    in_code_block = not in_code_block
                    continue
                if in_code_block:
                    clean_lines.append(line)
            requirements_content = '\n'.join(clean_lines).strip()

        # Normalize to a newline-terminated file body.
        if requirements_content and not requirements_content.endswith('\n'):
            requirements_content += '\n'

        return requirements_content if requirements_content else "# No additional dependencies required\n"

    except Exception:
        # Deliberate best-effort fallback: map module names straight to PyPI
        # package names so deployment still produces a usable file.
        special_cases = {
            'cv2': 'opencv-python',
            'PIL': 'Pillow',
            'sklearn': 'scikit-learn',
            'skimage': 'scikit-image',
            'bs4': 'beautifulsoup4'
        }

        dependencies = set()
        for stmt in import_statements:
            # Both "import x[.y]" and "from x[.y] import z" carry the module
            # name as the second whitespace token (the original duplicated
            # this logic in two identical branches).
            if stmt.startswith(('import ', 'from ')):
                module_name = stmt.split()[1].split('.')[0]
                dependencies.add(special_cases.get(module_name, module_name))

        if dependencies:
            return '\n'.join(sorted(dependencies)) + '\n'
        return "# No additional dependencies required\n"

def wrap_html_in_gradio_app(html_code):
    """Wrap raw HTML in a minimal Gradio ``app.py`` that serves it via gr.HTML.

    The HTML is embedded inside a triple-quoted Python string literal.
    Backslashes are escaped first and then every double quote: once quotes are
    escaped, no ``\"\"\"`` sequence (or trailing quote) can terminate the
    literal early. The original code escaped only literal triple quotes, which
    corrupted HTML containing backslash escapes (e.g. ``\\n`` in inline JS)
    and produced invalid Python for HTML ending in a double quote.

    Args:
        html_code: The HTML document to serve.

    Returns:
        str: Complete ``app.py`` source, optionally preceded by a commented
        requirements.txt suggestion.
    """
    safe_html = html_code.replace('\\', '\\\\').replace('"', '\\"')

    # Surface inferred dependencies as a comment block at the top of app.py.
    import_statements = extract_import_statements(html_code)
    requirements_comment = ""
    if import_statements:
        requirements_content = generate_requirements_txt_with_llm(import_statements)
        requirements_comment = (
            "# Generated requirements.txt content (create this file manually if needed):\n"
            + '\n'.join(f"# {line}" for line in requirements_content.strip().split('\n')) + '\n\n'
        )

    return (
        f'{requirements_comment}'
        'import gradio as gr\n\n'
        'def show_html():\n'
        f'    return """{safe_html}"""\n\n'
        'demo = gr.Interface(fn=show_html, inputs=None, outputs=gr.HTML())\n\n'
        'if __name__ == "__main__":\n'
        '    demo.launch()\n'
    )

def deploy_to_spaces(code):
    """Open a pre-filled huggingface.co/new-space page (Gradio SDK) for *code*.

    The HTML is wrapped into a Gradio ``app.py`` and passed as a file parameter
    in the URL query string. Empty or whitespace-only input is a no-op.
    """
    trimmed = code.strip() if code else ""
    if not trimmed:
        return  # Nothing to deploy.

    app_py = wrap_html_in_gradio_app(trimmed)

    space_query = urllib.parse.urlencode({
        "name": "new-space",
        "sdk": "gradio"
    })
    file_query = urllib.parse.urlencode({
        "files[0][path]": "app.py",
        "files[0][content]": app_py
    })

    webbrowser.open_new_tab(
        f"https://huggingface.co/new-space?{space_query}&{file_query}"
    )

def wrap_html_in_static_app(html_code):
    """Return *html_code* unchanged: static Spaces serve index.html verbatim."""
    return html_code

def deploy_to_spaces_static(code):
    """Open a pre-filled huggingface.co/new-space page (static SDK) for *code*.

    The HTML is sent as-is as ``index.html``. Empty or whitespace-only input
    is a no-op.
    """
    trimmed = code.strip() if code else ""
    if not trimmed:
        return  # Nothing to deploy.

    index_html = wrap_html_in_static_app(trimmed)

    space_query = urllib.parse.urlencode({
        "name": "new-space",
        "sdk": "static"
    })
    file_query = urllib.parse.urlencode({
        "files[0][path]": "index.html",
        "files[0][content]": index_html
    })

    webbrowser.open_new_tab(
        f"https://huggingface.co/new-space?{space_query}&{file_query}"
    )

def check_hf_space_url(url: str) -> Tuple[bool, Optional[str], Optional[str]]:
    """Validate a Hugging Face Spaces URL and extract its owner and name.

    Accepts ``huggingface.co`` and ``hf.co`` hosts, with or without an
    ``http(s)://`` scheme, case-insensitively. Repo-id segments may contain
    letters, digits, ``_``, ``-`` and ``.``, and a trailing slash is allowed
    (the original pattern rejected dots and trailing slashes, both of which
    occur in real Space URLs).

    Args:
        url: The URL to check; surrounding whitespace is ignored.

    Returns:
        ``(True, username, project_name)`` on a match,
        ``(False, None, None)`` otherwise.
    """
    import re

    url_pattern = re.compile(
        r'^(https?://)?(huggingface\.co|hf\.co)/spaces/([\w.-]+)/([\w.-]+)/?$',
        re.IGNORECASE
    )

    match = url_pattern.match(url.strip())
    if match:
        return True, match.group(3), match.group(4)
    return False, None, None

def fetch_hf_space_content(username: str, project_name: str) -> str:
    """Fetch the main source file of a Hugging Face Space as annotated text.

    Looks up the Space's SDK, tries a list of likely entry-point filenames for
    that SDK, and returns the first file found, prefixed with a small metadata
    header. On any failure a human-readable ``"Error: ..."`` string is
    returned instead of raising.

    Args:
        username: Space owner.
        project_name: Space name.

    Returns:
        str: Annotated file content, or an ``"Error: ..."`` message.
    """
    try:
        from huggingface_hub import HfApi

        api = HfApi()
        repo_id = f"{username}/{project_name}"
        space_info = api.space_info(repo_id)
        sdk = space_info.sdk
        main_file = None
        local_path = None  # local cache path of the downloaded main file

        # Candidate entry-point filenames, most likely first, per SDK.
        if sdk == "static":
            file_patterns = ["index.html"]
        elif sdk == "gradio":
            file_patterns = ["app.py", "main.py", "gradio_app.py"]
        elif sdk == "streamlit":
            file_patterns = ["streamlit_app.py", "src/streamlit_app.py", "app.py", "src/app.py", "main.py", "src/main.py", "Home.py", "src/Home.py", "🏠_Home.py", "src/🏠_Home.py", "1_🏠_Home.py", "src/1_🏠_Home.py"]
        else:
            # Try common files for unknown SDKs
            file_patterns = ["app.py", "src/app.py", "index.html", "streamlit_app.py", "src/streamlit_app.py", "main.py", "src/main.py", "Home.py", "src/Home.py"]

        # Try each candidate; hf_hub_download raises when the file is absent.
        for file in file_patterns:
            try:
                local_path = api.hf_hub_download(
                    repo_id=repo_id,
                    filename=file,
                    repo_type="space"
                )
                main_file = file
                break
            except Exception:
                continue

        # Fallback: list the repo and look for plausible Python entry points
        # (root level or src/ directory only).
        if not main_file and sdk in ["streamlit", "gradio"]:
            try:
                from huggingface_hub import list_repo_files
                files = list_repo_files(repo_id=repo_id, repo_type="space")

                python_files = [f for f in files if f.endswith('.py') and not f.startswith('.') and
                              (('/' not in f) or f.startswith('src/'))]

                for py_file in python_files:
                    try:
                        local_path = api.hf_hub_download(
                            repo_id=repo_id,
                            filename=py_file,
                            repo_type="space"
                        )
                        main_file = py_file
                        break
                    except Exception:
                        continue
            except Exception:
                pass

        if main_file:
            # Reuse the file already downloaded above (the original code
            # redundantly downloaded the same file a second time here).
            with open(local_path, 'r', encoding='utf-8') as f:
                file_content = f.read()

            return f"""IMPORTED PROJECT FROM HUGGING FACE SPACE
==============================================

Space: {username}/{project_name}
SDK: {sdk}
Main File: {main_file}

{file_content}"""
        else:
            # No entry point found: report the available files to aid debugging.
            try:
                from huggingface_hub import list_repo_files
                files = list_repo_files(repo_id=repo_id, repo_type="space")
                available_files = [f for f in files if not f.startswith('.') and not f.endswith('.md')]
                return f"Error: Could not find main file in space {username}/{project_name}.\n\nSDK: {sdk}\nAvailable files: {', '.join(available_files[:10])}{'...' if len(available_files) > 10 else ''}\n\nTried looking for: {', '.join(file_patterns)}"
            except Exception:
                return f"Error: Could not find main file in space {username}/{project_name}. Expected files for {sdk} SDK: {', '.join(file_patterns) if 'file_patterns' in locals() else 'standard files'}"

    except Exception as e:
        return f"Error fetching space content: {str(e)}"

def load_project_from_url(url: str) -> Tuple[str, str]:
    """Import a project from a Hugging Face Space URL.

    Validates the URL, fetches the Space's main file, and strips the metadata
    header that :func:`fetch_hf_space_content` prepends.

    Returns:
        ``(status_message, code_content)``; ``code_content`` is ``""`` when
        the URL is invalid or the fetch failed.
    """
    ok, username, project_name = check_hf_space_url(url)
    if not ok:
        return "Error: Please enter a valid Hugging Face Spaces URL.\n\nExpected format: https://huggingface.co/spaces/username/project", ""

    content = fetch_hf_space_content(username, project_name)
    if content.startswith("Error:"):
        return content, ""

    # The real code begins at the first non-empty line that is not part of
    # the metadata header.
    header_prefixes = ('=', 'IMPORTED PROJECT', 'Space:', 'SDK:', 'Main File:')
    lines = content.split('\n')
    code_start = 0
    for idx, line in enumerate(lines):
        if line.strip() and not line.startswith(header_prefixes):
            code_start = idx
            break

    code_content = '\n'.join(lines[code_start:])
    return f"βœ… Successfully imported project from {username}/{project_name}", code_content

# Gradio Theme Configurations with proper theme objects
def get_saved_theme():
    """Return the theme name stored in ``.theme_preference``, or the default.

    Falls back to ``"Developer"`` when the file is missing, unreadable, or
    empty (the original returned an empty string for an empty file and its
    bare ``except:`` also swallowed KeyboardInterrupt/SystemExit).
    """
    try:
        if os.path.exists('.theme_preference'):
            with open('.theme_preference', 'r') as f:
                saved = f.read().strip()
            if saved:
                return saved
    except OSError:
        # Unreadable preference file - use the default below.
        pass
    return "Developer"

def save_theme_preference(theme_name):
    """Persist *theme_name* to '.theme_preference' (best effort).

    Write failures are deliberately ignored so a read-only filesystem never
    breaks the UI; only OS-level errors are suppressed.
    """
    try:
        with open('.theme_preference', 'w', encoding='utf-8') as f:
            f.write(theme_name)
    except OSError:
        # Narrow except: the old bare `except:` also swallowed KeyboardInterrupt
        pass

# Registry of selectable Gradio themes: display name -> ready-built theme
# object plus a short human-readable description. Stock themes are used as-is;
# the custom entries ("Dark Ocean" onward) start from a stock theme and
# override individual CSS-variable slots via .set().
THEME_CONFIGS = {
    "Default": {
        "theme": gr.themes.Default(),
        "description": "Gradio's standard theme with clean orange accents"
    },
    "Base": {
        "theme": gr.themes.Base(
            primary_hue="blue",
            secondary_hue="slate",
            neutral_hue="slate",
            text_size="sm",
            spacing_size="sm",
            radius_size="md"
        ),
        "description": "Minimal foundation theme with blue accents"
    },
    "Soft": {
        "theme": gr.themes.Soft(
            primary_hue="emerald",
            secondary_hue="emerald",
            neutral_hue="slate",
            text_size="sm",
            spacing_size="md",
            radius_size="lg"
        ),
        "description": "Gentle rounded theme with soft emerald colors"
    },
    "Monochrome": {
        "theme": gr.themes.Monochrome(
            primary_hue="slate",
            secondary_hue="slate",
            neutral_hue="slate",
            text_size="sm",
            spacing_size="sm",
            radius_size="sm"
        ),
        "description": "Elegant black and white design"
    },
    "Glass": {
        "theme": gr.themes.Glass(
            primary_hue="blue",
            secondary_hue="blue",
            neutral_hue="slate",
            text_size="sm",
            spacing_size="md",
            radius_size="lg"
        ),
        "description": "Modern glassmorphism with blur effects"
    },
    # Custom dark theme: slate/blue palette applied to both light and dark
    # variants so it stays dark regardless of the browser's color scheme.
    "Dark Ocean": {
        "theme": gr.themes.Base(
            primary_hue="blue",
            secondary_hue="slate", 
            neutral_hue="slate",
            text_size="sm",
            spacing_size="sm",
            radius_size="md"
        ).set(
            body_background_fill="#0f172a",
            body_background_fill_dark="#0f172a",
            background_fill_primary="#3b82f6",
            background_fill_secondary="#1e293b",
            border_color_primary="#334155",
            block_background_fill="#1e293b",
            block_border_color="#334155",
            body_text_color="#f1f5f9",
            body_text_color_dark="#f1f5f9",
            block_label_text_color="#f1f5f9",
            block_label_text_color_dark="#f1f5f9",
            block_title_text_color="#f1f5f9",
            block_title_text_color_dark="#f1f5f9",
            input_background_fill="#0f172a",
            input_background_fill_dark="#0f172a",
            input_border_color="#334155",
            input_border_color_dark="#334155",
            button_primary_background_fill="#3b82f6",
            button_primary_border_color="#3b82f6",
            button_secondary_background_fill="#334155",
            button_secondary_border_color="#475569"
        ),
        "description": "Deep blue dark theme perfect for coding"
    },
    # Neon magenta/cyan on near-black; square corners + Orbitron font for the
    # sci-fi look.
    "Cyberpunk": {
        "theme": gr.themes.Base(
            primary_hue="fuchsia",
            secondary_hue="cyan",
            neutral_hue="slate",
            text_size="sm",
            spacing_size="sm",
            radius_size="none",
            font="Orbitron"
        ).set(
            body_background_fill="#0a0a0f",
            body_background_fill_dark="#0a0a0f",
            background_fill_primary="#ff10f0",
            background_fill_secondary="#1a1a2e",
            border_color_primary="#00f5ff",
            block_background_fill="#1a1a2e",
            block_border_color="#00f5ff",
            body_text_color="#00f5ff",
            body_text_color_dark="#00f5ff",
            block_label_text_color="#ff10f0",
            block_label_text_color_dark="#ff10f0",
            block_title_text_color="#ff10f0",
            block_title_text_color_dark="#ff10f0",
            input_background_fill="#0a0a0f",
            input_background_fill_dark="#0a0a0f",
            input_border_color="#00f5ff",
            input_border_color_dark="#00f5ff",
            button_primary_background_fill="#ff10f0",
            button_primary_border_color="#ff10f0",
            button_secondary_background_fill="#1a1a2e",
            button_secondary_border_color="#00f5ff"
        ),
        "description": "Futuristic neon cyber aesthetics"
    },
    # Green-on-light palette with a dark-mode counterpart.
    "Forest": {
        "theme": gr.themes.Soft(
            primary_hue="emerald",
            secondary_hue="green",
            neutral_hue="emerald",
            text_size="sm",
            spacing_size="md",
            radius_size="lg"
        ).set(
            body_background_fill="#f0fdf4",
            body_background_fill_dark="#064e3b",
            background_fill_primary="#059669",
            background_fill_secondary="#ecfdf5",
            border_color_primary="#bbf7d0",
            block_background_fill="#ffffff",
            block_border_color="#d1fae5",
            body_text_color="#064e3b",
            body_text_color_dark="#f0fdf4",
            block_label_text_color="#064e3b",
            block_label_text_color_dark="#f0fdf4",
            block_title_text_color="#059669",
            block_title_text_color_dark="#10b981"
        ),
        "description": "Nature-inspired green earth tones"
    },
    # Accessibility theme: pure black on white with a yellow primary button,
    # larger text/spacing for readability.
    "High Contrast": {
        "theme": gr.themes.Base(
            primary_hue="yellow",
            secondary_hue="slate",
            neutral_hue="slate",
            text_size="lg",
            spacing_size="lg",
            radius_size="sm"
        ).set(
            body_background_fill="#ffffff",
            body_background_fill_dark="#ffffff",
            background_fill_primary="#000000",
            background_fill_secondary="#ffffff",
            border_color_primary="#000000",
            block_background_fill="#ffffff",
            block_border_color="#000000",
            body_text_color="#000000",
            body_text_color_dark="#000000",
            block_label_text_color="#000000",
            block_label_text_color_dark="#000000",
            block_title_text_color="#000000",
            block_title_text_color_dark="#000000",
            input_background_fill="#ffffff",
            input_background_fill_dark="#ffffff",
            input_border_color="#000000",
            input_border_color_dark="#000000",
            button_primary_background_fill="#ffff00",
            button_primary_border_color="#000000",
            button_secondary_background_fill="#ffffff",
            button_secondary_border_color="#000000"
        ),
        "description": "Accessibility-focused high visibility"
    },
    # Default theme of the app (see get_saved_theme): VS Code dark palette.
    "Developer": {
        "theme": gr.themes.Base(
            primary_hue="blue",
            secondary_hue="slate",
            neutral_hue="slate",
            text_size="sm",
            spacing_size="sm",
            radius_size="sm",
            font="Consolas"
        ).set(
            # VS Code exact colors
            body_background_fill="#1e1e1e",           # VS Code editor background
            body_background_fill_dark="#1e1e1e",
            background_fill_primary="#007acc",        # VS Code blue accent
            background_fill_secondary="#252526",      # VS Code sidebar background
            border_color_primary="#3e3e42",          # VS Code border color
            block_background_fill="#252526",         # VS Code panel background
            block_border_color="#3e3e42",           # VS Code subtle borders
            body_text_color="#cccccc",               # VS Code default text
            body_text_color_dark="#cccccc",
            block_label_text_color="#cccccc",
            block_label_text_color_dark="#cccccc",
            block_title_text_color="#ffffff",        # VS Code active text
            block_title_text_color_dark="#ffffff",
            input_background_fill="#2d2d30",         # VS Code input background
            input_background_fill_dark="#2d2d30",
            input_border_color="#3e3e42",           # VS Code input border
            input_border_color_dark="#3e3e42",
            input_border_color_focus="#007acc",      # VS Code focus border
            input_border_color_focus_dark="#007acc",
            button_primary_background_fill="#007acc", # VS Code button blue
            button_primary_border_color="#007acc",
            button_primary_background_fill_hover="#0e639c", # VS Code button hover
            button_secondary_background_fill="#2d2d30",
            button_secondary_border_color="#3e3e42",
            button_secondary_text_color="#cccccc"
        ),
        "description": "Authentic VS Code dark theme with exact color matching"
    }
}

# Additional theme information for developers
# Short feature bullets displayed next to each theme's description in the
# theme picker. Keys must match the THEME_CONFIGS display names; missing keys
# are tolerated (callers use .get with a default).
THEME_FEATURES = {
    "Default": ["Orange accents", "Clean layout", "Standard Gradio look"],
    "Base": ["Blue accents", "Minimal styling", "Clean foundation"],
    "Soft": ["Rounded corners", "Emerald colors", "Comfortable viewing"],
    "Monochrome": ["Black & white", "High elegance", "Timeless design"],
    "Glass": ["Glassmorphism", "Blur effects", "Translucent elements"],
    "Dark Ocean": ["Deep blue palette", "Dark theme", "Easy on eyes"],
    "Cyberpunk": ["Neon cyan/magenta", "Futuristic fonts", "Cyber vibes"],
    "Forest": ["Nature inspired", "Green tones", "Organic rounded"],
    "High Contrast": ["Black/white/yellow", "High visibility", "Accessibility"],
    "Developer": ["Authentic VS Code colors", "Consolas/Monaco fonts", "Exact theme matching"]
}

# Load the persisted theme choice and resolve it to a theme object.
# Fall back to "Developer" when the saved name is unknown (e.g. a theme was
# renamed/removed between runs, or the preference file was edited by hand) --
# a plain THEME_CONFIGS[name] lookup would raise KeyError and abort startup.
current_theme_name = get_saved_theme()
if current_theme_name not in THEME_CONFIGS:
    current_theme_name = "Developer"
current_theme = THEME_CONFIGS[current_theme_name]["theme"]

# Main application with proper Gradio theming
with gr.Blocks(
    title="AnyCoder - AI Code Generator",
    theme=current_theme,
    css="""
        .theme-info { font-size: 0.9em; opacity: 0.8; }
        .theme-description { padding: 8px 0; }
        .theme-status { 
            padding: 10px; 
            border-radius: 8px; 
            background: rgba(34, 197, 94, 0.1); 
            border: 1px solid rgba(34, 197, 94, 0.2); 
            margin: 8px 0; 
        }
        .restart-needed {
            padding: 12px;
            border-radius: 8px;
            background: rgba(255, 193, 7, 0.1);
            border: 1px solid rgba(255, 193, 7, 0.3);
            margin: 8px 0;
            text-align: center;
        }
    """
) as demo:
    history = gr.State([])
    setting = gr.State({
        "system": HTML_SYSTEM_PROMPT,
    })
    current_model = gr.State(DEFAULT_MODEL)
    open_panel = gr.State(None)
    last_login_state = gr.State(None)

    with gr.Sidebar():
        login_button = gr.LoginButton()
        
        # Theme Selector (hidden for end users, developers can modify code)
        with gr.Column(visible=False):
            theme_dropdown = gr.Dropdown(
                choices=list(THEME_CONFIGS.keys()),
                value=current_theme_name,
                label="Select Theme",
                info="Choose your preferred visual style"
            )
            theme_description = gr.Markdown("")
            apply_theme_btn = gr.Button("Apply Theme", variant="primary", size="sm")
            theme_status = gr.Markdown("")
        
        # Add Load Project section
        gr.Markdown("πŸ“₯ Load Existing Project")
        load_project_url = gr.Textbox(
            label="Hugging Face Space URL",
            placeholder="https://huggingface.co/spaces/username/project",
            lines=1
        )
        load_project_btn = gr.Button("Import Project", variant="secondary", size="sm")
        load_project_status = gr.Markdown(visible=False)
        
        gr.Markdown("---")
        
        input = gr.Textbox(
            label="What would you like to build?",
            placeholder="Describe your application...",
            lines=3,
            visible=True
        )
        # Language dropdown for code generation (add Streamlit and Gradio as first-class options)
        language_choices = [
            "html", "streamlit", "gradio", "python", "transformers.js", "svelte", "c", "cpp", "markdown", "latex", "json", "css", "javascript", "jinja2", "typescript", "yaml", "dockerfile", "shell", "r", "sql", "sql-msSQL", "sql-mySQL", "sql-mariaDB", "sql-sqlite", "sql-cassandra", "sql-plSQL", "sql-hive", "sql-pgSQL", "sql-gql", "sql-gpSQL", "sql-sparkSQL", "sql-esper"
        ]
        language_dropdown = gr.Dropdown(
            choices=language_choices,
            value="html",
            label="Code Language",
            visible=True
        )
        website_url_input = gr.Textbox(
            label="website for redesign",
            placeholder="https://example.com",
            lines=1,
            visible=True
        )
        file_input = gr.File(
            label="Reference file (OCR only)",
            file_types=[".pdf", ".txt", ".md", ".csv", ".docx", ".jpg", ".jpeg", ".png", ".bmp", ".tiff", ".tif", ".gif", ".webp"],
            visible=True
        )
        image_input = gr.Image(
            label="UI design image",
            visible=False
        )
        image_to_image_prompt = gr.Textbox(
            label="Image-to-Image Prompt",
            placeholder="Describe how to transform the uploaded image (e.g., 'Turn the cat into a tiger.')",
            lines=2,
            visible=False
        )
        with gr.Row():
            btn = gr.Button("Generate", variant="primary", size="lg", scale=2, visible=True)
            clear_btn = gr.Button("Clear", variant="secondary", size="sm", scale=1, visible=True)
        # --- Move deploy/app name/sdk here, right before web search ---
        space_name_input = gr.Textbox(
            label="app name (e.g. my-cool-app)",
            placeholder="Enter your app name",
            lines=1,
            visible=False
        )
        sdk_choices = [
            ("Gradio (Python)", "gradio"),
            ("Streamlit (Python)", "streamlit"),
            ("Static (HTML)", "static"),
            ("Transformers.js", "transformers.js"),
            ("Svelte", "svelte")
        ]
        sdk_dropdown = gr.Dropdown(
            choices=[x[0] for x in sdk_choices],
            value="Static (HTML)",
            label="App SDK",
            visible=False
        )
        deploy_btn = gr.Button("πŸš€ Deploy App", variant="primary", visible=False)
        deploy_status = gr.Markdown(visible=False, label="Deploy status")
        # --- End move ---
        search_toggle = gr.Checkbox(
            label="πŸ” Web search",
            value=False,
            visible=True
        )
        # Image generation toggles
        image_generation_toggle = gr.Checkbox(
            label="🎨 Generate Images (text β†’ image)",
            value=False,
            visible=True,
            info="Include generated images in your outputs using Qwen image model"
        )
        text_to_image_prompt = gr.Textbox(
            label="Text-to-Image Prompt",
            placeholder="Describe the image to generate (e.g., 'A minimalist dashboard hero illustration in pastel colors.')",
            lines=2,
            visible=False
        )
        image_to_image_toggle = gr.Checkbox(
            label="πŸ–ΌοΈ Image to Image (uses input image)",
            value=False,
            visible=True,
            info="Transform your uploaded image using FLUX.1-Kontext-dev"
        )

        def on_image_to_image_toggle(toggled):
            """Reveal the image input and its prompt while image-to-image is enabled."""
            show = bool(toggled)
            return gr.update(visible=show), gr.update(visible=show)

        def on_text_to_image_toggle(toggled):
            """Show the text-to-image prompt only when generation is switched on."""
            prompt_visible = bool(toggled)
            return gr.update(visible=prompt_visible)

        image_to_image_toggle.change(
            on_image_to_image_toggle,
            inputs=[image_to_image_toggle],
            outputs=[image_input, image_to_image_prompt]
        )
        image_generation_toggle.change(
            on_text_to_image_toggle,
            inputs=[image_generation_toggle],
            outputs=[text_to_image_prompt]
        )
        model_dropdown = gr.Dropdown(
            choices=[model['name'] for model in AVAILABLE_MODELS],
            value=DEFAULT_MODEL_NAME,
            label="Model",
            visible=True
        )
        provider_state = gr.State("auto")
        gr.Markdown("**Quick start**", visible=True)
        with gr.Column(visible=True) as quick_examples_col:
            for i, demo_item in enumerate(DEMO_LIST[:3]):
                demo_card = gr.Button(
                    value=demo_item['title'], 
                    variant="secondary",
                    size="sm"
                )
                demo_card.click(
                    fn=lambda idx=i: gr.update(value=DEMO_LIST[idx]['description']),
                    outputs=input
                )
        if not tavily_client:
            gr.Markdown("⚠️ Web search unavailable", visible=True)
        # Remove model display and web search available line
        def on_model_change(model_name):
            """Resolve *model_name* to its model dict, defaulting to the first model.

            Returns (model_dict, image_input_visibility_update).
            """
            selected = next(
                (entry for entry in AVAILABLE_MODELS if entry['name'] == model_name),
                AVAILABLE_MODELS[0],
            )
            return selected, update_image_input_visibility(selected)
        def save_prompt(input):
            # NOTE(review): `input` shadows the builtin here; it carries the
            # textbox value. Returns a {component: value} mapping keyed by the
            # `setting` State -- presumably consumed by event wiring elsewhere;
            # no caller is visible in this part of the file, verify it is used.
            return {setting: {"system": input}}
        model_dropdown.change(
            lambda model_name: on_model_change(model_name),
            inputs=model_dropdown,
            outputs=[current_model, image_input]
        )
        # --- Remove deploy/app name/sdk from bottom column ---
        # (delete the gr.Column() block containing space_name_input, sdk_dropdown, deploy_btn, deploy_status)

    with gr.Column():
        with gr.Tabs():
            with gr.Tab("Code"):
                code_output = gr.Code(
                    language="html", 
                    lines=25, 
                    interactive=True,
                    label="Generated code"
                )
            with gr.Tab("Preview"):
                sandbox = gr.HTML(label="Live preview")
            # History tab hidden per user request
            # with gr.Tab("History"):
            #     history_output = gr.Chatbot(show_label=False, height=400, type="messages")
        
        # Keep history_output as hidden component to maintain functionality
        history_output = gr.Chatbot(show_label=False, height=400, type="messages", visible=False)

    # Load project function
    def handle_load_project(url):
        """Import a Space from *url* and refresh the editor, preview and history.

        Wired to 8 outputs: [load_project_status, code_output, sandbox,
        load_project_url, history, history_output, space_name_input, deploy_btn].
        """
        if not url.strip():
            # Bug fix: every branch must return one value per wired output
            # (8 of them) -- the old single gr.update() return raised at
            # runtime because Gradio received too few output values.
            return [
                gr.update(value="Please enter a URL.", visible=True),
                gr.update(),
                gr.update(),
                gr.update(),
                [],
                [],
                gr.update(value="", visible=False),
                gr.update(value="πŸš€ Deploy App", visible=False)
            ]
        
        status, code = load_project_from_url(url)
        
        if code:
            # Extract space info so Deploy can push updates back to the same Space
            is_valid, username, project_name = check_hf_space_url(url)
            space_info = f"{username}/{project_name}" if is_valid else ""
            
            # Seed chat history with the imported project
            loaded_history = [[f"Loaded project from {url}", code]]
            # Determine preview based on content (HTML, Streamlit or Gradio)
            if code and (code.strip().startswith('<!DOCTYPE html>') or code.strip().startswith('<html')):
                preview_html = send_to_sandbox(code)
                code_lang = "html"
            elif is_streamlit_code(code):
                preview_html = send_streamlit_to_stlite(code)
                code_lang = "python"
            elif is_gradio_code(code):
                preview_html = send_gradio_to_lite(code)
                code_lang = "python"
            else:
                preview_html = "<div style='padding:1em;color:#888;text-align:center;'>Preview not available for this file type.</div>"
                code_lang = "html"

            return [
                gr.update(value=status, visible=True),
                gr.update(value=code, language=code_lang),
                gr.update(value=preview_html),
                gr.update(value=""),
                loaded_history,
                history_to_chatbot_messages(loaded_history),
                gr.update(value=space_info, visible=True),  # Update space name with loaded project
                gr.update(value="Update Existing Space", visible=True)  # Change button text
            ]
        else:
            # Error - surface the message and reset the deploy controls
            return [
                gr.update(value=status, visible=True),
                gr.update(),
                gr.update(),
                gr.update(),
                [],
                [],
                gr.update(value="", visible=False),
                gr.update(value="πŸš€ Deploy App", visible=False)
            ]

    # Event handlers
    def update_code_language(language):
        """Switch the code editor's syntax highlighting to match *language*."""
        editor_language = get_gradio_language(language)
        return gr.update(language=editor_language)

    def update_sdk_based_on_language(language):
        """Select the deploy SDK label matching the chosen code language.

        Unknown languages (plain python, sql variants, ...) default to the
        Gradio SDK, matching the original if/elif chain's final else.
        """
        # Dispatch table replaces the repetitive if/elif chain
        sdk_by_language = {
            "transformers.js": "Transformers.js",
            "svelte": "Svelte",
            "html": "Static (HTML)",
            "streamlit": "Streamlit (Python)",
            "gradio": "Gradio (Python)",
        }
        return gr.update(value=sdk_by_language.get(language, "Gradio (Python)"))

    language_dropdown.change(update_code_language, inputs=language_dropdown, outputs=code_output)
    language_dropdown.change(update_sdk_based_on_language, inputs=language_dropdown, outputs=sdk_dropdown)

    def preview_logic(code, language):
        """Render the live-preview HTML for *code* in the selected *language*.

        Non-previewable combinations return an explanatory placeholder div.
        Branch order matters: the python/streamlit fallback must run before the
        transformers.js/svelte checks, exactly as in the original chain.
        """
        def _notice(message):
            # Shared wrapper for the grey centered placeholder message
            return "<div style='padding:1em;color:#888;text-align:center;'>" + message + "</div>"

        if language == "html":
            return send_to_sandbox(code)
        if language == "streamlit":
            if is_streamlit_code(code):
                return send_streamlit_to_stlite(code)
            return _notice("Add `import streamlit as st` to enable Streamlit preview.")
        if language == "gradio":
            if is_gradio_code(code):
                return send_gradio_to_lite(code)
            return _notice("Add `import gradio as gr` to enable Gradio preview.")
        if language == "python" or is_streamlit_code(code):
            if is_streamlit_code(code):
                return send_streamlit_to_stlite(code)
            return _notice("Preview available only for Streamlit apps in Python. Add `import streamlit as st`.")
        if language == "transformers.js":
            files = parse_transformers_js_output(code)
            if files['index.html']:
                return send_transformers_to_sandbox(files)
            return _notice("Preview is only available for HTML. Please download your code using the download button above.")
        if language == "svelte":
            return _notice("Preview is only available for HTML. Please download your Svelte code and deploy it to see the result.")
        return _notice("Preview is only available for HTML.")

    def show_deploy_components(*args):
        """Make the app-name box, SDK picker and deploy button visible."""
        return [factory(visible=True) for factory in (gr.Textbox, gr.Dropdown, gr.Button)]

    def hide_deploy_components(*args):
        """Hide the app-name box, SDK picker and deploy button."""
        return [factory(visible=False) for factory in (gr.Textbox, gr.Dropdown, gr.Button)]
    
    def update_deploy_button_text(space_name):
        """Switch the deploy button label: 'user/space' targets an existing Space."""
        label = "πŸ”„ Update Space" if "/" in space_name.strip() else "πŸš€ Deploy App"
        return gr.update(value=label)
    
    def preserve_space_info_for_followup(history):
        """Keep the deploy target pointing at an imported Space on follow-ups.

        Scans *history* for the banner left by the project importer; when found,
        pre-fills the space name and flips the deploy button into update mode.
        Returns [space_name_update, deploy_button_update].
        """
        import re  # hoisted out of the loop body (it ran once per matching turn)

        # `not history` already covers None and the empty list; the extra
        # `len(history) == 0` check was redundant.
        if not history:
            return [gr.update(), gr.update()]

        # Look for the imported-project marker in any assistant message
        for _user_msg, assistant_msg in history:
            if assistant_msg and 'IMPORTED PROJECT FROM HUGGING FACE SPACE' in assistant_msg:
                # Extract the "Space: user/name" line from the banner
                space_match = re.search(r'Space:\s*([^\s\n]+)', assistant_msg)
                if space_match:
                    return [
                        gr.update(value=space_match.group(1), visible=True),  # Update space name
                        gr.update(value="πŸ”„ Update Space", visible=True)  # Update button text
                    ]

        # No imported project found, return no changes
        return [gr.update(), gr.update()]

    # Load project button event
    load_project_btn.click(
        handle_load_project,
        inputs=[load_project_url],
        outputs=[load_project_status, code_output, sandbox, load_project_url, history, history_output, space_name_input, deploy_btn]
    )

    btn.click(
        generation_code,
        inputs=[input, image_input, file_input, website_url_input, setting, history, current_model, search_toggle, language_dropdown, provider_state, image_generation_toggle, image_to_image_toggle, image_to_image_prompt, text_to_image_prompt],
        outputs=[code_output, history, sandbox, history_output]
    ).then(
        show_deploy_components,
        None,
        [space_name_input, sdk_dropdown, deploy_btn]
    ).then(
        preserve_space_info_for_followup,
        inputs=[history],
        outputs=[space_name_input, deploy_btn]
    )
    # Update preview when code or language changes
    code_output.change(preview_logic, inputs=[code_output, language_dropdown], outputs=sandbox)
    language_dropdown.change(preview_logic, inputs=[code_output, language_dropdown], outputs=sandbox)
    # Update deploy button text when space name changes
    space_name_input.change(update_deploy_button_text, inputs=[space_name_input], outputs=[deploy_btn])
    clear_btn.click(clear_history, outputs=[history, history_output, file_input, website_url_input])
    clear_btn.click(hide_deploy_components, None, [space_name_input, sdk_dropdown, deploy_btn])
    # Reset space name and button text when clearing
    clear_btn.click(
        lambda: [gr.update(value=""), gr.update(value="πŸš€ Deploy App")],
        outputs=[space_name_input, deploy_btn]
    )

    # Theme switching handlers
    def handle_theme_change(theme_name):
        """Refresh the sidebar blurb when a new theme is selected."""
        config = THEME_CONFIGS.get(theme_name)
        if config is None:
            # Unknown theme name: leave the description untouched
            return gr.update()

        features = THEME_FEATURES.get(theme_name, [])
        if features:
            feature_text = f"**Features:** {', '.join(features)}"
        else:
            feature_text = ""
        return gr.update(value=f"*{config['description']}*\n\n{feature_text}")

    def apply_theme_change(theme_name):
        """Save theme preference and show restart instruction"""
        if theme_name in THEME_CONFIGS:
            save_theme_preference(theme_name)
            
            restart_message = f"""
🎨 **Theme saved:** {theme_name}

⚠️ **Restart required** to fully apply the new theme.

**Why restart is needed:** Gradio themes are set during application startup and cannot be changed dynamically at runtime. This ensures all components are properly styled with consistent theming.

**To apply your new theme:**
1. Stop the application (Ctrl+C)
2. Restart it with the same command
3. Your theme will be automatically loaded

*Your theme preference has been saved and will persist across restarts.*
            """
            
            return gr.update(value=restart_message, visible=True, elem_classes=["restart-needed"])
        return gr.update()

    # Theme dropdown change event  
    theme_dropdown.change(
        handle_theme_change,
        inputs=[theme_dropdown],
        outputs=[theme_description]
    )
    
    # Apply theme button click event
    apply_theme_btn.click(
        apply_theme_change,
        inputs=[theme_dropdown],
        outputs=[theme_status]
    )

    # Deploy to Spaces logic

    def deploy_to_user_space(
        code, 
        space_name, 
        sdk_name,  # new argument
        profile: gr.OAuthProfile | None = None, 
        token: gr.OAuthToken | None = None
    ):
        import shutil
        if not code or not code.strip():
            return gr.update(value="No code to deploy.", visible=True)
        if profile is None or token is None:
            return gr.update(value="Please log in with your Hugging Face account to deploy to your own Space. Otherwise, use the default deploy (opens in new tab).", visible=True)
        
        # Check if token has write permissions
        if not token.token or token.token == "hf_":
            return gr.update(value="Error: Invalid token. Please log in again with your Hugging Face account to get a valid write token.", visible=True)
        
        # Check if this is an update to an existing space (contains /)
        is_update = "/" in space_name.strip()
        if is_update:
            # This is an existing space, use the provided space_name as repo_id
            repo_id = space_name.strip()
            # Extract username from repo_id for permission check
            space_username = repo_id.split('/')[0]
            if space_username != profile.username:
                return gr.update(value=f"Error: You can only update your own spaces. This space belongs to {space_username}.", visible=True)
            
            # Verify the user has write access to this space
            try:
                api = HfApi(token=token.token)
                # Try to get space info to verify access
                space_info = api.space_info(repo_id)
                if not space_info:
                    return gr.update(value=f"Error: Could not access space {repo_id}. Please check your permissions.", visible=True)
            except Exception as e:
                return gr.update(value=f"Error: No write access to space {repo_id}. Please ensure you have the correct permissions. Error: {str(e)}", visible=True)
        else:
            # This is a new space, create repo_id with current user
            username = profile.username
            repo_id = f"{username}/{space_name.strip()}"
        # Map SDK name to HF SDK slug
        sdk_map = {
            "Gradio (Python)": "gradio",
            "Streamlit (Python)": "docker",  # Use 'docker' for Streamlit Spaces
            "Static (HTML)": "static",
            "Transformers.js": "static",  # Transformers.js uses static SDK
            "Svelte": "static"  # Svelte uses static SDK
        }
        sdk = sdk_map.get(sdk_name, "gradio")
        
        # Create API client with user's token for proper authentication
        api = HfApi(token=token.token)
        # Only create the repo for new spaces (not updates) and non-Transformers.js, non-Streamlit, and non-Svelte SDKs
        if not is_update and sdk != "docker" and sdk_name not in ["Transformers.js", "Svelte"]:
            try:
                api.create_repo(
                    repo_id=repo_id,  # e.g. username/space_name
                    repo_type="space",
                    space_sdk=sdk,  # Use selected SDK
                    exist_ok=True  # Don't error if it already exists
                )
            except Exception as e:
                return gr.update(value=f"Error creating Space: {e}", visible=True)
        # Streamlit/docker logic: new spaces are created by duplicating the
        # official Streamlit template, then requirements.txt and the user's
        # app are uploaded on top of it.
        if sdk == "docker":
            try:
                # For new spaces, duplicate the template first
                if not is_update:
                    # Use duplicate_space to create a Streamlit template space
                    from huggingface_hub import duplicate_space
                    
                    # Duplicate the streamlit template space.
                    # NOTE(review): to_id here is just the bare space name, while
                    # the Svelte branch passes the full username/space_name
                    # repo_id — duplicate_space defaults to the token owner's
                    # namespace, so this presumably works, but confirm the two
                    # branches are intentionally different.
                    duplicated_repo = duplicate_space(
                        from_id="streamlit/streamlit-template-space",
                        to_id=space_name.strip(),
                        token=token.token,
                        exist_ok=True
                    )
                
                # Generate and upload requirements.txt for Streamlit apps
                # (dependencies are inferred from the code's import statements).
                import_statements = extract_import_statements(code)
                requirements_content = generate_requirements_txt_with_llm(import_statements)
                
                import tempfile
                
                # Upload requirements.txt first
                try:
                    # delete=False so the file survives the context manager and
                    # can be uploaded; it is removed in the finally block below.
                    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
                        f.write(requirements_content)
                        requirements_temp_path = f.name
                    
                    api.upload_file(
                        path_or_fileobj=requirements_temp_path,
                        path_in_repo="requirements.txt",
                        repo_id=repo_id,
                        repo_type="space"
                    )
                except Exception as e:
                    error_msg = str(e)
                    if "403 Forbidden" in error_msg and "write token" in error_msg:
                        return gr.update(value=f"Error uploading requirements.txt: Permission denied. Please ensure you have write access to {repo_id} and your token has the correct permissions.", visible=True)
                    else:
                        return gr.update(value=f"Error uploading requirements.txt: {e}", visible=True)
                finally:
                    import os
                    # locals() guard: the tempfile creation itself may have failed.
                    if 'requirements_temp_path' in locals():
                        os.unlink(requirements_temp_path)
                
                # Add anycoder tag to existing README
                add_anycoder_tag_to_readme(api, repo_id)
                
                # Upload the user's code to src/streamlit_app.py (for both new and existing spaces)
                with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
                    f.write(code)
                    temp_path = f.name
                
                try:
                    api.upload_file(
                        path_or_fileobj=temp_path,
                        path_in_repo="src/streamlit_app.py",
                        repo_id=repo_id,
                        repo_type="space"
                    )
                    space_url = f"https://huggingface.co/spaces/{repo_id}"
                    action_text = "Updated" if is_update else "Deployed"
                    return gr.update(value=f"βœ… {action_text}! [Open your Space here]({space_url})", visible=True)
                except Exception as e:
                    error_msg = str(e)
                    if "403 Forbidden" in error_msg and "write token" in error_msg:
                        return gr.update(value=f"Error: Permission denied. Please ensure you have write access to {repo_id} and your token has the correct permissions.", visible=True)
                    else:
                        return gr.update(value=f"Error uploading Streamlit app: {e}", visible=True)
                finally:
                    import os
                    os.unlink(temp_path)
                    
            except Exception as e:
                error_prefix = "Error duplicating Streamlit space" if not is_update else "Error updating Streamlit space"
                return gr.update(value=f"{error_prefix}: {e}", visible=True)
        # Transformers.js logic: the generated output is split into three files
        # (index.html / index.js / style.css) which are uploaded to a space
        # duplicated from the official transformers.js static template.
        elif sdk_name == "Transformers.js":
            try:
                # For new spaces, duplicate the template. For updates, just verify access.
                if not is_update:
                    # Use duplicate_space to create a transformers.js template space
                    from huggingface_hub import duplicate_space
                    
                    # Duplicate the transformers.js template space
                    duplicated_repo = duplicate_space(
                        from_id="static-templates/transformers.js",
                        to_id=space_name.strip(),
                        token=token.token,
                        exist_ok=True
                    )
                    print("Duplicated repo result:", duplicated_repo, type(duplicated_repo))
                else:
                    # For updates, verify we can access the existing space
                    try:
                        space_info = api.space_info(repo_id)
                        if not space_info:
                            return gr.update(value=f"Error: Could not access space {repo_id} for update.", visible=True)
                    except Exception as e:
                        return gr.update(value=f"Error: Cannot update space {repo_id}. {str(e)}", visible=True)
                # Parse the transformers.js output to get the three files
                files = parse_transformers_js_output(code)
                
                # All three files are required; bail out if any is missing.
                if not files['index.html'] or not files['index.js'] or not files['style.css']:
                    return gr.update(value="Error: Could not parse transformers.js output. Please regenerate the code.", visible=True)
                
                # Upload the three files to the space (with retry logic for reliability)
                import tempfile
                import time
                
                # Define files to upload
                files_to_upload = [
                    ("index.html", files['index.html']),
                    ("index.js", files['index.js']),
                    ("style.css", files['style.css'])
                ]
                
                # Upload each file with retry logic (similar to static HTML pattern)
                max_attempts = 3
                for file_name, file_content in files_to_upload:
                    success = False
                    last_error = None
                    
                    for attempt in range(max_attempts):
                        try:
                            with tempfile.NamedTemporaryFile("w", suffix=f".{file_name.split('.')[-1]}", delete=False) as f:
                                f.write(file_content)
                                temp_path = f.name
                            
                            api.upload_file(
                                path_or_fileobj=temp_path,
                                path_in_repo=file_name,
                                repo_id=repo_id,
                                repo_type="space"
                            )
                            success = True
                            break
                            
                        except Exception as e:
                            last_error = e
                            error_msg = str(e)
                            if "403 Forbidden" in error_msg and "write token" in error_msg:
                                # Permission errors won't be fixed by retrying
                                return gr.update(value=f"Error: Permission denied. Please ensure you have write access to {repo_id} and your token has the correct permissions.", visible=True)
                            
                            if attempt < max_attempts - 1:  # Not the last attempt
                                time.sleep(2)  # Wait before retrying
                        finally:
                            import os
                            # NOTE(review): 'temp_path' stays in locals() across
                            # iterations, so if NamedTemporaryFile itself fails on
                            # a later attempt this may unlink an already-deleted
                            # path and raise FileNotFoundError — worth hardening.
                            if 'temp_path' in locals():
                                os.unlink(temp_path)
                    
                    if not success:
                        return gr.update(value=f"Error uploading {file_name}: {last_error}", visible=True)
                
                # Add anycoder tag to existing README (for both new and update)
                add_anycoder_tag_to_readme(api, repo_id)
                
                # For updates, trigger a space restart to ensure changes take effect
                if is_update:
                    try:
                        api.restart_space(repo_id=repo_id)
                    except Exception as restart_error:
                        # Don't fail the deployment if restart fails, just log it
                        print(f"Note: Could not restart space after update: {restart_error}")
                
                space_url = f"https://huggingface.co/spaces/{repo_id}"
                action_text = "Updated" if is_update else "Deployed"
                return gr.update(value=f"βœ… {action_text}! [Open your Transformers.js Space here]({space_url})", visible=True)
                    
            except Exception as e:
                # Handle potential RepoUrl object errors
                error_msg = str(e)
                if "'url'" in error_msg or "RepoUrl" in error_msg:
                    # For RepoUrl object issues, check if the space was actually created successfully
                    try:
                        # Check if space exists by trying to access it
                        space_url = f"https://huggingface.co/spaces/{repo_id}"
                        test_api = HfApi(token=token.token)
                        space_exists = test_api.space_info(repo_id)
                        
                        if space_exists and not is_update:
                            # Space was created successfully despite the RepoUrl error
                            return gr.update(value=f"βœ… Deployed! Space was created successfully despite a technical error. [Open your Transformers.js Space here]({space_url})", visible=True)
                        elif space_exists and is_update:
                            # Space was updated successfully despite the RepoUrl error  
                            return gr.update(value=f"βœ… Updated! Space was updated successfully despite a technical error. [Open your Transformers.js Space here]({space_url})", visible=True)
                        else:
                            # Space doesn't exist, real error
                            return gr.update(value=f"Error: Could not create/update space. Please try again manually at https://huggingface.co/new-space", visible=True)
                    except:  # NOTE(review): bare except — prefer `except Exception` so KeyboardInterrupt/SystemExit propagate
                        # Fallback to informative error with link
                        repo_url = f"https://huggingface.co/spaces/{repo_id}"
                        return gr.update(value=f"Error: Could not properly handle space creation response. Space may have been created successfully. Check: {repo_url}", visible=True)
                
                # General error handling for both creation and updates
                action_verb = "updating" if is_update else "duplicating"
                return gr.update(value=f"Error {action_verb} Transformers.js space: {error_msg}", visible=True)
        # Svelte logic: duplicate the static Svelte template, then overwrite
        # src/App.svelte (required) and src/app.css (optional) with the
        # generated files.
        # NOTE(review): the `and not is_update` guard means a Svelte *update*
        # skips this branch and falls through to the generic `sdk == "static"`
        # path below, which uploads the raw output as index.html — confirm
        # that is intended for Svelte spaces.
        elif sdk_name == "Svelte" and not is_update:
            try:
                # Use duplicate_space to create a Svelte template space
                from huggingface_hub import duplicate_space
                
                # Duplicate the Svelte template space
                duplicated_repo = duplicate_space(
                    from_id="static-templates/svelte",
                    to_id=repo_id,  # Use the full repo_id (username/space_name)
                    token=token.token,
                    exist_ok=True
                )
                print("Duplicated Svelte repo result:", duplicated_repo, type(duplicated_repo))
                
                # Extract the actual repo ID from the duplicated space
                # The duplicated_repo is a RepoUrl object, convert to string and extract the repo ID
                try:
                    duplicated_repo_str = str(duplicated_repo)
                    # Extract username and repo name from the URL
                    if "/spaces/" in duplicated_repo_str:
                        parts = duplicated_repo_str.split("/spaces/")[-1].split("/")
                        if len(parts) >= 2:
                            actual_repo_id = f"{parts[0]}/{parts[1]}"
                        else:
                            actual_repo_id = repo_id  # Fallback to original
                    else:
                        actual_repo_id = repo_id  # Fallback to original
                except Exception as e:
                    print(f"Error extracting repo ID from duplicated_repo: {e}")
                    actual_repo_id = repo_id  # Fallback to original
                print("Actual repo ID for Svelte uploads:", actual_repo_id)
                
                # Parse the Svelte output to get the custom files
                files = parse_svelte_output(code)
                
                if not files['src/App.svelte']:
                    return gr.update(value="Error: Could not parse Svelte output. Please regenerate the code.", visible=True)
                
                # Upload only the custom Svelte files to the duplicated space
                import tempfile
                
                # Upload src/App.svelte (required)
                with tempfile.NamedTemporaryFile("w", suffix=".svelte", delete=False) as f:
                    f.write(files['src/App.svelte'])
                    temp_path = f.name
                
                try:
                    api.upload_file(
                        path_or_fileobj=temp_path,
                        path_in_repo="src/App.svelte",
                        repo_id=actual_repo_id,
                        repo_type="space"
                                        )
                except Exception as e:
                    error_msg = str(e)
                    if "403 Forbidden" in error_msg and "write token" in error_msg:
                        return gr.update(value=f"Error: Permission denied. Please ensure you have write access to {actual_repo_id} and your token has the correct permissions.", visible=True)
                    else:
                        return gr.update(value=f"Error uploading src/App.svelte: {e}", visible=True)
                finally:
                    import os
                    os.unlink(temp_path)
                
                # Upload src/app.css (optional)
                if files['src/app.css']:
                    with tempfile.NamedTemporaryFile("w", suffix=".css", delete=False) as f:
                        f.write(files['src/app.css'])
                        temp_path = f.name
                    
                    try:
                        api.upload_file(
                            path_or_fileobj=temp_path,
                            path_in_repo="src/app.css",
                            repo_id=actual_repo_id,
                            repo_type="space"
                        )
                    except Exception as e:
                        error_msg = str(e)
                        if "403 Forbidden" in error_msg and "write token" in error_msg:
                            return gr.update(value=f"Error: Permission denied. Please ensure you have write access to {actual_repo_id} and your token has the correct permissions.", visible=True)
                        else:
                            return gr.update(value=f"Error uploading src/app.css: {e}", visible=True)
                    finally:
                        import os
                        os.unlink(temp_path)
                
                # Add anycoder tag to existing README
                add_anycoder_tag_to_readme(api, actual_repo_id)
                
                # Success - all files uploaded
                space_url = f"https://huggingface.co/spaces/{actual_repo_id}"
                action_text = "Updated" if is_update else "Deployed"
                return gr.update(value=f"βœ… {action_text}! [Open your Svelte Space here]({space_url})", visible=True)
                    
            except Exception as e:
                # Handle potential RepoUrl object errors
                error_msg = str(e)
                if "'url'" in error_msg or "RepoUrl" in error_msg:
                    return gr.update(value=f"Error duplicating Svelte space: RepoUrl handling error. Please try again. Details: {error_msg}", visible=True)
                return gr.update(value=f"Error duplicating Svelte space: {error_msg}", visible=True)
        # Other SDKs (existing logic): static HTML spaces upload a single
        # index.html with retries; everything else is treated as a Gradio app
        # (requirements.txt inferred from imports, then app.py uploaded).
        if sdk == "static":
            import time
            file_name = "index.html"
            
            # Add anycoder tag to existing README (after repo creation)
            add_anycoder_tag_to_readme(api, repo_id)
            
            # Wait and retry logic after repo creation
            # (a freshly created repo may not be immediately writable).
            max_attempts = 3
            for attempt in range(max_attempts):
                import tempfile
                with tempfile.NamedTemporaryFile("w", suffix=".html", delete=False) as f:
                    f.write(code)
                    temp_path = f.name
                try:
                    api.upload_file(
                        path_or_fileobj=temp_path,
                        path_in_repo=file_name,
                        repo_id=repo_id,
                        repo_type="space"
                    )
                    space_url = f"https://huggingface.co/spaces/{repo_id}"
                    action_text = "Updated" if is_update else "Deployed"
                    return gr.update(value=f"βœ… {action_text}! [Open your Space here]({space_url})", visible=True)
                except Exception as e:
                    error_msg = str(e)
                    if "403 Forbidden" in error_msg and "write token" in error_msg:
                        # Permission errors won't be fixed by retrying.
                        return gr.update(value=f"Error: Permission denied. Please ensure you have write access to {repo_id} and your token has the correct permissions.", visible=True)
                    elif attempt < max_attempts - 1:
                        time.sleep(2)  # Wait before retrying
                    else:
                        return gr.update(value=f"Error uploading file after {max_attempts} attempts: {e}. Please check your permissions and try again.", visible=True)
                finally:
                    import os
                    os.unlink(temp_path)
        else:
            # Generate and upload requirements.txt for Gradio apps
            import_statements = extract_import_statements(code)
            requirements_content = generate_requirements_txt_with_llm(import_statements)
            
            import tempfile
            
            # Upload requirements.txt first
            try:
                with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
                    f.write(requirements_content)
                    requirements_temp_path = f.name
                
                api.upload_file(
                    path_or_fileobj=requirements_temp_path,
                    path_in_repo="requirements.txt",
                    repo_id=repo_id,
                    repo_type="space"
                )
            except Exception as e:
                error_msg = str(e)
                if "403 Forbidden" in error_msg and "write token" in error_msg:
                    return gr.update(value=f"Error uploading requirements.txt: Permission denied. Please ensure you have write access to {repo_id} and your token has the correct permissions.", visible=True)
                else:
                    return gr.update(value=f"Error uploading requirements.txt: {e}", visible=True)
            finally:
                import os
                # locals() guard: the tempfile creation itself may have failed.
                if 'requirements_temp_path' in locals():
                    os.unlink(requirements_temp_path)
            
            # Add anycoder tag to existing README
            add_anycoder_tag_to_readme(api, repo_id)
            
            # Now upload the main app.py file
            file_name = "app.py"
            with tempfile.NamedTemporaryFile("w", suffix=f".{file_name.split('.')[-1]}", delete=False) as f:
                f.write(code)
                temp_path = f.name
            try:
                api.upload_file(
                    path_or_fileobj=temp_path,
                    path_in_repo=file_name,
                    repo_id=repo_id,
                    repo_type="space"
                )
                space_url = f"https://huggingface.co/spaces/{repo_id}"
                action_text = "Updated" if is_update else "Deployed"
                return gr.update(value=f"βœ… {action_text}! [Open your Space here]({space_url})", visible=True)
            except Exception as e:
                error_msg = str(e)
                if "403 Forbidden" in error_msg and "write token" in error_msg:
                    return gr.update(value=f"Error: Permission denied. Please ensure you have write access to {repo_id} and your token has the correct permissions.", visible=True)
                else:
                    return gr.update(value=f"Error uploading file: {e}", visible=True)
            finally:
                import os
                os.unlink(temp_path)

    # Wire the deploy button to the login-aware deployment handler.
    # deploy_to_user_space presumably also receives the OAuth profile/token
    # auto-injected by Gradio from its type annotations — confirm against the
    # handler's signature.
    deploy_btn.click(
        deploy_to_user_space,
        inputs=[code_output, space_name_input, sdk_dropdown],
        outputs=deploy_status
    )
    # Keep the old deploy method as fallback (if not logged in, the user can still use the old method).
    # Optionally, the old deploy_btn.click for the default method could be kept as a secondary button.

if __name__ == "__main__":
    # Enable request queuing (REST API disabled, up to 20 concurrent events)
    # before launching the app with SSR on and no API docs or MCP server.
    queued_demo = demo.queue(api_open=False, default_concurrency_limit=20)
    queued_demo.launch(
        show_api=False,
        ssr_mode=True,
        mcp_server=False,
    )