diff --git a/spaces/101-5/gpt4free/g4f/.v1/testing/italygpt2_test.py b/spaces/101-5/gpt4free/g4f/.v1/testing/italygpt2_test.py
deleted file mode 100644
index 0494c8a2bfcef5107f65f368116470050afbe9ef..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/g4f/.v1/testing/italygpt2_test.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from gpt4free import italygpt2
-account_data=italygpt2.Account.create()
-for chunk in italygpt2.Completion.create(account_data=account_data,prompt="Who are you?"):
-    print(chunk, end="", flush=True)
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Code Bulk Image _TOP_ Downloader Serial.md b/spaces/1gistliPinn/ChatGPT4/Examples/Code Bulk Image _TOP_ Downloader Serial.md
deleted file mode 100644
index 05519b88d18efa7cfee72b69d366da1663bec928..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Code Bulk Image _TOP_ Downloader Serial.md
+++ /dev/null
@@ -1,6 +0,0 @@
-

code bulk image downloader serial


Download ——— https://imgfil.com/2uy15K



- -Bulk Image Downloader 5 Full Serial Key — When you get Crack Bulk Image Downloader Serial Key 5 Generator, you'd wish to activate it ... 1fdad05405
-
-
-

diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Corel Roxio Creator NXT 2 V15.0 (keygen CORE) [ChingLiu] Serial Key Keygen.md b/spaces/1gistliPinn/ChatGPT4/Examples/Corel Roxio Creator NXT 2 V15.0 (keygen CORE) [ChingLiu] Serial Key Keygen.md
deleted file mode 100644
index c8daf850c873ed58898e74adc134a607c98bd7e1..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Corel Roxio Creator NXT 2 V15.0 (keygen CORE) [ChingLiu] Serial Key Keygen.md
+++ /dev/null
@@ -1,6 +0,0 @@
-

Corel Roxio Creator NXT 2 v15.0 (keygen CORE) [ChingLiu] Serial Key keygen


Download Ziphttps://imgfil.com/2uxX1W



-
- d5da3c52bf
-
-
-

diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bus Simulator 2023 APK Mod Unlimited Money and Realistic Driving.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bus Simulator 2023 APK Mod Unlimited Money and Realistic Driving.md
deleted file mode 100644
index d013be6cc4981acaa30899c04cdee337336551ae..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bus Simulator 2023 APK Mod Unlimited Money and Realistic Driving.md
+++ /dev/null
@@ -1,130 +0,0 @@
-
-

Bus Simulator 2023 Mod Apk Rexdl: How to Download and Install the Latest Version

-

Do you love driving buses and transporting passengers from one place to another? Do you want to experience the realistic and immersive bus simulation game on your Android device? If yes, then you should try Bus Simulator 2023, the latest and most advanced bus simulator game from Ovidiu Pop. And if you want to enjoy the game with unlimited money, unlocked buses, and premium features, then you should download and install the mod apk version from Rexdl, one of the best sources for modded games and apps. In this article, we will tell you everything you need to know about Bus Simulator 2023, mod apk, and Rexdl. We will also guide you on how to download and install the latest version of Bus Simulator 2023 mod apk rexdl on your device. So, let's get started!

-

bus simulator 2023 mod apk rexdl


Download Zip ✓✓✓ https://urlin.us/2uSZFw



-

What is Bus Simulator 2023?

-

Bus Simulator 2023 is a public transport driver simulator game with elaborate gameplay and modern 3D graphics. You can play the role of a driver, picking up people at bus stops and transporting them along the route. You can also manage your own bus company, hire drivers, buy new buses, customize them, and expand your business. You can choose from different types of buses, such as city buses, school buses, double-decker buses, articulated buses, electric buses, and more. You can also drive in different cities and environments, such as Europe, USA, Canada, Australia, China, Japan, and more. You can enjoy realistic traffic, weather, day-night cycle, passengers' reactions, bus sounds, and physics. You can also play online multiplayer mode with other players around the world.

-

Features of Bus Simulator 2023

-

Some of the main features of Bus Simulator 2023 are:

- -

Gameplay of Bus Simulator 2023

-

The gameplay of Bus Simulator 2023 is simple and fun. You can start by choosing your bus model and city. Then you can select your route and start driving. You have to follow the traffic rules, stop at red lights, use indicators, avoid collisions, and obey speed limits. You also have to pick up passengers at bus stops and drop them off at their destinations. You have to be careful not to damage your bus or injure your passengers. You can earn money by completing missions and satisfying your passengers. You can use the money to buy new buses, upgrade them, hire drivers, and grow your company. You can also compete with other players in online multiplayer mode and rank up on the leaderboards.

-

What is Mod Apk?

-

Mod apk is a modified version of an original apk file that has been altered by some developers or hackers to provide some extra features or benefits that are not available in the official version. Mod apk usually has unlimited money, unlocked items, premium features, ad-free experience, and more. Mod apk can enhance your gaming experience and make the game more fun and easy. However, mod apk also has some drawbacks and risks that you should be aware of before downloading and installing them.

-

bus simulator 2023 hack apk download
-bus simulator 2023 unlimited money mod
-bus simulator 2023 mod apk latest version
-bus simulator 2023 mod apk android 1
-bus simulator 2023 mod apk revdl
-bus simulator 2023 mod apk offline
-bus simulator 2023 mod apk free shopping
-bus simulator 2023 mod apk obb
-bus simulator 2023 mod apk no ads
-bus simulator 2023 mod apk unlimited xp
-bus simulator 2023 mod apk all buses unlocked
-bus simulator 2023 mod apk an1
-bus simulator 2023 mod apk happymod
-bus simulator 2023 mod apk unlimited fuel
-bus simulator 2023 mod apk data
-bus simulator 2023 mod apk rexdl.com
-bus simulator 2023 pro mod apk
-bus simulator 2023 premium mod apk
-bus simulator 2023 vip mod apk
-bus simulator 2023 mega mod apk
-bus simulator 2023 full mod apk
-bus simulator 2023 cracked mod apk
-bus simulator 2023 cheat mod apk
-bus simulator 2023 real mod apk
-bus simulator 2023 new mod apk
-download game bus simulator 2023 mod apk rexdl
-download bus simulator 2023 original + mod + obb rexdl
-download bus simulator 2023 v1.4.3 (mod money) rexdl
-download link for bus simulator 2023 mod apk rexdl
-how to install bus simulator 2023 mod apk rexdl
-how to download bus simulator 2023 mod apk rexdl
-how to update bus simulator 2023 mod apk rexdl
-how to play bus simulator 2023 mod apk rexdl online
-how to get unlimited coins in bus simulator 2023 mod apk rexdl
-how to unlock all buses in bus simulator 2023 mod apk rexdl
-how to fix lag in bus simulator 2023 mod apk rexdl
-how to remove ads in bus simulator 2023 mod apk rexdl
-how to backup data in bus simulator 2023 mod apk rexdl
-how to restore data in bus simulator 2023 mod apk rexdl
-how to transfer data in bus simulator 2023 mod apk rexdl
-best settings for bus simulator 2023 mod apk rexdl
-best graphics for bus simulator 2023 mod apk rexdl
-best routes for bus simulator 2023 mod apk rexdl
-best tips and tricks for bus simulator 2023 mod apk rexdl
-best cheats and hacks for bus simulator 2023 mod apk rexdl
-best reviews and ratings for bus simulator 2023 mod apk rexdl
-best alternatives for bus simulator 2023 mod apk rexdl
-best features and updates for bus simulator 2023 mod apk rexdl

-

Benefits of Mod Apk

-

Some of the benefits of mod apk are:

- -

Risks of Mod Apk

-

Some of the risks of mod apk are:

- -

What is Rexdl?

-

Rexdl is a website that provides free download links for modded games and apps for Android devices. Rexdl claims to offer 100% safe and working mod apk files that are tested and verified by their team. Rexdl also updates its content regularly to provide the latest versions of the games and apps. Rexdl has a user-friendly interface and easy navigation that allows you to find and download your desired mod apk file in a few clicks. Rexdl also has a comment section where you can ask questions, request mods, or give feedback.

-

Advantages of Rexdl

-

Some of the advantages of Rexdl are:

-

Disadvantages of Rexdl

-

Some of the disadvantages of Rexdl are:

- -

How to Download and Install Bus Simulator 2023 Mod Apk Rexdl?

-

If you want to download and install Bus Simulator 2023 mod apk rexdl on your device, you need to follow these steps:

-

Step 1: Enable Unknown Sources

-

Before you can install any mod apk file on your device, you need to enable the unknown sources option in your device settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on. You may see a warning message, but you can ignore it and proceed.

-

Step 2: Visit Rexdl Website

-

Next, you need to visit the Rexdl website using your browser. You can use this link: https://rexdl.com/. Once you are on the website, you can browse through the categories and genres of games and apps, or use the search bar to find Bus Simulator 2023 mod apk.

-

Step 3: Search for Bus Simulator 2023 Mod Apk

-

Once you have found the Bus Simulator 2023 mod apk file on Rexdl, you can click on it to open its page. There you will see some information about the game, such as its name, version, size, developer, screenshots, description, features, and more. You will also see a download button at the bottom of the page. Click on it to start downloading the file.

-

Step 4: Download and Install the File

-

After clicking on the download button, you will be redirected to another page where you will see another download button. Click on it again to download the file directly to your device. The file size is about 300 MB, so make sure you have enough space and a stable internet connection. Once the download is complete, you can open the file and tap on install. Wait for a few seconds until the installation is done. Then you can launch the game and enjoy Bus Simulator 2023 mod apk rexdl.

-

Conclusion

-

Bus Simulator 2023 is a great game for bus lovers and simulation fans. It offers realistic and immersive gameplay with various buses, cities, missions, and features. You can also play online multiplayer mode with other players around the world. However, if you want to have more fun and excitement with unlimited money, unlocked buses, and premium features, you can download and install Bus Simulator 2023 mod apk rexdl from Rexdl website. Rexdl is a reliable source for modded games and apps that provides free and direct download links. However, you should also be careful of the risks and disadvantages of using mod apk files and Rexdl website. You should always check the safety and compatibility of the files before downloading and installing them. You should also backup your data and account before using any mod apk file. We hope this article has helped you learn more about Bus Simulator 2023 mod apk rexdl and how to download and install it on your device.

-

Frequently Asked Questions

-

Here are some of the frequently asked questions about Bus Simulator 2023 mod apk rexdl:

-
    -
  1. Is Bus Simulator 2023 mod apk rexdl safe to use?
  2. -

    Bus Simulator 2023 mod apk rexdl is not officially endorsed or supported by Ovidiu Pop, the developer of Bus Simulator 2023. Therefore, it may not be safe to use. It may contain viruses, malware, spyware, or other harmful programs that can damage your device or steal your personal information. It may also violate the terms and conditions of Ovidiu Pop and cause legal actions or penalties. Therefore, we recommend that you use Bus Simulator 2023 mod apk rexdl at your own risk and discretion.

    -
  3. Does Bus Simulator 2023 mod apk rexdl require root access?
  4. -

    No, Bus Simulator 2023 mod apk rexdl does not require root access to work on your device. You can install it without rooting your device.

    -
  5. Can I play online multiplayer mode with Bus Simulator 2023 mod apk rexdl?
  6. -

    Yes, you can play online multiplayer mode with Bus Simulator 2023 mod apk rexdl. However, you may face some issues or problems, such as lag, disconnect, ban, or mismatch. You may also not be able to play with other players who are using the official version of the game. Therefore, we suggest that you play online multiplayer mode with caution and respect.

    -
  7. How can I update Bus Simulator 2023 mod apk rexdl?
  8. -

    Bus Simulator 2023 mod apk rexdl may not be compatible with the latest version of the game. Therefore, you may need to update it manually. To do this, you need to visit the Rexdl website again and look for the updated version of Bus Simulator 2023 mod apk rexdl. Then you need to download and install it over the existing one. However, you may lose your progress, data, or account if you update the mod apk file. Therefore, we advise that you backup your data and account before updating the mod apk file.

    -
  9. Where can I find more mod apk files like Bus Simulator 2023 mod apk rexdl?
  10. -

    If you are looking for more mod apk files like Bus Simulator 2023 mod apk rexdl, you can visit the Rexdl website and browse through their collection of games and apps. You can also use the search bar to find your desired mod apk file. Alternatively, you can also visit other websites that provide mod apk files, such as ApkPure, ApkDone, ModDroid, HappyMod, and more. However, you should always check the safety and quality of the mod apk files before downloading and installing them.

    -

197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Lagu Aespa Black Mamba - The Ultimate Guide for Fans.md b/spaces/1phancelerku/anime-remove-background/Download Lagu Aespa Black Mamba - The Ultimate Guide for Fans.md
deleted file mode 100644
index 7d844803de20d1489ea3459199497a91078ab876..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Lagu Aespa Black Mamba - The Ultimate Guide for Fans.md
+++ /dev/null
@@ -1,166 +0,0 @@
-
-

Download Lagu Aespa Black Mamba: How to Enjoy the Debut Single of SM's New Girl Group

-

Introduction

-

If you are a fan of K-pop, you might have heard of Aespa, the new girl group from SM Entertainment. They debuted in November 2020 with their single "Black Mamba", which became a hit song with millions of views and streams. But how can you download lagu aespa black mamba and enjoy it on your device? In this article, we will show you how to download lagu aespa black mamba from various sources, as well as some information about the group and the song. Let's get started!

-

Who are Aespa?

-

Aespa is a four-member girl group consisting of Karina, Giselle, Winter, and Ningning. They are the first girl group from SM Entertainment since Red Velvet in 2014, and the first idol group to have virtual avatars called æ-Aespa. The name Aespa comes from combining "AE", which stands for "Avatar X Experience", and "aspect", which means both sides. The group's concept is based on the idea of interacting with their avatars in a parallel world called KWANGYA.

-

download lagu aespa black mamba


DOWNLOAD ✯✯✯ https://jinyurl.com/2uNJpU



-

What is Black Mamba?

-

"Black Mamba" is the debut single by Aespa, which was released on November 17, 2020. The song was written and composed by Yoo Young-jin, Omega, Ella Isaacson, Gabriela Geneva (NIIVA), Jordan Reyes, Shaun Lopez, and Scott Chesak, while production was handled by Lee Soo-man. The song is an electropop and dance-pop track with a signature synth and EDM sound and bass that is paired with an addictive hook. The lyrics are about a being called "Black Mamba" that not only interferes with the members' and avatars' connection but also threatens their world and as such is abhorred by the members.

-

Why should you download lagu aespa black mamba?

-

There are many reasons why you should download lagu aespa black mamba and listen to it on your device. Here are some of them:

- -

How to Download Lagu Aespa Black Mamba

-

Now that you know why you should download lagu aespa black mamba, let's see how you can do it. There are two main options that you can choose from: streaming platforms and MP3 download sites. We will explain each option in detail below.

-

Option 1: Streaming Platforms

-

Streaming platforms are online services that allow you to listen to music online or offline by paying a subscription fee or watching ads. Some of the most popular streaming platforms that have "Black Mamba" by Aespa are Spotify, Apple Music, and YouTube Music. Here is how you can download lagu aespa black mamba from each platform:

-

Spotify

-

Spotify is one of the most widely used streaming platforms in the world, with over 356 million users as of March 2021. Spotify offers a free plan that lets you listen to music with ads, and a premium plan that lets you download up to 10,000 songs per device and listen to them offline. To download lagu aespa black mamba from Spotify, you need to follow these steps:

-
    -
  1. Download the Spotify app on your device or go to the Spotify web player on your browser.
  2. -
  3. Sign up or log in to your Spotify account.
  4. -
  5. Search for "Black Mamba" by Aespa on the search bar.
  6. -
  7. Select the song and tap or click on the heart icon to add it to your library.
  8. -
  9. Go to your library and find the song under the "Liked Songs" playlist.
  10. -
  11. Tap or click on the download button next to the song title. The song will start downloading and a green arrow will appear when it is done.
  12. -
  13. Enjoy listening to the song offline!
  14. -
-

Apple Music

-

Apple Music is another popular streaming platform that has over 72 million subscribers as of June 2020. Apple Music offers a three-month free trial and then charges $9.99 per month for individual plans, $14.99 per month for family plans, and $4.99 per month for student plans. To download lagu aespa black mamba from Apple Music, you need to follow these steps:

-
    -
  1. Download the Apple Music app on your device or go to the Apple Music web player on your browser.
  2. -
  3. Sign up or log in to your Apple Music account with your Apple ID.
  4. -
  5. Search for "Black Mamba" by Aespa on the search bar.
  6. -
  7. Select the song and tap or click on the plus icon to add it to your library.
  8. -
  9. Go to your library and find the song under the "Recently Added" section.
  10. -
  11. Tap or click on the cloud icon next to the song title. The song will start downloading and a checkmark will appear when it is done.
  12. -
  13. Enjoy listening to the song offline!
  14. -
-

YouTube Music

-

YouTube Music is a streaming platform that is integrated with YouTube, the largest video-sharing platform in the world. YouTube Music has over 30 million subscribers as of October 2020. YouTube Music offers a free plan that lets you listen to music with ads, and a premium plan that lets you download songs and listen to them offline for $9.99 per month. To download lagu aespa black mamba from YouTube Music, you need to follow these steps:

-

download lagu aespa black mamba mp3
-download lagu aespa black mamba 320kbps
-download lagu aespa black mamba ilkpop
-download lagu aespa black mamba matikiri
-download lagu aespa black mamba wapka
-download lagu aespa black mamba planetlagu
-download lagu aespa black mamba metrolagu
-download lagu aespa black mamba uyeshare
-download lagu aespa black mamba stafaband
-download lagu aespa black mamba soundcloud
-download lagu aespa black mamba mp4
-download lagu aespa black mamba mv
-download lagu aespa black mamba video
-download lagu aespa black mamba 3gp
-download lagu aespa black mamba 4k
-download lagu aespa black mamba 1080p
-download lagu aespa black mamba 720p
-download lagu aespa black mamba 480p
-download lagu aespa black mamba lyrics
-download lagu aespa black mamba english version
-download lagu aespa black mamba instrumental
-download lagu aespa black mamba karaoke
-download lagu aespa black mamba remix
-download lagu aespa black mamba cover
-download lagu aespa black mamba acoustic
-download lagu aespa black mamba live performance
-download lagu aespa black mamba dance practice
-download lagu aespa black mamba choreography video
-download lagu aespa black mamba behind the scenes
-download lagu aespa black mamba reaction video
-download lagu aespa black mamba audio only
-download lagu aespa black mamba ringtone
-download lagu aespa black mamba album zip file
-download lagu aespa black mamba full album mp3
-download lagu aespa black mamba mini album tracklist
-download lagu aespa black mamba teaser photos
-download lagu aespa black mamba concept photos
-download lagu aespa black mamba wallpaper hd
-download lagu aespa black mamba lockscreen images
-download lagu aespa black mamba fan art pictures

-
    -
  1. Download the YouTube Music app on your device or go to the YouTube Music web player on your browser.
  2. -
  3. Sign up or log in to your YouTube Music account with your Google account.
  4. -
  5. Search for "Black Mamba" by Aespa on the search bar.
  6. -
  7. Select the song and tap or click on the three-dot menu icon next to the song title.
  8. -
  9. Select "Download" from the menu. The song will start downloading and a blue circle will appear when it is done.
  10. -
  11. Go to your library and find the song under the "Downloads" section.
  12. -
  13. Enjoy listening to the song offline!
  14. -
-

Option 2: MP3 Download Sites

-

If you don't want to use streaming platforms or pay for a subscription, you can also download lagu aespa black mamba from MP3 download sites. These are websites that allow you to download MP3 files of songs for free. However, you should be careful when using these sites, as some of them may contain viruses, malware, or illegal content. Always check the reputation and reviews of these sites before downloading anything from them. Here are some of the MP3 download sites that have "Black Mamba" by Aespa:

-

Internet Archive

-

The Internet Archive is a non-profit digital library that offers free access to millions of books, movies, music, and other media. It also has a collection of K-pop songs, including "Black Mamba" by Aespa. To download lagu aespa black mamba from the Internet Archive, you need to follow these steps:

-
    -
  1. Go to https://archive.org/details/kpop_20201117_0000.
  2. -
  3. Scroll down until you find "Black Mamba" by Aespa under the "Tracklist" section.
  4. -
  5. Select "VBR MP3" from the drop-down menu next to the song title.
  6. -
  7. The song will start downloading and a pop-up window will appear when it is done.
  8. -
  9. Save the file to your device and enjoy listening to the song offline!
  10. -
-

KUYOU.id

-

KUYOU.id is an Indonesian website that offers free MP3 downloads of various songs, including K-pop songs. It has a simple and user-friendly interface that makes it easy to find and download songs. To download lagu aespa black mamba from KUYOU.id, you need to follow these steps:

-
    -
  1. Go to https://kuyou.id/download-lagu-aespa-black-mamba-mp3/.
  2. -
  3. Scroll down until you see the "Download Lagu Aespa Black Mamba MP3" button.
  4. -
  5. Click on the button and wait for a few seconds until the download link appears.
  6. -
  7. Click on the download link and the song will start downloading.
  8. -
  9. Save the file to your device and enjoy listening to the song offline!
  10. -
-

WAPQAW

-

WAPQAW is another website that offers free MP3 downloads of various songs, including K-pop songs. It has a large database of songs that you can search by artist, title, or genre. To download lagu aespa black mamba from WAPQAW, you need to follow these steps:

-
    -
  1. Go to https://wapqaw.com/.
  2. -
  3. Type "Black Mamba" by Aespa on the search bar and click on the search icon.
  4. -
  5. Select the song from the list of results and click on the "Download" button.
  6. -
  7. Select the quality of the MP3 file that you want to download and click on the "Download" button again.
  8. -
  9. The song will start downloading and a pop-up window will appear when it is done.
  10. -
  11. Save the file to your device and enjoy listening to the song offline!
  12. -
-

Conclusion

-

In this article, we have shown you how to download lagu aespa black mamba from various sources, as well as some information about the group and the song. We hope that you have enjoyed reading this article and that you have learned something new. Now, you can download lagu aespa black mamba and listen to it anytime and anywhere you want. You can also share it with your friends and family who love K-pop and Aespa. Don't forget to support the group and their music by streaming their song online or buying their album. Thank you for reading this article and have a great day!

-

FAQs

-

Here are some of the frequently asked questions about downloading lagu aespa black mamba:

-
    -
  1. Is downloading lagu aespa black mamba legal?
  2. -

    Downloading lagu aespa black mamba from streaming platforms is legal as long as you have a valid subscription or permission from the platform. However, downloading lagu aespa black mamba from MP3 download sites may not be legal, as some of them may violate the copyright laws or contain illegal content. Therefore, you should be careful when using these sites and always check their reputation and reviews before downloading anything from them.

    -
  3. What are the benefits of downloading lagu aespa black mamba?
  4. -

    Some of the benefits of downloading lagu aespa black mamba are:

    - -
  5. What are some other songs by Aespa?
  6. -

    Some other songs by Aespa are:

    - -
  7. How can I watch the music video of "Black Mamba" by Aespa?
  8. -

    You can watch the music video of "Black Mamba" by Aespa on YouTube, where it has over 200 million views as of October 2021. You can also watch it on the official website of Aespa, where you can interact with their avatars and explore their world. Here are the links to watch the music video of "Black Mamba" by Aespa:

    - -
  9. How can I follow Aespa on social media?
  10. -

    You can follow Aespa on various social media platforms, where they share their updates, photos, videos, and more. Here are some of the social media accounts of Aespa:

    -

    401be4b1e0
    -
    -
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download TikTok Asia APK for Android - Latest Version 30.0.3.md b/spaces/1phancelerku/anime-remove-background/Download TikTok Asia APK for Android - Latest Version 30.0.3.md
deleted file mode 100644
index d4828607373333114f301e2aa96639248c24328f..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download TikTok Asia APK for Android - Latest Version 30.0.3.md
+++ /dev/null
@@ -1,153 +0,0 @@
-
-

    How to Use TikTok for Fun and Profit

    -

    TikTok is a video-sharing app that allows you to create and share short-form videos on any topic. It’s mainly mobile-based, although you can still watch TikTok videos using the web app. The platform allows users to get creative with their content using filters, stickers, voiceovers, sound effects, and background music.

    -

    tiktok asia 2016 version apk download


    DOWNLOAD ☆☆☆ https://jinyurl.com/2uNTqn



    -

    But TikTok is not just for entertainment. You can also use it to market your business, earn money from sponsorships and ads, or just have fun with your friends and followers. In this article, we’ll show you how to use TikTok for fun and profit in 2023.

    -

    What is TikTok and why should you use it?

    -

    TikTok is a popular social media app based in China that’s grown immensely in popularity over the last couple of years. On the app, creators can use a ton of sound effects, filters, and music to record short clips. User content ranges from DIY and craft videos to sketches and dance routines. To discover new content, users can follow specific creators and use hashtags.

    -

    Since videos can only be between 15 and 60 seconds long, entertainment and engagement are both optimized — which is part of the reason that TikTok is so popular. In the United States alone, TikTok currently has close to 95 million users. By 2025, TikTok is expected to have an audience of 103 million U.S. users, which means roughly one in three Americans will have a TikTok account.

    -

    TikTok is the first Chinese social media app to do this well. Since the release of the app, however, there have been concerns about privacy and users’ data, especially considering the fact that teenagers make up a large part of TikTok’s user base.

    -

    TikTok's features and benefits

    -

    What makes TikTok a popular platform for influencers and content creators? Check out these amazing features.

    - -

TikTok's For You and Following pages

One of the best ways to discover and enjoy TikTok videos is to browse the For You page and the Following page. These are two different feeds that show you videos based on your preferences and interests. Here's how they work and how to access them:

The For You page

The For You page is a personalized feed of videos that TikTok recommends for you based on your interactions with the app. You can find the For You page by tapping the Home icon at the bottom left corner of the screen, and then tapping For You at the top. The For You page shows you videos from a variety of creators, topics, and trends that match your taste. The more you use TikTok, the more the app learns about what you like and dislike, and the better it can tailor the recommendations for you.

You can also influence the For You page by liking, commenting, sharing, and following the videos and creators that you enjoy. This will help TikTok understand your preferences and show you more of what you want to see. If you come across a video that you're not interested in, you can long-press on it and tap Not Interested. This will tell TikTok to show you less of that type of content in the future. You can also hide videos from a certain creator or with a certain sound by tapping More and choosing Hide videos from this user or Hide videos with this sound.

The Following page

The Following page is a feed of videos from the creators that you follow on TikTok. You can find the Following page by tapping the Home icon at the bottom left corner of the screen, and then tapping Following at the top. The Following page shows you the latest videos from your favorite creators in chronological order. You can also see when they go live by tapping the Live button next to their profile picture.

You can follow any creator on TikTok by tapping their profile picture on their video, or by searching for their username in the Discover tab. You can also find new creators to follow by browsing hashtags, sounds, effects, and trending topics on TikTok. To unfollow a creator, simply tap their profile picture on their video, or go to their profile page, and tap Following. You can also remove followers from your own account by going to your profile page, tapping Followers, and tapping Remove next to their name. I hope this helps you understand how to browse videos on TikTok and enjoy the app to its fullest. If you have any questions, feel free to ask me. ?

Now that you know how to browse videos on TikTok, let's learn how to make and edit your own videos. TikTok offers a variety of tools and features that allow you to create engaging and creative videos with ease. Here are some steps to help you get started:

How to record a video using your phone or TikTok's native recorder

You can record a video using your phone's camera or TikTok's native recorder. To use your phone's camera, simply open the app and tap the + icon at the bottom center of the screen. This will open your phone's camera and allow you to record a video as you normally would.

To use TikTok's native recorder, tap the + icon at the bottom center of the screen, and then tap Templates at the top right corner. This will show you a list of templates that you can use to create different types of videos, such as music videos, slideshows, montages, and more. To use a template, tap on it and follow the instructions on the screen. You can also customize the template by adding your own photos, videos, text, stickers, and music. To preview your video, tap the Play button at the bottom right corner. To save your video, tap the Next button at the top right corner.

How to use filters, stickers, music, and effects

TikTok offers a variety of filters, stickers, music, and effects that you can use to enhance your video and make it more fun and attractive. To access these features, tap the + icon at the bottom center of the screen, and then tap Effects at the bottom left corner. This will open a menu of different categories of effects that you can choose from, such as Trending, Beauty, Funny, Animal, and more. To apply an effect, simply tap on it and it will appear on your screen. You can also adjust the intensity and duration of the effect by dragging the slider at the bottom.

To add stickers to your video, tap Stickers at the bottom left corner. This will open a menu of different categories of stickers that you can choose from, such as Emoji, Text, GIFs, and more. To add a sticker, simply tap on it and it will appear on your screen. You can also resize, rotate, and move the sticker by using your fingers.

To add music to your video, tap Sounds at the bottom center. This will open a menu of different categories of music that you can choose from, such as Popular, New Releases, Genres, Playlists, and more. You can also search for a specific song or artist by using the search bar at the top. To add music to your video, simply tap on it and it will start playing. You can also adjust the volume and trim the music by using the sliders at the bottom.

To add filters to your video, tap Filters at the bottom right corner. This will open a menu of different categories of filters that you can choose from, such as Portrait, Landscape, Food, Vibe, and more. To apply a filter, simply swipe left or right on your screen until you find one that you like. You can also adjust the intensity of the filter by dragging the slider at the bottom.

How to edit your video using TikTok's built-in editing tools

TikTok also offers a built-in editing tool that allows you to edit your video after recording it. To access this tool, tap Next after recording or selecting a video. This will open a screen where you can edit your video in various ways. Some of the editing options that you can use are:

- Trim: This allows you to cut out unwanted parts of your video by dragging the handles at both ends of the timeline.
- Adjust clips: This allows you to rearrange or delete clips in your video by tapping and holding them on the timeline.
- Voiceover: This allows you to record your own voice over your video by tapping and holding the microphone icon at the bottom.
- Volume: This allows you to adjust the volume of your original sound or added music by dragging the sliders at the bottom.
- Text: This allows you to add text to your video by tapping Text at the bottom. You can also change the font, color, size, alignment, and animation of the text by tapping on it and using the options at the bottom.
- Stickers: This allows you to add stickers to your video by tapping Stickers at the bottom. You can also resize, rotate, and move the stickers by using your fingers.
- Effects: This allows you to add effects to your video by tapping Effects at the bottom. You can also adjust the intensity and duration of the effects by dragging the slider at the bottom.
- Filters: This allows you to add filters to your video by tapping Filters at the bottom. You can also adjust the intensity of the filters by dragging the slider at the bottom.

After editing your video, you can tap Next to proceed to the next screen, where you can add a caption, hashtags, tags, and other settings to your video. You can also choose who can view, comment, duet, stitch, and react to your video by tapping Who can view this video at the bottom. When you're ready to post your video, tap Post at the top right corner. I hope this helps you understand how to make and edit TikTok videos and unleash your creativity on the app. If you have any questions, feel free to ask me. ?

Now that you know how to make and edit TikTok videos, let's learn how to discover and engage with TikTok content. TikTok offers a variety of ways to interact with other users and their videos, such as hashtags, challenges, duets, and stitches. Here are some tips to help you get the most out of TikTok's social features:

How to use hashtags, challenges, duets, and stitches

Hashtags are keywords or phrases that you can add to your caption to categorize your video and make it easier for other users to find it. You can use hashtags that are relevant to your video's topic, genre, style, or mood. You can also use hashtags that are trending or popular on TikTok, such as #fyp (for you page), #viral, #funny, #dance, etc.

Challenges are viral trends or activities that users can participate in by creating their own videos using a specific hashtag. Challenges can be fun, creative, educational, or social. Some examples of popular challenges on TikTok are #wipeitdown, #savagelove, #learnontiktok, #blindinglights, etc.

Duets are videos that allow you to create a split-screen video with another user's video. You can use duets to react to, collaborate with, or parody another user's video. To create a duet, tap the Share button on the video that you want to duet with, and then tap Duet.

Stitches are videos that allow you to clip and integrate another user's video into your own video. You can use stitches to add your own commentary, perspective, or twist to another user's video. To create a stitch, tap the Share button on the video that you want to stitch with, and then tap Stitch.

How to like, comment, share, and save videos

One of the simplest ways to engage with TikTok content is to like, comment, share, and save videos that you enjoy. These actions not only show your appreciation and support for the creators, but also help TikTok's algorithm to recommend more videos that suit your taste.

To like a video, simply tap the heart icon at the bottom right corner of the screen. You can also double-tap the video to like it. To unlike a video, tap the heart icon again.

To comment on a video, tap the speech bubble icon at the bottom right corner of the screen. This will open a comment section where you can type your comment and send it. You can also reply to other users' comments by tapping Reply under their comment.

To share a video, tap the arrow icon at the bottom right corner of the screen. This will open a menu of different options that you can use to share the video with others. You can share the video via message, email, social media, or copy the link.

To save a video, tap the arrow icon at the bottom right corner of the screen, and then tap Save Video. This will download the video to your device's gallery or camera roll. You can also save a video by long-pressing on it and tapping Save Video. I hope this helps you understand how to discover and engage with TikTok content and have fun on the app. If you have any questions, feel free to ask me. ?

Now that you know how to discover and engage with TikTok content, let's learn how to grow your TikTok audience and influence. TikTok is a competitive platform where millions of creators are vying for attention and recognition. To stand out from the crowd and attract loyal fans, you need to have a clear and consistent content strategy that showcases your unique value and personality. Here are some tips to help you grow your TikTok audience and influence in 2023:

How to identify your target audience and content strategy

Before you start creating content on TikTok, you need to have a clear idea of who your target audience is and what kind of content they want to see from you. This will help you tailor your content to their needs, preferences, and interests, and increase your chances of getting views, likes, comments, shares, and follows.

To identify your target audience, you need to do some research and analysis. You can use tools like TikTok Analytics, Google Trends, or Social Blade to find out more about your potential audience's demographics, behaviors, preferences, and trends. You can also look at other successful creators in your niche and see what kind of content they create, how they interact with their fans, and what hashtags they use.

To identify your content strategy, you need to define your niche, your value proposition, your tone of voice, and your posting schedule. Your niche is the specific topic or category that you focus on in your content. Your value proposition is the unique benefit or solution that you offer to your audience through your content. Your tone of voice is the way you communicate with your audience through your words, expressions, and emotions. Your posting schedule is the frequency and timing of your content uploads.

For example, if you are a fitness instructor who wants to target young women who want to lose weight and tone their bodies, your niche could be fitness tips and workouts for women. Your value proposition could be that you offer simple, effective, and fun exercises that can be done at home with minimal equipment. Your tone of voice could be friendly, motivational, and humorous. Your posting schedule could be three times a week at 9 am.

How to use analytics to track your performance and optimize your content

Once you have identified your target audience and content strategy, you need to monitor and measure how well your content is performing on TikTok. This will help you understand what works and what doesn't work for your audience, and how you can improve your content quality and reach.

To use analytics on TikTok, you need to switch to a Pro account by going to your profile page, tapping the three dots at the top right corner, tapping Manage account, and tapping Switch to Pro account. This will give you access to a dashboard where you can see various metrics and insights about your account and content. Some of the metrics that you can track on TikTok are:

- Profile views: The number of times users viewed your profile page.
- Video views: The number of times users viewed your videos.
- Followers: The number of users who followed your account.
- Likes: The number of times users liked your videos.
- Comments: The number of times users commented on your videos.
- Shares: The number of times users shared your videos.
- Average watch time: The average amount of time users spent watching your videos.
- Traffic source: The sources from which users discovered your videos, such as For You page, Following page, hashtags, sounds, etc.
- Audience territories: The countries or regions where most of your audience is located.
- Audience demographics: The age and gender distribution of your audience.
- Audience interests: The topics or categories that most interest your audience.

By analyzing these metrics, you can find out which videos performed the best and why, which videos performed the worst and why, which times and days are the best for posting, which hashtags and sounds are the most effective for reaching more users, which countries or regions are the most engaged with your content, which age groups and genders are the most interested in your content, and which topics or categories are the most appealing to your audience. You can then use this information to optimize your content strategy by creating more of the content that resonates with your audience, improving the quality and relevance of your content, experimenting with different formats and styles of content, testing different posting times and frequencies, and using different hashtags and sounds to reach more users.

How to collaborate with other creators and brands

Another way to grow your TikTok audience and influence is to collaborate with other creators and brands that share your niche, values, and goals. Collaboration can help you expand your reach, increase your credibility, and create more value for your audience.

To collaborate with other creators, you can use features like duets, stitches, live streams, or group chats to create joint content, cross-promote each other, or interact with each other's fans. You can also join or create a TikTok collective, which is a group of creators who work together to support each other and grow their influence.

To collaborate with brands, you can use platforms like FameBit, AspireIQ, or Upfluence to find and connect with brands that are looking for influencers to promote their products or services. You can also pitch directly to brands that you like and want to work with by sending them an email or a direct message on TikTok.

When collaborating with brands, you need to make sure that you follow the guidelines and best practices for creating sponsored content on TikTok. Some of these are:

- Disclose the sponsorship by using hashtags like #ad, #sponsored, or #partner in your caption.
- Be authentic and honest about your opinion and experience with the product or service.
- Be creative and original in your content and avoid copying or imitating other creators or brands.
- Be respectful and professional in your communication and interaction with the brand and the audience.
- Follow the terms and conditions of the agreement and deliver the content on time and as agreed.

I hope this helps you understand how to grow your TikTok audience and influence in 2023. If you have any questions, feel free to ask me. ?

    How to make money on TikTok

    -

    TikTok is not only a platform for fun and entertainment, but also a platform for making money. There are several ways that you can monetize your TikTok account and earn income from your content and influence. Here are some of the most common and effective ways to make money on TikTok in 2023:

    -

    tiktok asia 2016 apk free download
    -tiktok asia 2016 version android app
    -tiktok asia 2016 old versions apkcombo
    -tiktok asia 2016 update apk
    -tiktok asia 2016 mod apk download
    -tiktok asia 2016 latest version apk
    -tiktok asia 2016 beta apk xapk
    -tiktok asia 2016 app for android
    -tiktok asia 2016 video players & editors
    -tiktok asia 2016 global video community
    -download tiktok asia 2016 apk
    -install tiktok asia 2016 apk
    -how to download tiktok asia 2016 apk
    -where to download tiktok asia 2016 apk
    -why download tiktok asia 2016 apk
    -tiktok asia 2016 apk mirror
    -tiktok asia 2016 apk pure
    -tiktok asia 2016 apkpure.com
    -tiktok asia 2016 apkmonk.com
    -tiktok asia 2016 apkmirror.com
    -tiktok asia 2016 apk file download
    -tiktok asia 2016 apk offline installer
    -tiktok asia 2016 apk online installer
    -tiktok asia 2016 apk direct download link
    -tiktok asia 2016 apk no ads
    -tiktok asia 2016 apk unlimited likes
    -tiktok asia 2016 apk premium features
    -tiktok asia 2016 apk unlocked features
    -tiktok asia 2016 apk pro version
    -tiktok asia 2016 apk cracked version
    -tiktok asia 2016 apk modded version
    -tiktok asia 2016 apk hacked version
    -tiktok asia 2016 apk patched version
    -tiktok asia 2016 apk full version
    -tiktok asia 2016 apk original version
    -tiktok asia 2016 apk safe download
    -tiktok asia 2016 apk virus free download
    -tiktok asia 2016 apk malware free download
    -tiktok asia 2016 apk secure download
    -tiktok asia 2016 apk trusted download source
    -best site to download tiktok asia 2016 apk
    -best app to download tiktok asia 2016 apk
    -best way to download tiktok asia 2016 apk
    -fastest way to download tiktok asia 2016 apk
    -easiest way to download tiktok asia 2016 apk
    -cheapest way to download tiktok asia 2016 apk
    -most popular way to download tiktok asia 2016 apk
    -most reliable way to download tiktok asia 2016 apk
    -most convenient way to download tiktok asia 2016 apk

    -

    How to join the TikTok Creator Fund and get paid for views

    -

    The TikTok Creator Fund is a program that pays eligible creators for their video views on TikTok. The program was launched in 2020 and has since expanded to several countries, including the US, UK, Germany, France, Italy, Spain, India, Japan, Korea, Australia, Brazil, Mexico, Canada, Indonesia, Thailand, Vietnam, Turkey, Egypt, Saudi Arabia, UAE, South Africa, Nigeria, Kenya, Pakistan, Bangladesh, Sri Lanka, Nepal, Malaysia, Singapore, Philippines, Cambodia, Myanmar, Laos.

    -

    To join the TikTok Creator Fund, you need to meet the following requirements:

    - -

    To apply for the TikTok Creator Fund, you need to go to your profile page, tap the three dots at the top right corner, tap Creator Tools, and tap Creator Fund. You will then need to fill out some information and agree to the terms and conditions of the program. Once you join the TikTok Creator Fund, you will start earning money based on your video views and engagement. The amount of money you earn depends on various factors, such as the number of views, the location of the viewers, the quality of the content, and the current market rates. You can check your earnings and balance in the Creator Fund dashboard. To withdraw your money from the TikTok Creator Fund, you need to link your PayPal or bank account to your TikTok account. You can do this by going to your profile page, tapping the three dots at the top right corner, tapping Wallet, and tapping Link Account. You can then request a withdrawal of your balance once it reaches a minimum threshold of $50. The withdrawal process may take up to 15 days to complete.

    How to partner with brands and create sponsored content

    -

    Another way to make money on TikTok is to partner with brands and create sponsored content for them. Sponsored content is content that promotes a brand's product or service in exchange for a fee or a commission. Sponsored content can be in the form of product reviews, tutorials, testimonials, challenges, giveaways, or any other creative format that showcases the brand's value and benefits.

    -

    To partner with brands and create sponsored content, you need to have a large and engaged audience that matches the brand's target market. You also need to have a professional and attractive profile that showcases your niche, personality, and portfolio. You can use platforms like FameBit, AspireIQ, or Upfluence to find and connect with brands that are looking for influencers to work with. You can also pitch directly to brands that you like and want to work with by sending them an email or a direct message on TikTok.

    -

    When partnering with brands and creating sponsored content, you need to make sure that you follow the guidelines and best practices for creating sponsored content on TikTok. Some of these are:

    - -

    How to promote your own products or services on TikTok

    -

    If you have your own products or services that you want to sell or promote on TikTok, you can do so by creating engaging and informative content that showcases their value and benefits. You can also use features like TikTok Shop or TikTok Live Shopping to directly sell your products or services on the app.

    - TikTok Shop is a feature that allows you to create a mini-store within your profile page where you can display your products or services for sale. To use TikTok Shop, you need to have a verified business account on TikTok. You can apply for a business account by going to your profile page, tapping the three dots at the top right corner, tapping Manage account, tapping Switch to Pro account, tapping Business account, and filling out some information about your business. Once you have a business account on TikTok, you can create a shop by going to your profile page, tapping the three dots at the top right corner, tapping Creator Tools, and tapping TikTok Shop. You will then need to link your shop to a third-party e-commerce platform, such as Shopify, WooCommerce, or BigCommerce. You can then add your products or services to your shop by uploading their images, titles, prices, and descriptions. Once you have a shop on TikTok, you can promote your products or services by creating videos that showcase their features, benefits, reviews, testimonials, or tutorials. You can also add a Shop Now button to your videos that will direct viewers to your shop where they can purchase your products or services. TikTok Live Shopping is a feature that allows you to sell your products or services live on TikTok. To use TikTok Live Shopping, you need to have a verified business account on TikTok and a shop on TikTok. You can then go live by tapping the + icon at the bottom center of the screen and tapping Live. You can then select the products or services that you want to sell from your shop and display them on your live stream. During your live stream, you can talk about your products or services, answer questions from viewers, and encourage them to buy from you. You can also see how many viewers are watching your live stream, how many products or services have been sold, and how much revenue you have generated. You can also interact with viewers by sending them gifts, stickers, or messages.

    How to cross-promote your TikTok content on other platforms

    -

    Another way to make money on TikTok is to cross-promote your TikTok content on other platforms where you have an audience or a presence. This can help you drive more traffic to your TikTok account, increase your exposure and reach, and generate more revenue from different sources.

    -

    Some of the platforms that you can cross-promote your TikTok content on are:

-

I hope this helps you understand how to make money on TikTok in 2023. If you have any questions, feel free to ask me. ?

    Conclusion

    -

    TikTok is a powerful and popular platform that allows you to create and share short-form videos on any topic. It's also a platform that allows you to market your business, earn money from sponsorships and ads, or just have fun with your friends and followers. In this article, we showed you how to use TikTok for fun and profit in 2023. We covered the following topics: - What is TikTok and why should you use it? - How to create a TikTok account and profile - How to make and edit TikTok videos - How to discover and engage with TikTok content - How to grow your TikTok audience and influence - How to make money on TikTok We hope you found this article helpful and informative. If you want to learn more about TikTok, you can check out these resources: - [TikTok Help Center]: This is the official website where you can find answers to common questions, tips and tricks, and updates on TikTok. - [TikTok Newsroom]: This is the official blog where you can find the latest news, announcements, and stories about TikTok. - [TikTok Academy]: This is an online learning platform where you can find courses and tutorials on how to use TikTok for different purposes, such as education, entertainment, or business. Thank you for reading this article. We hope you enjoyed it and learned something new. If you have any feedback or questions, please let us know in the comments below. We would love to hear from you. ?

    FAQs

    -

    Here are some frequently asked questions about TikTok that you might find useful:

    -

    Is TikTok safe to use?

    -

    TikTok is generally safe to use, as long as you follow some basic safety precautions. Some of these are:

    - -

    How do I get verified on TikTok?

    -

    TikTok verifies accounts that belong to authentic, notable, and active creators or brands. To get verified on TikTok, you need to meet the following criteria:

    - -

    If you think you meet these criteria, you can apply for verification by contacting TikTok's support team via email or feedback form. However, there is no guarantee that your application will be accepted, as verification is granted at TikTok's discretion.

    -

    How do I delete my TikTok account?

    -

    If you want to delete your TikTok account permanently, you need to follow these steps:

    1. Go to your profile page and tap the three dots at the top right corner.
    2. Tap Manage account and tap Delete account at the bottom.
    3. Follow the instructions on the screen and confirm your deletion request.

    Note that deleting your account will remove all your videos, likes, comments, messages, followers, and other data from TikTok, and you will lose access to any services or features that require a TikTok account. TikTok first deactivates the account for about 30 days; once that period passes and the deletion becomes permanent, the account cannot be recovered.


    How do I download TikTok videos?


    If you want to download TikTok videos to your device, you need to follow these steps:

    1. Find the video that you want to download and tap the Share button at the bottom right corner.
    2. Tap Save Video and wait for the download to finish.
    3. Go to your device's gallery or camera roll and find the downloaded video.

    Note that some videos may not be available for download due to the creator's or the platform's settings. You can also use third-party apps or websites to download TikTok videos, but be careful about their security and legality.


    How do I go live on TikTok?


    If you want to go live on TikTok and broadcast your video in real-time, you need to follow these steps:

    1. Tap the + icon at the bottom center of the screen and tap Live.
    2. Enter a title for your live stream and choose a category for it.
    3. Tap Go Live and start your live stream.

    Note that you need at least 1,000 followers to go live on TikTok. You can invite other users to join your live stream by tapping the + icon at the bottom left corner and selecting someone from your following list, and you can interact with your viewers through comments, stickers, and messages, while they can send you gifts during the stream.


    That's it for this article. I hope you learned something new and useful about how to use TikTok for fun and profit in 2023. If you liked this article, please share it with your friends and family. And if you have any feedback or questions, please let me know in the comments below. I would love to hear from you.

    \ No newline at end of file diff --git a/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/torch2onnx.py b/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/torch2onnx.py deleted file mode 100644 index fc26ab82e552331bc8d75b34e81000418f4d38ec..0000000000000000000000000000000000000000 --- a/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/torch2onnx.py +++ /dev/null @@ -1,59 +0,0 @@ -import numpy as np -import onnx -import torch - - -def convert_onnx(net, path_module, output, opset=11, simplify=False): - assert isinstance(net, torch.nn.Module) - img = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.int32) - img = img.astype(np.float) - img = (img / 255. - 0.5) / 0.5 # torch style norm - img = img.transpose((2, 0, 1)) - img = torch.from_numpy(img).unsqueeze(0).float() - - weight = torch.load(path_module) - net.load_state_dict(weight) - net.eval() - torch.onnx.export(net, img, output, keep_initializers_as_inputs=False, verbose=False, opset_version=opset) - model = onnx.load(output) - graph = model.graph - graph.input[0].type.tensor_type.shape.dim[0].dim_param = 'None' - if simplify: - from onnxsim import simplify - model, check = simplify(model) - assert check, "Simplified ONNX model could not be validated" - onnx.save(model, output) - - -if __name__ == '__main__': - import os - import argparse - from backbones import get_model - - parser = argparse.ArgumentParser(description='ArcFace PyTorch to onnx') - parser.add_argument('input', type=str, help='input backbone.pth file or path') - parser.add_argument('--output', type=str, default=None, help='output onnx path') - parser.add_argument('--network', type=str, default=None, help='backbone network') - parser.add_argument('--simplify', type=bool, default=False, help='onnx simplify') - args = parser.parse_args() - input_file = args.input - if os.path.isdir(input_file): - input_file = os.path.join(input_file, "backbone.pth") - assert os.path.exists(input_file) - model_name = os.path.basename(os.path.dirname(input_file)).lower() - params = model_name.split("_") - if len(params) >= 3 and params[1] in ('arcface', 'cosface'): - if args.network is None: - args.network = params[2] - assert args.network is not None - print(args) - backbone_onnx = get_model(args.network, dropout=0) - - output_path = args.output - if output_path is None: - output_path = os.path.join(os.path.dirname(__file__), 'onnx') - if not os.path.exists(output_path): - os.makedirs(output_path) - assert os.path.isdir(output_path) - output_file = os.path.join(output_path, "%s.onnx" % model_name) - convert_onnx(backbone_onnx, input_file, output_file, simplify=args.simplify) diff --git a/spaces/801artistry/RVC801/lib/infer_pack/attentions.py b/spaces/801artistry/RVC801/lib/infer_pack/attentions.py deleted file mode 100644 index 05501be1871643f78dddbeaa529c96667031a8db..0000000000000000000000000000000000000000 --- a/spaces/801artistry/RVC801/lib/infer_pack/attentions.py +++ /dev/null @@ -1,417 +0,0 @@ -import copy -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -from lib.infer_pack import commons -from lib.infer_pack import modules -from lib.infer_pack.modules import LayerNorm - - -class Encoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - window_size=10, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = 
n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - window_size=window_size, - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - proximal_bias=False, - proximal_init=True, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - proximal_bias=proximal_bias, - proximal_init=proximal_init, - ) - ) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append( - MultiHeadAttention( - hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - causal=True, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to( - device=x.device, dtype=x.dtype - ) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__( - self, - channels, - out_channels, - n_heads, - p_dropout=0.0, - window_size=None, - heads_share=True, - block_length=None, - proximal_bias=False, - proximal_init=False, - ): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - 
self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - self.emb_rel_v = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert ( - t_s == t_t - ), "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys( - query / math.sqrt(self.k_channels), key_relative_embeddings - ) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to( - device=scores.device, dtype=scores.dtype - ) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert ( - t_s == t_t - ), "Local attention is only available for self-attention." 
- block_mask = ( - torch.ones_like(scores) - .triu(-self.block_length) - .tril(self.block_length) - ) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings( - self.emb_rel_v, t_s - ) - output = output + self._matmul_with_relative_values( - relative_weights, value_relative_embeddings - ) - output = ( - output.transpose(2, 3).contiguous().view(b, d, t_t) - ) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]), - ) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[ - :, slice_start_position:slice_end_position - ] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad( - x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]) - ) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[ - :, :, :length, length - 1 : - ] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad( - x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]) - ) - x_flat = x.view([batch, heads, length**2 + length * (length - 1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__( - self, - in_channels, - out_channels, - filter_channels, - kernel_size, - p_dropout=0.0, - activation=None, - causal=False, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/training/train.py b/spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/training/train.py deleted file mode 100644 index f5759c4679d2ee9c0748444adf66b8453cf09728..0000000000000000000000000000000000000000 --- a/spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/training/train.py +++ /dev/null @@ -1,838 +0,0 @@ -import json -import logging -import math -import os -import time -from contextlib import suppress - -import numpy as np -import torch -import torch.nn.functional as F - -try: - import wandb -except ImportError: - wandb = None - -from open_clip import ClipLoss, gather_features -from .distributed import is_master -from .zero_shot import zero_shot_eval - - -class AverageMeter(object): - """Computes and stores the average and current value""" - - def __init__(self): - self.reset() - - def reset(self): - self.val = 0 - self.avg = 0 - self.sum = 0 - self.count = 0 - - def update(self, val, n=1): - self.val = val - self.sum += val * n - self.count += n - self.avg = self.sum / self.count - - -def unwrap_model(model): - if hasattr(model, "module"): - return model.module - else: - return model - - -def train_one_epoch( - model, data, epoch, optimizer, scaler, scheduler, args, tb_writer=None -): - device = torch.device(args.device) - autocast = torch.cuda.amp.autocast if args.precision == "amp" else suppress - model.train() - loss = ClipLoss( - local_loss=args.local_loss, - gather_with_grad=args.gather_with_grad, - cache_labels=True, - rank=args.rank, - world_size=args.world_size, - use_horovod=args.horovod, - mlp_loss=args.clap_mlploss, - weight_loss_kappa=args.kappa, - ) - - dataloader, sampler = data["train"].dataloader, data["train"].sampler - if args.distributed and sampler is not None: - sampler.set_epoch(epoch) - num_batches_per_epoch = dataloader.num_batches 
- sample_digits = math.ceil(math.log(dataloader.num_samples + 1, 10)) - - # for toy dataset - if args.dataset_type == "toy": - dataloader.dataset.generate_queue() - - loss_m = AverageMeter() - batch_time_m = AverageMeter() - data_time_m = AverageMeter() - end = time.time() - - for i, batch in enumerate(dataloader): - # logging.info(f"batch {i} of {num_batches_per_epoch}") - step = num_batches_per_epoch * epoch + i - if isinstance(scheduler, dict): - for s in scheduler.values(): - s(step) - else: - scheduler(step) - audios = batch # contains mel_spec, wavform, and longer list - texts = batch["text"] - # audios = audios.to(device=device, non_blocking=True) - # texts = texts.to(device=device, non_blocking=True) - - data_time_m.update(time.time() - end) - if isinstance(optimizer, dict): - for o_ in optimizer.values(): - o_.zero_grad() - else: - optimizer.zero_grad() - - with autocast(): - ( - audio_features, - text_features, - audio_features_mlp, - text_features_mlp, - logit_scale_a, - logit_scale_t, - ) = model(audios, texts, device) - - if args.clap_mlploss: - total_loss = loss( - audio_features=audio_features, - text_features=text_features, - logit_scale_a=logit_scale_a, - logit_scale_t=logit_scale_t, - audio_features_mlp=audio_features_mlp, - text_features_mlp=text_features_mlp, - ) - else: - total_loss = loss( - audio_features=audio_features, - text_features=text_features, - logit_scale_a=logit_scale_a, - ) - if isinstance(optimizer, dict): - if scaler is not None: - scaler.scale(total_loss).backward() - for o_ in optimizer.values(): - if args.horovod: - o_.synchronize() - scaler.unscale_(o_) - with o_.skip_synchronize(): - scaler.step(o_) - else: - scaler.step(o_) - scaler.update() - else: - total_loss.backward() - for o_ in optimizer.values(): - o_.step() - else: - if scaler is not None: - scaler.scale(total_loss).backward() - if args.horovod: - optimizer.synchronize() - scaler.unscale_(optimizer) - with optimizer.skip_synchronize(): - scaler.step(optimizer) - else: - scaler.step(optimizer) - scaler.update() - else: - total_loss.backward() - optimizer.step() - - # Note: we clamp to 4.6052 = ln(100), as in the original paper. 
- with torch.no_grad(): - unwrap_model(model).logit_scale_a.clamp_(0, math.log(100)) - if args.clap_mlploss: - unwrap_model(model).logit_scale_t.clamp_(0, math.log(100)) - - batch_time_m.update(time.time() - end) - end = time.time() - batch_count = i + 1 - if is_master(args) and (i % 100 == 0 or batch_count == num_batches_per_epoch): - if isinstance(audios, dict): - batch_size = len(audios["waveform"]) - else: - batch_size = len(audios) - num_samples = batch_count * batch_size * args.world_size - samples_per_epoch = dataloader.num_samples - percent_complete = 100.0 * batch_count / num_batches_per_epoch - - # NOTE loss is coarsely sampled, just master node and per log update - loss_m.update(total_loss.item(), batch_size) - logit_scale_scalar_a = logit_scale_a.item() - logit_scale_scalar_t = logit_scale_t.item() - if isinstance(optimizer, dict): - if args.clap_mlploss: - logging.info( - f"Train Epoch: {epoch} [{num_samples:>{sample_digits}}/{samples_per_epoch} ({percent_complete:.0f}%)] " - f"Loss: {loss_m.val:#.5g} ({loss_m.avg:#.4g}) " - f"Data (t): {data_time_m.avg:.3f} " - f"Batch (t): {batch_time_m.avg:.3f} " - f"LR: {[o_.param_groups[0]['lr'] for o_ in optimizer.values()]} " - f"Logit Scale Audio: {logit_scale_scalar_a:.3f}" - f"Logit Scale Text: {logit_scale_scalar_t:.3f}" - ) - log_data = { - "loss": loss_m.val, - "data_time": data_time_m.val, - "batch_time": batch_time_m.val, - "scale_audio": logit_scale_scalar_a, - "scale_text": logit_scale_scalar_t, - "lr": [o_.param_groups[0]["lr"] for o_ in optimizer.values()], - } - else: - logging.info( - f"Train Epoch: {epoch} [{num_samples:>{sample_digits}}/{samples_per_epoch} ({percent_complete:.0f}%)] " - f"Loss: {loss_m.val:#.5g} ({loss_m.avg:#.4g}) " - f"Data (t): {data_time_m.avg:.3f} " - f"Batch (t): {batch_time_m.avg:.3f} " - f"LR: {[o_.param_groups[0]['lr'] for o_ in optimizer.values()]} " - f"Logit Scale Audio: {logit_scale_scalar_a:.3f}" - ) - log_data = { - "loss": loss_m.val, - "data_time": data_time_m.val, - "batch_time": batch_time_m.val, - "scale_audio": logit_scale_scalar_a, - "lr": [o_.param_groups[0]["lr"] for o_ in optimizer.values()], - } - - else: - if args.clap_mlploss: - logging.info( - f"Train Epoch: {epoch} [{num_samples:>{sample_digits}}/{samples_per_epoch} ({percent_complete:.0f}%)] " - f"Loss: {loss_m.val:#.5g} ({loss_m.avg:#.4g}) " - f"Data (t): {data_time_m.avg:.3f} " - f"Batch (t): {batch_time_m.avg:.3f} " - f"LR: {optimizer.param_groups[0]['lr']:5f} " - f"Logit Scale Audio: {logit_scale_scalar_a:.3f}" - f"Logit Scale Text: {logit_scale_scalar_t:.3f}" - ) - - # Save train loss / etc. Using non avg meter values as loggers have their own smoothing - log_data = { - "loss": loss_m.val, - "data_time": data_time_m.val, - "batch_time": batch_time_m.val, - "scale_audio": logit_scale_scalar_a, - "scale_text": logit_scale_scalar_t, - "lr": optimizer.param_groups[0]["lr"], - } - else: - logging.info( - f"Train Epoch: {epoch} [{num_samples:>{sample_digits}}/{samples_per_epoch} ({percent_complete:.0f}%)] " - f"Loss: {loss_m.val:#.5g} ({loss_m.avg:#.4g}) " - f"Data (t): {data_time_m.avg:.3f} " - f"Batch (t): {batch_time_m.avg:.3f} " - f"LR: {optimizer.param_groups[0]['lr']:5f} " - f"Logit Scale Audio: {logit_scale_scalar_a:.3f}" - ) - - # Save train loss / etc. 
Using non avg meter values as loggers have their own smoothing - log_data = { - "loss": loss_m.val, - "data_time": data_time_m.val, - "batch_time": batch_time_m.val, - "scale_audio": logit_scale_scalar_a, - "lr": optimizer.param_groups[0]["lr"], - } - for name, val in log_data.items(): - name = "train/" + name - if tb_writer is not None: - tb_writer.add_scalar(name, val, step) - if args.wandb: - assert wandb is not None, "Please install wandb." - wandb.log({name: val, "step": step}) - - # resetting batch / data time meters per log window - batch_time_m.reset() - data_time_m.reset() - # end for - - -def evaluate(model, data, epoch, args, tb_writer=None): - metrics = {} - if not args.parallel_eval: - if not is_master(args): - return metrics - device = torch.device(args.device) - model.eval() - - # CHANGE - # zero_shot_metrics = zero_shot_eval(model, data, epoch, args) - # metrics.update(zero_shot_metrics) - if is_master(args): - print("Evaluating...") - autocast = torch.cuda.amp.autocast if args.precision == "amp" else suppress - if args.val_dataset_names == ["Clotho", "audiocaps"]: - # if only clotho and audiocaps are used, then we will use a different evaluation function. - # This is because in the Clotho and audiocaps valid and test set, there are 5 text for 1 audio. - if args.parallel_eval: - # (yusong): just a hack here. Don't use parallel eval when evaluating only clotho and audiocaps. - raise NotImplementedError( - "Parallel evaluation not supported for eval only Clotho and audiocaps." - ) - val_metrics_per_dataset = evaluate_clotho_audiocaps( - model, data, epoch, args, autocast, device, tb_writer - ) - for m in val_metrics_per_dataset.values(): - metrics.update(m) - if "epoch" not in metrics.keys(): - metrics.update({"epoch": epoch}) - metrics = select_top_metric_clotho_audiocaps( - metrics, val_metrics_per_dataset, args - ) - elif "val" in data and ( - args.val_frequency - and ((epoch % args.val_frequency) == 0 or epoch == args.epochs) - ): - dataloader = data["val"].dataloader - num_samples = 0 - samples_per_val = dataloader.num_samples - - # FIXME this does not scale past small eval datasets - # all_audio_features @ all_text_features will blow up memory and compute very quickly - eval_info = {} - if args.clap_mlploss: - eval_info["all"] = { - "cumulative_loss": 0.0, - "num_samples": 0, - "all_audio_features": [], - "all_text_features": [], - "all_audio_features_mlp": [], - "all_text_features_mlp": [], - } # cumulative_loss = 0.0 - else: - eval_info["all"] = { - "cumulative_loss": 0.0, - "num_samples": 0, - "all_audio_features": [], - "all_text_features": [], - } # cumu - # all_audio_features, all_text_features, all_audio_features_mlp, all_text_features_mlp = [], [], [], [] - with torch.no_grad(): - for i, batch in enumerate(dataloader): - audios = batch # contains mel_spec, wavform, and longer list - texts = batch["text"] - # audios = audios.to(device=device, non_blocking=True) - - all_names = list( - set(["-".join(b.split("/")[-3:-1]) for b in batch["__url__"]]) - ) - for name in all_names: - if name not in eval_info.keys(): - if args.clap_mlploss: - eval_info[name] = { - "cumulative_loss": 0.0, - "num_samples": 0, - "all_audio_features": [], - "all_text_features": [], - "all_audio_features_mlp": [], - "all_text_features_mlp": [], - } - else: - eval_info[name] = { - "cumulative_loss": 0.0, - "num_samples": 0, - "all_audio_features": [], - "all_text_features": [], - } - with autocast(): - ( - audio_features, - text_features, - audio_features_mlp, - text_features_mlp, - 
logit_scale_a, - logit_scale_t, - ) = model(audios, texts, device) - - if args.parallel_eval: - # multi-GPU eval - if args.clap_mlploss: - ( - audio_features, - text_features, - audio_features_mlp, - text_features_mlp, - ) = gather_features( - audio_features=audio_features, - text_features=text_features, - audio_features_mlp=audio_features_mlp, - text_features_mlp=text_features_mlp, - local_loss=False, - gather_with_grad=False, - rank=args.rank, - world_size=args.world_size, - use_horovod=args.horovod, - mlp_loss=args.clap_mlploss, - ) - else: - (audio_features, text_features,) = gather_features( - audio_features=audio_features, - text_features=text_features, - local_loss=False, - gather_with_grad=False, - rank=args.rank, - world_size=args.world_size, - use_horovod=args.horovod, - mlp_loss=args.clap_mlploss, - ) - - if is_master(args): - num_samples += audio_features.shape[0] - for n in [*all_names, "all"]: - if n == "all": - eval_info[n]["all_audio_features"].append( - audio_features.cpu() - ) - eval_info[n]["all_text_features"].append( - text_features.cpu() - ) - if args.clap_mlploss: - eval_info[n]["all_audio_features_mlp"].append( - audio_features_mlp.cpu() - ) - eval_info[n]["all_text_features_mlp"].append( - text_features_mlp.cpu() - ) - else: - idx = np.where( - np.array( - [ - "-".join(b.split("/")[-3:-1]) - for b in batch["__url__"] - ] - ) - == n - )[0] - eval_info[n]["all_audio_features"].append( - audio_features.cpu().index_select( - 0, torch.tensor(idx).long() - ) - ) - eval_info[n]["all_text_features"].append( - text_features.cpu().index_select( - 0, torch.tensor(idx).long() - ) - ) - if args.clap_mlploss: - eval_info[n]["all_audio_features_mlp"].append( - audio_features_mlp.cpu().index_select( - 0, torch.tensor(idx).long() - ) - ) - eval_info[n]["all_text_features_mlp"].append( - text_features_mlp.cpu().index_select( - 0, torch.tensor(idx).long() - ) - ) - # print(f'eval step {i}') # (yusong): for debug - - # cumulative_loss += total_loss * batch_size - # num_samples += batch_size - if is_master(args) and (i % 100) == 0: # and i != 0: - logging.info( - f"Eval Epoch: {epoch} [{num_samples} / {samples_per_val}]" - ) - if is_master(args): - val_metrics_per_dataset = {} - for n in eval_info.keys(): - if args.clap_mlploss: - metrics_single_dataset = get_metrics( - audio_features=torch.cat( - eval_info[n]["all_audio_features"] - ), - text_features=torch.cat(eval_info[n]["all_text_features"]), - logit_scale_a=logit_scale_a.cpu(), - audio_features_mlp=torch.cat( - eval_info[n]["all_audio_features_mlp"] - ), - text_features_mlp=torch.cat( - eval_info[n]["all_text_features_mlp"] - ), - logit_scale_t=logit_scale_t.cpu(), - mlp_loss=args.clap_mlploss, - ) - else: - metrics_single_dataset = get_metrics( - audio_features=torch.cat( - eval_info[n]["all_audio_features"] - ), - text_features=torch.cat(eval_info[n]["all_text_features"]), - logit_scale_a=logit_scale_a.cpu(), - mlp_loss=args.clap_mlploss, - ) - val_metrics_per_dataset[n] = { - n + "/" + k: v for k, v in metrics_single_dataset.items() - } - metrics.update(val_metrics_per_dataset[n]) - if "epoch" not in metrics.keys(): - metrics.update({"epoch": epoch}) - if is_master(args): - if not metrics: - return metrics - - logging.info( - f"Eval Epoch: {epoch} " - + "\n".join( - [ - "\t".join([f"{k}: {round(v, 4):.4f}" for k, v in m.items()]) - for m in val_metrics_per_dataset.values() - ] - ) - ) - - if args.save_logs: - for name, val in metrics.items(): - if tb_writer is not None: - tb_writer.add_scalar(f"val/{name}", val, epoch) - - 
with open(os.path.join(args.checkpoint_path, "results.jsonl"), "a+") as f: - f.write(json.dumps(metrics)) - f.write("\n") - - if args.wandb: - assert wandb is not None, "Please install wandb." - for name, val in metrics.items(): - wandb.log({f"val/{name}": val, "epoch": epoch}) - - return metrics - else: - return metrics - - -def get_metrics( - audio_features, - text_features, - logit_scale_a, - audio_features_mlp=None, - text_features_mlp=None, - logit_scale_t=None, - mlp_loss=False, -): - metrics = {} - if mlp_loss: - # Set up audio to text & text to audio similary matrice - a_logits_per_audio = ( - (logit_scale_a * audio_features @ text_features_mlp.t()).detach().cpu() - ) - a_logits_per_text = a_logits_per_audio.t().detach().cpu() - t_logits_per_audio = ( - (logit_scale_t * audio_features_mlp @ text_features.t()).detach().cpu() - ) - t_logits_per_text = t_logits_per_audio.t().detach().cpu() - - labels = torch.arange(audio_features.shape[0]).long() - # Change the loss from two terms into four terms with 2x2 combined CE loss - total_loss = ( - F.cross_entropy(a_logits_per_audio, labels) - + F.cross_entropy(a_logits_per_text, labels) - + F.cross_entropy(t_logits_per_audio, labels) - + F.cross_entropy(t_logits_per_text, labels) - ) / 4 - - metrics[f"cumulative_loss"] = total_loss.item() - metrics[f"num_samples"] = audio_features.shape[0] - - logits = { - "audio_to_text": (a_logits_per_audio + t_logits_per_audio) / 2, - "text_to_audio": (a_logits_per_text + t_logits_per_text) / 2, - } - ground_truth = torch.arange(len(text_features)).view(-1, 1) - - else: - # print("text_features", text_features) - # print("text_features.shape", text_features.shape) - logits_per_audio = ( - (logit_scale_a * audio_features @ text_features.t()).detach().cpu() - ) - logits_per_text = logits_per_audio.t().detach().cpu() - - labels = torch.arange(audio_features.shape[0]).long() - # Change the loss from two terms into four terms with 2x2 combined CE loss - total_loss = ( - F.cross_entropy(logits_per_audio, labels) - + F.cross_entropy(logits_per_text, labels) - ) / 2 - - metrics[f"cumulative_loss"] = total_loss.item() - metrics[f"num_samples"] = audio_features.shape[0] - - logits = {"audio_to_text": logits_per_audio, "text_to_audio": logits_per_text} - - ground_truth = torch.arange(len(text_features)).view(-1, 1) - - for name, logit in logits.items(): - ranking = torch.argsort(logit, descending=True) - preds = torch.where(ranking == ground_truth)[ - 1 - ] # (yusong) this line is slow because it uses single thread - preds = preds.detach().cpu().numpy() - metrics[f"{name}_mean_rank"] = preds.mean() + 1 - metrics[f"{name}_median_rank"] = np.floor(np.median(preds)) + 1 - for k in [1, 5, 10]: - metrics[f"{name}_R@{k}"] = np.mean(preds < k) - # map@10 - metrics[f"{name}_mAP@10"] = np.mean(np.where(preds < 10, 1 / (preds + 1), 0.0)) - - return metrics - - -def evaluate_clotho_audiocaps( - model, data, epoch, args, autocast, device, tb_writer=None -): - """ - Adapted from https://github.com/XinhaoMei/audio-text_retrieval/blob/main/tools/utils.py. - 1. for text-to-audio retrieval, do 5 times and average the results - 2. for R@1, R@5, R@10 in audio-to-text retrieval, take the best rank among 5 text - 3. for map@10 in audio-to-text retrieval: - 3.1: sort the rank of 5 text - 3.2: exclude the rank >=10 (0-index) - 3.3: compute the map regarding the remaining ranks: np.mean(np.arange(1, len(ranks)+1) / ranks). - (3.3) That is, take the top ranks of 5 text that is < 10, and assign the descending number as ground truth. 
- (3.3) E.g.: the ground truth of first rank of the 5 text should be 1, the second rank should be 2, etc. - """ - # TODO: (yusong) only support single GPU evaluation and only support non-mlp case for now. - dataloader = data["val"].dataloader - with torch.no_grad(): - eval_info = {} - for i, batch in enumerate(dataloader): - audios = batch # contains mel_spec, wavform, and longer list - - # each item in the list has 5 texts - if args.tmodel == "transformer": - from open_clip import tokenize - - texts = [tokenize(t) for t in batch["full_text"]] - texts = torch.cat(texts) - else: - from .data import tokenizer - - texts = [ - tokenizer(t) for t in batch["full_text"] - ] # 5 texts for each audio - texts = { - k: torch.cat([t[k] for t in texts]) for k in texts[0].keys() - } # 5 x batch - - # audios = audios.to(device=device, non_blocking=True) - - all_names = list( - set(["-".join(b.split("/")[-3:-1]) for b in batch["__url__"]]) - ) - for name in all_names: - if name not in eval_info.keys(): - # we will not use mlp outputs even if args.clap_mlploss=True - eval_info[name] = { - "cumulative_loss": 0.0, - "num_samples": 0, - "all_audio_features": [], - "all_text_features": [], - } - with autocast(): - audio_features = model(audios, None, device) - text_features = model(None, texts, device) - audio_features = F.normalize(audio_features, dim=-1) - text_features = F.normalize(text_features, dim=-1) - - all_names = list( - set(["-".join(b.split("/")[-3:-1]) for b in batch["__url__"]]) - ) - for n in all_names: - idx = np.where( - np.array( - ["-".join(b.split("/")[-3:-1]) for b in batch["__url__"]] - ) - == n - )[0] - eval_info[n]["all_audio_features"].append( - audio_features.cpu().index_select(0, torch.tensor(idx).long()) - ) - # (yusong) please double-check. This is for selecting 5 text features at once. - # because idx is a list of indices in size of num_samples, - # and text_features is a tensor of size (5*num_samples, dim) - # so we need to select 5 consecutive indices at once for a single index in idx. - eval_info[n]["all_text_features"].append( - text_features.cpu() - .reshape([-1, 5, text_features.shape[1]]) - .index_select(0, torch.tensor(idx).long()) - .reshape([-1, text_features.shape[1]]) - ) - - val_metrics_all = {} - - for n in eval_info.keys(): - logit_scale_a, logit_scale_t = model(None, None, device) - logit_scale_a = logit_scale_a.cpu() - - audio_features = torch.cat(eval_info[n]["all_audio_features"], dim=0) - text_features = torch.cat(eval_info[n]["all_text_features"], dim=0) - - logits_per_audio = ( - (logit_scale_a * audio_features @ text_features.t()).detach().cpu() - ) - logits_per_text = logits_per_audio.t().detach().cpu() - - # logits_per_audio shape: [num_samples, num_samples*5] - # logits_per_text shape: [num_samples*5, num_samples] - - logging.info( - f"dataset {n}, logits_per_audio shape: {logits_per_audio.shape}, " - f"logits_per_text shape: {logits_per_text.shape}" - ) - - metrics = {} - num_samples = audio_features.shape[0] - metrics[f"num_samples"] = num_samples - - # (yusong) the following code is very important, please double-check: - # logits_per_audio.reshape(num_samples, num_samples, 5)[:, :, d] - # logits_per_text.reshape(num_samples, 5, num_samples)[:, d, :] - # Those two are retrieving one of the 5 text for each audio. 
- labels = torch.arange(audio_features.shape[0]).long() - audio_to_text_loss = [ - F.cross_entropy( - logits_per_audio.reshape(num_samples, num_samples, 5)[:, :, d], - labels, - ) - for d in range(5) - ] - text_to_audio_loss = [ - F.cross_entropy( - logits_per_text.reshape(num_samples, 5, num_samples)[:, d, :], - labels, - ) - for d in range(5) - ] - total_loss = (np.mean(audio_to_text_loss) + np.mean(text_to_audio_loss)) / 2 - - metrics[f"cumulative_loss"] = total_loss.item() - - # text to audio: do 5 times - pred_text = [] - for d in range(5): - logit = logits_per_text.reshape(num_samples, 5, num_samples)[:, d, :] - ground_truth = torch.arange(len(logit)).view(-1, 1) - ranking = torch.argsort( - logit, descending=True - ) # [num_samples, num_samples] - preds = torch.where(ranking == ground_truth)[1] - pred_text.append(preds.detach().cpu().numpy()) - pred_text_concat = np.concatenate(pred_text, axis=0) # [5*num_samples] - metrics[f"text_to_audio_mean_rank"] = pred_text_concat.mean() + 1 - metrics[f"text_to_audio_median_rank"] = ( - np.floor(np.median(pred_text_concat)) + 1 - ) - for k in [1, 5, 10]: - metrics[f"text_to_audio_R@{k}"] = np.mean(pred_text_concat < k) - # map@10 - metrics[f"text_to_audio_mAP@10"] = np.mean( - np.where(pred_text_concat < 10, 1 / (pred_text_concat + 1), 0.0) - ) - - # audio to text: take the best result - # for audio to text map 10, sort and assign descending ground truth. - # see https://github.com/XinhaoMei/audio-text_retrieval/blob/main/tools/utils.py#L103 - # map@10 - map_all = [] - pred_audio_all = [] - for d in range(num_samples): - # logits_per_audio: [num_samples, num_samples*5] - logit_single = logits_per_audio[d, :] # [5*num_samples] - # Ground-truth index: [d*5, d*5+1, d*5+2, d*5+3, d*5+4] - ranking = torch.argsort( - logit_single, descending=True - ) # [5*num_samples] - # ranking: the index of first match, second match, ... - ground_truth = torch.arange(d * 5, d * 5 + 5)[None] - all_pred = torch.where( - torch.stack([ranking] * 5) == ground_truth.view(-1, 1) - )[1] - min_pred = torch.min(all_pred) - pred_audio_all.append(min_pred.detach().cpu().numpy()) - all_pred_filter = all_pred[all_pred < 10].detach().cpu().numpy() - # /5 because we have 5 text, so it means for the text rank >=10 we count as 0. - map_single = ( - np.sum( - (np.arange(1, len(all_pred_filter) + 1) / (all_pred_filter + 1)) - ) - / 5 - ) - map_all.append(map_single) - metrics[f"audio_to_text_mAP@10"] = np.mean(map_all) - for k in [1, 5, 10]: - metrics[f"audio_to_text_R@{k}"] = np.mean(np.array(pred_audio_all) < k) - - val_metrics_all[n] = {n + "/" + k: v for k, v in metrics.items()} - return val_metrics_all - - -def calculate_selection_performance_clotho_audiocaps(val_metrics_per_dataset): - """ - Calculate performance for Clotho+AudioCaps for model selection. 
- """ - selection_performance_all = [] - for n in val_metrics_per_dataset.keys(): - selection_performance = ( - val_metrics_per_dataset[n][f"{n}/audio_to_text_mAP@10"] - + val_metrics_per_dataset[n][f"{n}/text_to_audio_mAP@10"] - ) / 2 - selection_performance_all.append(selection_performance) - return np.mean(selection_performance_all) - - -def select_top_metric_clotho_audiocaps(metrics, val_metrics_per_dataset, args): - # val_metrics_per_dataset: dict, key: dataset name, value: dict, key: metric name, value: metric value - # metrics: dict, key: metric name, value: metric value - # Hack: use args to save the top performance - if not hasattr(args, "top_selection_performance"): - selection_performance = calculate_selection_performance_clotho_audiocaps( - val_metrics_per_dataset - ) - # TODO: write the if and else together - metric_update = {} - for n in val_metrics_per_dataset.keys(): - for k in val_metrics_per_dataset[n].keys(): - metric_update[ - k.split("/")[0] + "-top" + "/" + k.split("/")[1] - ] = val_metrics_per_dataset[n][k] - metric_update["top_selection_performance"] = selection_performance - metric_update["top-selection-epoch"] = metrics["epoch"] - metrics.update(metric_update) - args.top_metric = metric_update - args.top_selection_performance = selection_performance - else: - selection_performance_new = calculate_selection_performance_clotho_audiocaps( - val_metrics_per_dataset - ) - selection_performance_old = args.top_selection_performance - if selection_performance_new > selection_performance_old: - metric_update = {} - for n in val_metrics_per_dataset.keys(): - for k in val_metrics_per_dataset[n].keys(): - metric_update[ - k.split("/")[0] + "-top" + "/" + k.split("/")[1] - ] = val_metrics_per_dataset[n][k] - metric_update["top_selection_performance"] = selection_performance_new - metric_update["top-selection-epoch"] = metrics["epoch"] - metrics.update(metric_update) - args.top_metric = metric_update - args.top_selection_performance = selection_performance_new - else: - metrics.update(args.top_metric) - return metrics diff --git a/spaces/AIWaves/Debate/src/agents/Memory/__init__.py b/spaces/AIWaves/Debate/src/agents/Memory/__init__.py deleted file mode 100644 index 56f3aa09d927077ebc7f1a925f956dee78cb1c26..0000000000000000000000000000000000000000 --- a/spaces/AIWaves/Debate/src/agents/Memory/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .base_Memory import Memory \ No newline at end of file diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/crowdhuman/__init__.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/crowdhuman/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/stores/pendingMessage.ts b/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/stores/pendingMessage.ts deleted file mode 100644 index f28d7aaf9995f9848f6c7988503c20a08d81d97c..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/stores/pendingMessage.ts +++ /dev/null @@ -1,3 +0,0 @@ -import { writable } from "svelte/store"; - -export const pendingMessage = writable(""); diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/spinner/Spinner.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/spinner/Spinner.js deleted file mode 100644 index 
5227a4bd76d71bd9194cc5dee0b2c7d895f9f9f7..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/spinner/Spinner.js +++ /dev/null @@ -1,34 +0,0 @@ -import Base from '../base/Base.js'; -import { Arc } from '../utils/Geoms.js' -import Yoyo from '../utils/Yoyo.js'; - -class Spinner extends Base { - constructor(scene, config) { - super(scene, config); - this.type = 'rexSpinnerSpinner'; - } - - buildShapes() { - this.addShape((new Arc()).setName('arc')); - } - - updateShapes() { - var centerX = this.centerX; - var centerY = this.centerY; - var radius = this.radius; - var lineWidth = Math.ceil(radius / 10); - var maxRadius = radius - lineWidth; - - var endAngle = this.value * 720; - var arcAngle = Yoyo(this.value) * 180; - var startAngle = endAngle - arcAngle; - this.getShape('arc') - .lineStyle(lineWidth, this.color, 1) - .setRadius(maxRadius) - .setCenterPosition(centerX, centerY) - .setAngle(startAngle + 315, endAngle + 315); - - } -} - -export default Spinner; \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/utils/CreateAnyImage.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/utils/CreateAnyImage.js deleted file mode 100644 index 6bb2965e32d9d63d6bae188a5a51732cafbb759c..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/utils/CreateAnyImage.js +++ /dev/null @@ -1,21 +0,0 @@ -import MergeStyle from './MergeStyle.js'; -import SetTextureProperties from './SetTextureProperties.js'; - -var CreateAnyImage = function (scene, data, view, styles, customBuilders, ImageClass) { - data = MergeStyle(data, styles); - var gameObject = new ImageClass(scene, 0, 0, data.key, data.frame); - - if (data.width !== undefined) { - gameObject.setDisplayWidth(data.width); - } - if (data.height !== undefined) { - gameObject.setDisplayHeight(data.height); - } - - SetTextureProperties(gameObject, data); - - scene.add.existing(gameObject); - return gameObject; -} - -export default CreateAnyImage; \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/Factory.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/Factory.d.ts deleted file mode 100644 index 730ea4ad57ec339bd546514710a72a8fcaa29a19..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/Factory.d.ts +++ /dev/null @@ -1,23 +0,0 @@ -import Sizer from './Sizer'; - -export default function ( - config?: Sizer.IConfig -): Sizer; - -export default function ( - x: number, y: number, - config?: Sizer.IConfig -): Sizer; - -export default function ( - x: number, y: number, - width: number, height: number, - config?: Sizer.IConfig -): Sizer; - -export default function ( - x: number, y: number, - width: number, height: number, - orientation?: Sizer.OrientationTypes, - config?: Sizer.IConfig -): Sizer; \ No newline at end of file diff --git a/spaces/Aki004/herta-so-vits/modules/commons.py b/spaces/Aki004/herta-so-vits/modules/commons.py deleted file mode 100644 index 074888006392e956ce204d8368362dbb2cd4e304..0000000000000000000000000000000000000000 --- a/spaces/Aki004/herta-so-vits/modules/commons.py +++ /dev/null @@ -1,188 +0,0 @@ -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -def 
slice_pitch_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, idx_str:idx_end] - return ret - -def rand_slice_segments_with_pitch(x, pitch, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - ret_pitch = slice_pitch_segments(pitch, ids_str, segment_size) - return ret, ret_pitch, ids_str - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def rand_spec_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d( - length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (num_timescales - 1)) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() 
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1. / norm_type) - return total_norm diff --git a/spaces/Alpaca233/SadTalker/src/facerender/modules/generator.py b/spaces/Alpaca233/SadTalker/src/facerender/modules/generator.py deleted file mode 100644 index 5a9edcb3b328d3afc99072b2461d7ca69919f813..0000000000000000000000000000000000000000 --- a/spaces/Alpaca233/SadTalker/src/facerender/modules/generator.py +++ /dev/null @@ -1,255 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F -from src.facerender.modules.util import ResBlock2d, SameBlock2d, UpBlock2d, DownBlock2d, ResBlock3d, SPADEResnetBlock -from src.facerender.modules.dense_motion import DenseMotionNetwork - - -class OcclusionAwareGenerator(nn.Module): - """ - Generator follows NVIDIA architecture. 
- """ - - def __init__(self, image_channel, feature_channel, num_kp, block_expansion, max_features, num_down_blocks, reshape_channel, reshape_depth, - num_resblocks, estimate_occlusion_map=False, dense_motion_params=None, estimate_jacobian=False): - super(OcclusionAwareGenerator, self).__init__() - - if dense_motion_params is not None: - self.dense_motion_network = DenseMotionNetwork(num_kp=num_kp, feature_channel=feature_channel, - estimate_occlusion_map=estimate_occlusion_map, - **dense_motion_params) - else: - self.dense_motion_network = None - - self.first = SameBlock2d(image_channel, block_expansion, kernel_size=(7, 7), padding=(3, 3)) - - down_blocks = [] - for i in range(num_down_blocks): - in_features = min(max_features, block_expansion * (2 ** i)) - out_features = min(max_features, block_expansion * (2 ** (i + 1))) - down_blocks.append(DownBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1))) - self.down_blocks = nn.ModuleList(down_blocks) - - self.second = nn.Conv2d(in_channels=out_features, out_channels=max_features, kernel_size=1, stride=1) - - self.reshape_channel = reshape_channel - self.reshape_depth = reshape_depth - - self.resblocks_3d = torch.nn.Sequential() - for i in range(num_resblocks): - self.resblocks_3d.add_module('3dr' + str(i), ResBlock3d(reshape_channel, kernel_size=3, padding=1)) - - out_features = block_expansion * (2 ** (num_down_blocks)) - self.third = SameBlock2d(max_features, out_features, kernel_size=(3, 3), padding=(1, 1), lrelu=True) - self.fourth = nn.Conv2d(in_channels=out_features, out_channels=out_features, kernel_size=1, stride=1) - - self.resblocks_2d = torch.nn.Sequential() - for i in range(num_resblocks): - self.resblocks_2d.add_module('2dr' + str(i), ResBlock2d(out_features, kernel_size=3, padding=1)) - - up_blocks = [] - for i in range(num_down_blocks): - in_features = max(block_expansion, block_expansion * (2 ** (num_down_blocks - i))) - out_features = max(block_expansion, block_expansion * (2 ** (num_down_blocks - i - 1))) - up_blocks.append(UpBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1))) - self.up_blocks = nn.ModuleList(up_blocks) - - self.final = nn.Conv2d(block_expansion, image_channel, kernel_size=(7, 7), padding=(3, 3)) - self.estimate_occlusion_map = estimate_occlusion_map - self.image_channel = image_channel - - def deform_input(self, inp, deformation): - _, d_old, h_old, w_old, _ = deformation.shape - _, _, d, h, w = inp.shape - if d_old != d or h_old != h or w_old != w: - deformation = deformation.permute(0, 4, 1, 2, 3) - deformation = F.interpolate(deformation, size=(d, h, w), mode='trilinear') - deformation = deformation.permute(0, 2, 3, 4, 1) - return F.grid_sample(inp, deformation) - - def forward(self, source_image, kp_driving, kp_source): - # Encoding (downsampling) part - out = self.first(source_image) - for i in range(len(self.down_blocks)): - out = self.down_blocks[i](out) - out = self.second(out) - bs, c, h, w = out.shape - # print(out.shape) - feature_3d = out.view(bs, self.reshape_channel, self.reshape_depth, h ,w) - feature_3d = self.resblocks_3d(feature_3d) - - # Transforming feature representation according to deformation and occlusion - output_dict = {} - if self.dense_motion_network is not None: - dense_motion = self.dense_motion_network(feature=feature_3d, kp_driving=kp_driving, - kp_source=kp_source) - output_dict['mask'] = dense_motion['mask'] - - if 'occlusion_map' in dense_motion: - occlusion_map = dense_motion['occlusion_map'] - output_dict['occlusion_map'] = 
occlusion_map - else: - occlusion_map = None - deformation = dense_motion['deformation'] - out = self.deform_input(feature_3d, deformation) - - bs, c, d, h, w = out.shape - out = out.view(bs, c*d, h, w) - out = self.third(out) - out = self.fourth(out) - - if occlusion_map is not None: - if out.shape[2] != occlusion_map.shape[2] or out.shape[3] != occlusion_map.shape[3]: - occlusion_map = F.interpolate(occlusion_map, size=out.shape[2:], mode='bilinear') - out = out * occlusion_map - - # output_dict["deformed"] = self.deform_input(source_image, deformation) # 3d deformation cannot deform 2d image - - # Decoding part - out = self.resblocks_2d(out) - for i in range(len(self.up_blocks)): - out = self.up_blocks[i](out) - out = self.final(out) - out = F.sigmoid(out) - - output_dict["prediction"] = out - - return output_dict - - -class SPADEDecoder(nn.Module): - def __init__(self): - super().__init__() - ic = 256 - oc = 64 - norm_G = 'spadespectralinstance' - label_nc = 256 - - self.fc = nn.Conv2d(ic, 2 * ic, 3, padding=1) - self.G_middle_0 = SPADEResnetBlock(2 * ic, 2 * ic, norm_G, label_nc) - self.G_middle_1 = SPADEResnetBlock(2 * ic, 2 * ic, norm_G, label_nc) - self.G_middle_2 = SPADEResnetBlock(2 * ic, 2 * ic, norm_G, label_nc) - self.G_middle_3 = SPADEResnetBlock(2 * ic, 2 * ic, norm_G, label_nc) - self.G_middle_4 = SPADEResnetBlock(2 * ic, 2 * ic, norm_G, label_nc) - self.G_middle_5 = SPADEResnetBlock(2 * ic, 2 * ic, norm_G, label_nc) - self.up_0 = SPADEResnetBlock(2 * ic, ic, norm_G, label_nc) - self.up_1 = SPADEResnetBlock(ic, oc, norm_G, label_nc) - self.conv_img = nn.Conv2d(oc, 3, 3, padding=1) - self.up = nn.Upsample(scale_factor=2) - - def forward(self, feature): - seg = feature - x = self.fc(feature) - x = self.G_middle_0(x, seg) - x = self.G_middle_1(x, seg) - x = self.G_middle_2(x, seg) - x = self.G_middle_3(x, seg) - x = self.G_middle_4(x, seg) - x = self.G_middle_5(x, seg) - x = self.up(x) - x = self.up_0(x, seg) # 256, 128, 128 - x = self.up(x) - x = self.up_1(x, seg) # 64, 256, 256 - - x = self.conv_img(F.leaky_relu(x, 2e-1)) - # x = torch.tanh(x) - x = F.sigmoid(x) - - return x - - -class OcclusionAwareSPADEGenerator(nn.Module): - - def __init__(self, image_channel, feature_channel, num_kp, block_expansion, max_features, num_down_blocks, reshape_channel, reshape_depth, - num_resblocks, estimate_occlusion_map=False, dense_motion_params=None, estimate_jacobian=False): - super(OcclusionAwareSPADEGenerator, self).__init__() - - if dense_motion_params is not None: - self.dense_motion_network = DenseMotionNetwork(num_kp=num_kp, feature_channel=feature_channel, - estimate_occlusion_map=estimate_occlusion_map, - **dense_motion_params) - else: - self.dense_motion_network = None - - self.first = SameBlock2d(image_channel, block_expansion, kernel_size=(3, 3), padding=(1, 1)) - - down_blocks = [] - for i in range(num_down_blocks): - in_features = min(max_features, block_expansion * (2 ** i)) - out_features = min(max_features, block_expansion * (2 ** (i + 1))) - down_blocks.append(DownBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1))) - self.down_blocks = nn.ModuleList(down_blocks) - - self.second = nn.Conv2d(in_channels=out_features, out_channels=max_features, kernel_size=1, stride=1) - - self.reshape_channel = reshape_channel - self.reshape_depth = reshape_depth - - self.resblocks_3d = torch.nn.Sequential() - for i in range(num_resblocks): - self.resblocks_3d.add_module('3dr' + str(i), ResBlock3d(reshape_channel, kernel_size=3, padding=1)) - - out_features = 
block_expansion * (2 ** (num_down_blocks)) - self.third = SameBlock2d(max_features, out_features, kernel_size=(3, 3), padding=(1, 1), lrelu=True) - self.fourth = nn.Conv2d(in_channels=out_features, out_channels=out_features, kernel_size=1, stride=1) - - self.estimate_occlusion_map = estimate_occlusion_map - self.image_channel = image_channel - - self.decoder = SPADEDecoder() - - def deform_input(self, inp, deformation): - _, d_old, h_old, w_old, _ = deformation.shape - _, _, d, h, w = inp.shape - if d_old != d or h_old != h or w_old != w: - deformation = deformation.permute(0, 4, 1, 2, 3) - deformation = F.interpolate(deformation, size=(d, h, w), mode='trilinear') - deformation = deformation.permute(0, 2, 3, 4, 1) - return F.grid_sample(inp, deformation) - - def forward(self, source_image, kp_driving, kp_source): - # Encoding (downsampling) part - out = self.first(source_image) - for i in range(len(self.down_blocks)): - out = self.down_blocks[i](out) - out = self.second(out) - bs, c, h, w = out.shape - # print(out.shape) - feature_3d = out.view(bs, self.reshape_channel, self.reshape_depth, h ,w) - feature_3d = self.resblocks_3d(feature_3d) - - # Transforming feature representation according to deformation and occlusion - output_dict = {} - if self.dense_motion_network is not None: - dense_motion = self.dense_motion_network(feature=feature_3d, kp_driving=kp_driving, - kp_source=kp_source) - output_dict['mask'] = dense_motion['mask'] - - # import pdb; pdb.set_trace() - - if 'occlusion_map' in dense_motion: - occlusion_map = dense_motion['occlusion_map'] - output_dict['occlusion_map'] = occlusion_map - else: - occlusion_map = None - deformation = dense_motion['deformation'] - out = self.deform_input(feature_3d, deformation) - - bs, c, d, h, w = out.shape - out = out.view(bs, c*d, h, w) - out = self.third(out) - out = self.fourth(out) - - # occlusion_map = torch.where(occlusion_map < 0.95, 0, occlusion_map) - - if occlusion_map is not None: - if out.shape[2] != occlusion_map.shape[2] or out.shape[3] != occlusion_map.shape[3]: - occlusion_map = F.interpolate(occlusion_map, size=out.shape[2:], mode='bilinear') - out = out * occlusion_map - - # Decoding part - out = self.decoder(out) - - output_dict["prediction"] = out - - return output_dict \ No newline at end of file diff --git a/spaces/Altinas/vits-uma-genshin-honkais/models.py b/spaces/Altinas/vits-uma-genshin-honkais/models.py deleted file mode 100644 index 52e15d1b9775038fd6e82b2efe6f95f51c66802d..0000000000000000000000000000000000000000 --- a/spaces/Altinas/vits-uma-genshin-honkais/models.py +++ /dev/null @@ -1,534 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -import attentions -import monotonic_align - -from torch.nn import Conv1d, ConvTranspose1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from commons import init_weights, get_padding - - -class StochasticDurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): - super().__init__() - filter_channels = in_channels # it needs to be removed from future version. 
- self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.log_flow = modules.Log() - self.flows = nn.ModuleList() - self.flows.append(modules.ElementwiseAffine(2)) - for i in range(n_flows): - self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.flows.append(modules.Flip()) - - self.post_pre = nn.Conv1d(1, filter_channels, 1) - self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - self.post_flows = nn.ModuleList() - self.post_flows.append(modules.ElementwiseAffine(2)) - for i in range(4): - self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.post_flows.append(modules.Flip()) - - self.pre = nn.Conv1d(in_channels, filter_channels, 1) - self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, filter_channels, 1) - - def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): - x = torch.detach(x) - x = self.pre(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.convs(x, x_mask) - x = self.proj(x) * x_mask - - if not reverse: - flows = self.flows - assert w is not None - - logdet_tot_q = 0 - h_w = self.post_pre(w) - h_w = self.post_convs(h_w, x_mask) - h_w = self.post_proj(h_w) * x_mask - e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask - z_q = e_q - for flow in self.post_flows: - z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) - logdet_tot_q += logdet_q - z_u, z1 = torch.split(z_q, [1, 1], 1) - u = torch.sigmoid(z_u) * x_mask - z0 = (w - u) * x_mask - logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2]) - logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q - - logdet_tot = 0 - z0, logdet = self.log_flow(z0, x_mask) - logdet_tot += logdet - z = torch.cat([z0, z1], 1) - for flow in flows: - z, logdet = flow(z, x_mask, g=x, reverse=reverse) - logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot - return nll + logq # [b] - else: - flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow - z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale - for flow in flows: - z = flow(z, x_mask, g=x, reverse=reverse) - z0, z1 = torch.split(z, [1, 1], 1) - logw = z0 - return logw - - -class DurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_2 = modules.LayerNorm(filter_channels) - self.proj = nn.Conv1d(filter_channels, 1, 1) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, 
in_channels, 1) - - def forward(self, x, x_mask, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - x = self.proj(x * x_mask) - return x * x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - - self.emb = nn.Embedding(n_vocab, hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths): - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class Generator(torch.nn.Module): - def __init__(self, initial_channel, 
resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)), - k, u, padding=(k-u)//2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel//(2**(i+1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i*self.num_kernels+j](x) - else: - xs += self.resblocks[i*self.num_kernels+j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - 
norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2,3,5,7,11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=0, - gin_channels=0, - use_sdp=True, - **kwargs): - - super().__init__() - self.n_vocab = n_vocab - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.n_speakers = n_speakers - self.gin_channels = gin_channels - - self.use_sdp = use_sdp - - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - - if use_sdp: - self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) - else: - self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) - - if n_speakers > 1: - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - def forward(self, x, x_lengths, y, y_lengths, sid=None): - - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - - with torch.no_grad(): - # negative cross-entropy - s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] - neg_cent1 = 
torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] - neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] - neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 - - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() - - w = attn.sum(2) - if self.use_sdp: - l_length = self.dp(x, x_mask, w, g=g) - l_length = l_length / torch.sum(x_mask) - else: - logw_ = torch.log(w + 1e-6) * x_mask - logw = self.dp(x, x_mask, g=g) - l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging - - # expand prior - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) - - z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) - o = self.dec(z_slice, g=g) - return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): - device = next(self.parameters()).device # 获取模型所在的设备 - x, m_p, logs_p, x_mask = self.enc_p(x.to(device), x_lengths.to(device)) - if self.n_speakers > 0: - g = self.emb_g(sid.to(device)).unsqueeze(-1) # [b, h, 1] - else: - g = None - - if self.use_sdp: - logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) - else: - logw = self.dp(x, x_mask, g=g) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - z = self.flow(z_p, y_mask, g=g, reverse=True) - o = self.dec((z * y_mask)[:,:,:max_len], g=g) - return o, attn, y_mask, (z, z_p, m_p, logs_p) - - def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): - assert self.n_speakers > 0, "n_speakers have to be larger than 0." 
-    g_src = self.emb_g(sid_src).unsqueeze(-1)
-    g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)
-    z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)
-    z_p = self.flow(z, y_mask, g=g_src)
-    z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)
-    o_hat = self.dec(z_hat * y_mask, g=g_tgt)
-    return o_hat, y_mask, (z, z_p, z_hat)
-
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/training/adapt_a_model.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/training/adapt_a_model.md
deleted file mode 100644
index 2b035a449c1d1119b48774949c2cfd330e1d77c9..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/training/adapt_a_model.md
+++ /dev/null
@@ -1,54 +0,0 @@
-
-
-# Adapting a model to a new task
-
-Many diffusion systems share the same components, so a model pretrained for one task can be adapted to a completely different task.
-
-This inpainting guide shows you how to adapt a pretrained text-to-image model to inpainting by initializing and modifying the architecture of a pretrained [`UNet2DConditionModel`].
-
-## Configuring UNet2DConditionModel parameters
-
-A [`UNet2DConditionModel`] accepts 4 channels in the [input sample](https://huggingface.co/docs/diffusers/v0.16.0/en/api/models#diffusers.UNet2DConditionModel.in_channels) by default. For example, load a pretrained text-to-image model such as [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) and check the number of `in_channels`:
-
-```py
-from diffusers import StableDiffusionPipeline
-
-pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
-pipeline.unet.config["in_channels"]
-4
-```
-
-Inpainting requires 9 channels in the input sample. You can check this value in a pretrained inpainting model such as [`runwayml/stable-diffusion-inpainting`](https://huggingface.co/runwayml/stable-diffusion-inpainting):
-
-```py
-from diffusers import StableDiffusionPipeline
-
-pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
-pipeline.unet.config["in_channels"]
-9
-```
-
-To adapt the text-to-image model to inpainting, you will need to change the number of `in_channels` from 4 to 9.
-
-Initialize a [`UNet2DConditionModel`] with the pretrained text-to-image model weights and change `in_channels` to 9. Because changing the number of `in_channels` changes the tensor sizes, you have to set `ignore_mismatched_sizes=True` and `low_cpu_mem_usage=False` to avoid a size mismatch error.
-
-```py
-from diffusers import UNet2DConditionModel
-
-model_id = "runwayml/stable-diffusion-v1-5"
-unet = UNet2DConditionModel.from_pretrained(
-    model_id, subfolder="unet", in_channels=9, low_cpu_mem_usage=False, ignore_mismatched_sizes=True
-)
-```
-
-The pretrained weights of the other components from the text-to-image model are initialized from the checkpoint, but the input channel weights (`conv_in.weight`) of the `unet` are randomly initialized. This is important when fine-tuning the model for inpainting, because otherwise the model would return noise.
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/modeling_flax_pytorch_utils.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/modeling_flax_pytorch_utils.py
deleted file mode 100644
index f9de83f87dab84d2e7fdd77b835db787cb4f1cb6..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/modeling_flax_pytorch_utils.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# coding=utf-8
-# Copyright 2023 The HuggingFace Inc. team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" PyTorch - Flax general utilities.""" -import re - -import jax.numpy as jnp -from flax.traverse_util import flatten_dict, unflatten_dict -from jax.random import PRNGKey - -from ..utils import logging - - -logger = logging.get_logger(__name__) - - -def rename_key(key): - regex = r"\w+[.]\d+" - pats = re.findall(regex, key) - for pat in pats: - key = key.replace(pat, "_".join(pat.split("."))) - return key - - -##################### -# PyTorch => Flax # -##################### - - -# Adapted from https://github.com/huggingface/transformers/blob/c603c80f46881ae18b2ca50770ef65fa4033eacd/src/transformers/modeling_flax_pytorch_utils.py#L69 -# and https://github.com/patil-suraj/stable-diffusion-jax/blob/main/stable_diffusion_jax/convert_diffusers_to_jax.py -def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict): - """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary""" - - # conv norm or layer norm - renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",) - if ( - any("norm" in str_ for str_ in pt_tuple_key) - and (pt_tuple_key[-1] == "bias") - and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) - and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) - ): - renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",) - return renamed_pt_tuple_key, pt_tensor - elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: - renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",) - return renamed_pt_tuple_key, pt_tensor - - # embedding - if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: - pt_tuple_key = pt_tuple_key[:-1] + ("embedding",) - return renamed_pt_tuple_key, pt_tensor - - # conv layer - renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",) - if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: - pt_tensor = pt_tensor.transpose(2, 3, 1, 0) - return renamed_pt_tuple_key, pt_tensor - - # linear layer - renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",) - if pt_tuple_key[-1] == "weight": - pt_tensor = pt_tensor.T - return renamed_pt_tuple_key, pt_tensor - - # old PyTorch layer norm weight - renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",) - if pt_tuple_key[-1] == "gamma": - return renamed_pt_tuple_key, pt_tensor - - # old PyTorch layer norm bias - renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",) - if pt_tuple_key[-1] == "beta": - return renamed_pt_tuple_key, pt_tensor - - return pt_tuple_key, pt_tensor - - -def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42): - # Step 1: Convert pytorch tensor to numpy - pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()} - - # Step 2: Since the model is stateless, get random Flax params - random_flax_params = flax_model.init_weights(PRNGKey(init_key)) - - random_flax_state_dict = flatten_dict(random_flax_params) - flax_state_dict = {} - - # Need to change some parameters name to match Flax names - for pt_key, pt_tensor in pt_state_dict.items(): - renamed_pt_key = rename_key(pt_key) - pt_tuple_key = 
tuple(renamed_pt_key.split(".")) - - # Correctly rename weight parameters - flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict) - - if flax_key in random_flax_state_dict: - if flax_tensor.shape != random_flax_state_dict[flax_key].shape: - raise ValueError( - f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " - f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." - ) - - # also add unexpected weight so that warning is thrown - flax_state_dict[flax_key] = jnp.asarray(flax_tensor) - - return unflatten_dict(flax_state_dict) diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py deleted file mode 100644 index c8dbb7321043ff4b88dc04a012a570dd88663243..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py +++ /dev/null @@ -1,738 +0,0 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import inspect -import warnings -from typing import Any, Callable, Dict, List, Optional, Union - -import numpy as np -import PIL -import torch -from packaging import version -from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer - -from ...configuration_utils import FrozenDict -from ...image_processor import VaeImageProcessor -from ...loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, UNet2DConditionModel -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( - PIL_INTERPOLATION, - deprecate, - is_accelerate_available, - is_accelerate_version, - logging, - randn_tensor, -) -from ..pipeline_utils import DiffusionPipeline -from . 
import StableDiffusionPipelineOutput -from .safety_checker import StableDiffusionSafetyChecker - - -logger = logging.get_logger(__name__) - - -def preprocess_image(image, batch_size): - w, h = image.size - w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 - image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]) - image = np.array(image).astype(np.float32) / 255.0 - image = np.vstack([image[None].transpose(0, 3, 1, 2)] * batch_size) - image = torch.from_numpy(image) - return 2.0 * image - 1.0 - - -def preprocess_mask(mask, batch_size, scale_factor=8): - if not isinstance(mask, torch.FloatTensor): - mask = mask.convert("L") - w, h = mask.size - w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 - mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"]) - mask = np.array(mask).astype(np.float32) / 255.0 - mask = np.tile(mask, (4, 1, 1)) - mask = np.vstack([mask[None]] * batch_size) - mask = 1 - mask # repaint white, keep black - mask = torch.from_numpy(mask) - return mask - - else: - valid_mask_channel_sizes = [1, 3] - # if mask channel is fourth tensor dimension, permute dimensions to pytorch standard (B, C, H, W) - if mask.shape[3] in valid_mask_channel_sizes: - mask = mask.permute(0, 3, 1, 2) - elif mask.shape[1] not in valid_mask_channel_sizes: - raise ValueError( - f"Mask channel dimension of size in {valid_mask_channel_sizes} should be second or fourth dimension," - f" but received mask of shape {tuple(mask.shape)}" - ) - # (potentially) reduce mask channel dimension from 3 to 1 for broadcasting to latent shape - mask = mask.mean(dim=1, keepdim=True) - h, w = mask.shape[-2:] - h, w = (x - x % 8 for x in (h, w)) # resize to integer multiple of 8 - mask = torch.nn.functional.interpolate(mask, (h // scale_factor, w // scale_factor)) - return mask - - -class StableDiffusionInpaintPipelineLegacy( - DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin -): - r""" - Pipeline for text-guided image inpainting using Stable Diffusion. *This is an experimental feature*. - - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the - library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) - - In addition the pipeline inherits the following loading methods: - - *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`] - - *LoRA*: [`loaders.LoraLoaderMixin.load_lora_weights`] - - *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`] - - as well as the following saving methods: - - *LoRA*: [`loaders.LoraLoaderMixin.save_lora_weights`] - - Args: - vae ([`AutoencoderKL`]): - Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. - text_encoder ([`CLIPTextModel`]): - Frozen text-encoder. Stable Diffusion uses the text portion of - [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically - the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. - tokenizer (`CLIPTokenizer`): - Tokenizer of class - [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). - unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. 
- scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of - [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. - safety_checker ([`StableDiffusionSafetyChecker`]): - Classification module that estimates whether generated images could be considered offensive or harmful. - Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. - feature_extractor ([`CLIPImageProcessor`]): - Model that extracts features from generated images to be used as inputs for the `safety_checker`. - """ - _optional_components = ["feature_extractor"] - - def __init__( - self, - vae: AutoencoderKL, - text_encoder: CLIPTextModel, - tokenizer: CLIPTokenizer, - unet: UNet2DConditionModel, - scheduler: KarrasDiffusionSchedulers, - safety_checker: StableDiffusionSafetyChecker, - feature_extractor: CLIPImageProcessor, - requires_safety_checker: bool = True, - ): - super().__init__() - - deprecation_message = ( - f"The class {self.__class__} is deprecated and will be removed in v1.0.0. You can achieve exactly the same functionality" - "by loading your model into `StableDiffusionInpaintPipeline` instead. See https://github.com/huggingface/diffusers/pull/3533" - "for more information." - ) - deprecate("legacy is outdated", "1.0.0", deprecation_message, standard_warn=False) - - if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: - deprecation_message = ( - f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" - f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " - "to update the config accordingly as leaving `steps_offset` might led to incorrect results" - " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," - " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" - " file" - ) - deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) - new_config = dict(scheduler.config) - new_config["steps_offset"] = 1 - scheduler._internal_dict = FrozenDict(new_config) - - if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: - deprecation_message = ( - f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." - " `clip_sample` should be set to False in the configuration file. Please make sure to update the" - " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" - " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" - " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" - ) - deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) - new_config = dict(scheduler.config) - new_config["clip_sample"] = False - scheduler._internal_dict = FrozenDict(new_config) - - if safety_checker is None and requires_safety_checker: - logger.warning( - f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" - " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" - " results in services or applications open to the public. 
Both the diffusers team and Hugging Face" - " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" - " it only for use-cases that involve analyzing network behavior or auditing its results. For more" - " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." - ) - - if safety_checker is not None and feature_extractor is None: - raise ValueError( - "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" - " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." - ) - - is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( - version.parse(unet.config._diffusers_version).base_version - ) < version.parse("0.9.0.dev0") - is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 - if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: - deprecation_message = ( - "The configuration file of the unet has set the default `sample_size` to smaller than" - " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" - " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" - " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" - " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" - " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" - " in the config might lead to incorrect results in future versions. If you have downloaded this" - " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" - " the `unet/config.json` file" - ) - deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) - new_config = dict(unet.config) - new_config["sample_size"] = 64 - unet._internal_dict = FrozenDict(new_config) - - self.register_modules( - vae=vae, - text_encoder=text_encoder, - tokenizer=tokenizer, - unet=unet, - scheduler=scheduler, - safety_checker=safety_checker, - feature_extractor=feature_extractor, - ) - self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) - self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) - self.register_to_config(requires_safety_checker=requires_safety_checker) - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_model_cpu_offload - def enable_model_cpu_offload(self, gpu_id=0): - r""" - Offload all models to CPU to reduce memory usage with a low impact on performance. Moves one whole model at a - time to the GPU when its `forward` method is called, and the model remains in GPU until the next model runs. - Memory savings are lower than using `enable_sequential_cpu_offload`, but performance is much better due to the - iterative execution of the `unet`. 
- """ - if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): - from accelerate import cpu_offload_with_hook - else: - raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.") - - device = torch.device(f"cuda:{gpu_id}") - - if self.device.type != "cpu": - self.to("cpu", silence_dtype_warnings=True) - torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) - - hook = None - for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]: - _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) - - if self.safety_checker is not None: - _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook) - - # We'll offload the last model manually. - self.final_offload_hook = hook - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt - def _encode_prompt( - self, - prompt, - device, - num_images_per_prompt, - do_classifier_free_guidance, - negative_prompt=None, - prompt_embeds: Optional[torch.FloatTensor] = None, - negative_prompt_embeds: Optional[torch.FloatTensor] = None, - lora_scale: Optional[float] = None, - ): - r""" - Encodes the prompt into text encoder hidden states. - - Args: - prompt (`str` or `List[str]`, *optional*): - prompt to be encoded - device: (`torch.device`): - torch device - num_images_per_prompt (`int`): - number of images that should be generated per prompt - do_classifier_free_guidance (`bool`): - whether to use classifier free guidance or not - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation. If not defined, one has to pass - `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is - less than `1`). - prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not - provided, text embeddings will be generated from `prompt` input argument. - negative_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt - weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input - argument. - lora_scale (`float`, *optional*): - A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. 
- """ - # set lora scale so that monkey patched LoRA - # function of text encoder can correctly access it - if lora_scale is not None and isinstance(self, LoraLoaderMixin): - self._lora_scale = lora_scale - - if prompt is not None and isinstance(prompt, str): - batch_size = 1 - elif prompt is not None and isinstance(prompt, list): - batch_size = len(prompt) - else: - batch_size = prompt_embeds.shape[0] - - if prompt_embeds is None: - # textual inversion: procecss multi-vector tokens if necessary - if isinstance(self, TextualInversionLoaderMixin): - prompt = self.maybe_convert_prompt(prompt, self.tokenizer) - - text_inputs = self.tokenizer( - prompt, - padding="max_length", - max_length=self.tokenizer.model_max_length, - truncation=True, - return_tensors="pt", - ) - text_input_ids = text_inputs.input_ids - untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids - - if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( - text_input_ids, untruncated_ids - ): - removed_text = self.tokenizer.batch_decode( - untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] - ) - logger.warning( - "The following part of your input was truncated because CLIP can only handle sequences up to" - f" {self.tokenizer.model_max_length} tokens: {removed_text}" - ) - - if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: - attention_mask = text_inputs.attention_mask.to(device) - else: - attention_mask = None - - prompt_embeds = self.text_encoder( - text_input_ids.to(device), - attention_mask=attention_mask, - ) - prompt_embeds = prompt_embeds[0] - - prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) - - bs_embed, seq_len, _ = prompt_embeds.shape - # duplicate text embeddings for each generation per prompt, using mps friendly method - prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) - prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) - - # get unconditional embeddings for classifier free guidance - if do_classifier_free_guidance and negative_prompt_embeds is None: - uncond_tokens: List[str] - if negative_prompt is None: - uncond_tokens = [""] * batch_size - elif prompt is not None and type(prompt) is not type(negative_prompt): - raise TypeError( - f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" - f" {type(prompt)}." - ) - elif isinstance(negative_prompt, str): - uncond_tokens = [negative_prompt] - elif batch_size != len(negative_prompt): - raise ValueError( - f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" - f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" - " the batch size of `prompt`." 
- ) - else: - uncond_tokens = negative_prompt - - # textual inversion: procecss multi-vector tokens if necessary - if isinstance(self, TextualInversionLoaderMixin): - uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) - - max_length = prompt_embeds.shape[1] - uncond_input = self.tokenizer( - uncond_tokens, - padding="max_length", - max_length=max_length, - truncation=True, - return_tensors="pt", - ) - - if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: - attention_mask = uncond_input.attention_mask.to(device) - else: - attention_mask = None - - negative_prompt_embeds = self.text_encoder( - uncond_input.input_ids.to(device), - attention_mask=attention_mask, - ) - negative_prompt_embeds = negative_prompt_embeds[0] - - if do_classifier_free_guidance: - # duplicate unconditional embeddings for each generation per prompt, using mps friendly method - seq_len = negative_prompt_embeds.shape[1] - - negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) - - negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) - negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) - - # For classifier free guidance, we need to do two forward passes. - # Here we concatenate the unconditional and text embeddings into a single batch - # to avoid doing two forward passes - prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) - - return prompt_embeds - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker - def run_safety_checker(self, image, device, dtype): - if self.safety_checker is None: - has_nsfw_concept = None - else: - if torch.is_tensor(image): - feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") - else: - feature_extractor_input = self.image_processor.numpy_to_pil(image) - safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) - image, has_nsfw_concept = self.safety_checker( - images=image, clip_input=safety_checker_input.pixel_values.to(dtype) - ) - return image, has_nsfw_concept - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents - def decode_latents(self, latents): - warnings.warn( - "The decode_latents method is deprecated and will be removed in a future version. Please" - " use VaeImageProcessor instead", - FutureWarning, - ) - latents = 1 / self.vae.config.scaling_factor * latents - image = self.vae.decode(latents, return_dict=False)[0] - image = (image / 2 + 0.5).clamp(0, 1) - # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 - image = image.cpu().permute(0, 2, 3, 1).float().numpy() - return image - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs - def prepare_extra_step_kwargs(self, generator, eta): - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature - # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 - # and should be between [0, 1] - - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) - extra_step_kwargs = {} - if accepts_eta: - extra_step_kwargs["eta"] = eta - - # check if the scheduler accepts generator - accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) - if accepts_generator: - extra_step_kwargs["generator"] = generator - return extra_step_kwargs - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.check_inputs - def check_inputs( - self, prompt, strength, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None - ): - if strength < 0 or strength > 1: - raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") - - if (callback_steps is None) or ( - callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) - ): - raise ValueError( - f"`callback_steps` has to be a positive integer but is {callback_steps} of type" - f" {type(callback_steps)}." - ) - - if prompt is not None and prompt_embeds is not None: - raise ValueError( - f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" - " only forward one of the two." - ) - elif prompt is None and prompt_embeds is None: - raise ValueError( - "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." - ) - elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): - raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") - - if negative_prompt is not None and negative_prompt_embeds is not None: - raise ValueError( - f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" - f" {negative_prompt_embeds}. Please make sure to only forward one of the two." - ) - - if prompt_embeds is not None and negative_prompt_embeds is not None: - if prompt_embeds.shape != negative_prompt_embeds.shape: - raise ValueError( - "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" - f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" - f" {negative_prompt_embeds.shape}." 
- ) - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps - def get_timesteps(self, num_inference_steps, strength, device): - # get the original timestep using init_timestep - init_timestep = min(int(num_inference_steps * strength), num_inference_steps) - - t_start = max(num_inference_steps - init_timestep, 0) - timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] - - return timesteps, num_inference_steps - t_start - - def prepare_latents(self, image, timestep, num_images_per_prompt, dtype, device, generator): - image = image.to(device=device, dtype=dtype) - init_latent_dist = self.vae.encode(image).latent_dist - init_latents = init_latent_dist.sample(generator=generator) - init_latents = self.vae.config.scaling_factor * init_latents - - # Expand init_latents for batch_size and num_images_per_prompt - init_latents = torch.cat([init_latents] * num_images_per_prompt, dim=0) - init_latents_orig = init_latents - - # add noise to latents using the timesteps - noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype) - init_latents = self.scheduler.add_noise(init_latents, noise, timestep) - latents = init_latents - return latents, init_latents_orig, noise - - @torch.no_grad() - def __call__( - self, - prompt: Union[str, List[str]] = None, - image: Union[torch.FloatTensor, PIL.Image.Image] = None, - mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None, - strength: float = 0.8, - num_inference_steps: Optional[int] = 50, - guidance_scale: Optional[float] = 7.5, - negative_prompt: Optional[Union[str, List[str]]] = None, - num_images_per_prompt: Optional[int] = 1, - add_predicted_noise: Optional[bool] = False, - eta: Optional[float] = 0.0, - generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, - prompt_embeds: Optional[torch.FloatTensor] = None, - negative_prompt_embeds: Optional[torch.FloatTensor] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, - callback_steps: int = 1, - cross_attention_kwargs: Optional[Dict[str, Any]] = None, - ): - r""" - Function invoked when calling the pipeline for generation. - - Args: - prompt (`str` or `List[str]`, *optional*): - The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. - instead. - image (`torch.FloatTensor` or `PIL.Image.Image`): - `Image`, or tensor representing an image batch, that will be used as the starting point for the - process. This is the image whose masked region will be inpainted. - mask_image (`torch.FloatTensor` or `PIL.Image.Image`): - `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be - replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a - PIL image, it will be converted to a single channel (luminance) before use. If mask is a tensor, the - expected shape should be either `(B, H, W, C)` or `(B, C, H, W)`, where C is 1 or 3. - strength (`float`, *optional*, defaults to 0.8): - Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength` - is 1, the denoising process will be run on the masked area for the full number of iterations specified - in `num_inference_steps`. `image` will be used as a reference for the masked area, adding more noise to - that region the larger the `strength`. 
If `strength` is 0, no inpainting will occur. - num_inference_steps (`int`, *optional*, defaults to 50): - The reference number of denoising steps. More denoising steps usually lead to a higher quality image at - the expense of slower inference. This parameter will be modulated by `strength`, as explained above. - guidance_scale (`float`, *optional*, defaults to 7.5): - Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). - `guidance_scale` is defined as `w` of equation 2. of [Imagen - Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > - 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, - usually at the expense of lower image quality. - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation. If not defined, one has to pass - `negative_prompt_embeds`. instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` - is less than `1`). - num_images_per_prompt (`int`, *optional*, defaults to 1): - The number of images to generate per prompt. - add_predicted_noise (`bool`, *optional*, defaults to True): - Use predicted noise instead of random noise when constructing noisy versions of the original image in - the reverse diffusion process - eta (`float`, *optional*, defaults to 0.0): - Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to - [`schedulers.DDIMScheduler`], will be ignored for others. - generator (`torch.Generator`, *optional*): - One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) - to make generation deterministic. - prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not - provided, text embeddings will be generated from `prompt` input argument. - negative_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt - weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input - argument. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generate image. Choose between - [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a - plain tuple. - callback (`Callable`, *optional*): - A function that will be called every `callback_steps` steps during inference. The function will be - called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. - callback_steps (`int`, *optional*, defaults to 1): - The frequency at which the `callback` function will be called. If not specified, the callback will be - called at every step. - cross_attention_kwargs (`dict`, *optional*): - A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under - `self.processor` in - [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py). - - Returns: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. 
- When returning a tuple, the first element is a list with the generated images, and the second element is a - list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" - (nsfw) content, according to the `safety_checker`. - """ - # 1. Check inputs - self.check_inputs(prompt, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) - - # 2. Define call parameters - if prompt is not None and isinstance(prompt, str): - batch_size = 1 - elif prompt is not None and isinstance(prompt, list): - batch_size = len(prompt) - else: - batch_size = prompt_embeds.shape[0] - - device = self._execution_device - # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) - # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` - # corresponds to doing no classifier free guidance. - do_classifier_free_guidance = guidance_scale > 1.0 - - # 3. Encode input prompt - text_encoder_lora_scale = ( - cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None - ) - prompt_embeds = self._encode_prompt( - prompt, - device, - num_images_per_prompt, - do_classifier_free_guidance, - negative_prompt, - prompt_embeds=prompt_embeds, - negative_prompt_embeds=negative_prompt_embeds, - lora_scale=text_encoder_lora_scale, - ) - - # 4. Preprocess image and mask - if not isinstance(image, torch.FloatTensor): - image = preprocess_image(image, batch_size) - - mask_image = preprocess_mask(mask_image, batch_size, self.vae_scale_factor) - - # 5. set timesteps - self.scheduler.set_timesteps(num_inference_steps, device=device) - timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) - latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) - - # 6. Prepare latent variables - # encode the init image into latents and scale the latents - latents, init_latents_orig, noise = self.prepare_latents( - image, latent_timestep, num_images_per_prompt, prompt_embeds.dtype, device, generator - ) - - # 7. Prepare mask latent - mask = mask_image.to(device=device, dtype=latents.dtype) - mask = torch.cat([mask] * num_images_per_prompt) - - # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline - extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) - - # 9. 
Denoising loop - num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order - with self.progress_bar(total=num_inference_steps) as progress_bar: - for i, t in enumerate(timesteps): - # expand the latents if we are doing classifier free guidance - latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents - latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) - - # predict the noise residual - noise_pred = self.unet( - latent_model_input, - t, - encoder_hidden_states=prompt_embeds, - cross_attention_kwargs=cross_attention_kwargs, - return_dict=False, - )[0] - - # perform guidance - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) - - # compute the previous noisy sample x_t -> x_t-1 - latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] - # masking - if add_predicted_noise: - init_latents_proper = self.scheduler.add_noise( - init_latents_orig, noise_pred_uncond, torch.tensor([t]) - ) - else: - init_latents_proper = self.scheduler.add_noise(init_latents_orig, noise, torch.tensor([t])) - - latents = (init_latents_proper * mask) + (latents * (1 - mask)) - - # call the callback, if provided - if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): - progress_bar.update() - if callback is not None and i % callback_steps == 0: - callback(i, t, latents) - - # use original latents corresponding to unmasked portions of the image - latents = (init_latents_orig * mask) + (latents * (1 - mask)) - - if not output_type == "latent": - image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] - image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) - else: - image = latents - has_nsfw_concept = None - - if has_nsfw_concept is None: - do_denormalize = [True] * image.shape[0] - else: - do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] - - image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) - - # Offload last model to CPU - if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: - self.final_offload_hook.offload() - - if not return_dict: - return (image, has_nsfw_concept) - - return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r101-d8_769x769_40k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r101-d8_769x769_40k_cityscapes.py deleted file mode 100644 index 43f05fab05ee4e20c3509a923118fe9818543cbd..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r101-d8_769x769_40k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './ccnet_r50-d8_769x769_40k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/dmnet/dmnet_r101-d8_769x769_40k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/dmnet/dmnet_r101-d8_769x769_40k_cityscapes.py deleted file mode 100644 index 6b222e730073dd42df618db5660ee9d4117f3956..0000000000000000000000000000000000000000 --- 
a/spaces/Andy1621/uniformer_image_segmentation/configs/dmnet/dmnet_r101-d8_769x769_40k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './dmnet_r50-d8_769x769_40k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r50b-d8_512x1024_80k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r50b-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index 946bf4fc84236942a4462c2daa7637cace4e90cf..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r50b-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './pspnet_r50-d8_512x1024_80k_cityscapes.py' -model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/configs/_base_/datasets/cityscapes_769x769.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/configs/_base_/datasets/cityscapes_769x769.py deleted file mode 100644 index 336c7b254fe392b4703039fec86a83acdbd2e1a5..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/configs/_base_/datasets/cityscapes_769x769.py +++ /dev/null @@ -1,35 +0,0 @@ -_base_ = './cityscapes.py' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -crop_size = (769, 769) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations'), - dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)), - dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), - dict(type='RandomFlip', prob=0.5), - dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2049, 1025), - # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/spaces/AriaMei/TTSdemo/text/__init__.py b/spaces/AriaMei/TTSdemo/text/__init__.py deleted file mode 100644 index 48ae82f3e40ecd1bf17a7de78d87790327af3362..0000000000000000000000000000000000000000 --- a/spaces/AriaMei/TTSdemo/text/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -""" from https://github.com/keithito/tacotron """ -from text import cleaners -from text.symbols import symbols - - -# Mappings from symbol to numeric ID and vice versa: -_symbol_to_id = {s: i for i, s in enumerate(symbols)} -_id_to_symbol = {i: s for i, s in enumerate(symbols)} - - -def text_to_sequence(text, cleaner_names): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. 
- Args: - text: string to convert to a sequence - cleaner_names: names of the cleaner functions to run the text through - Returns: - List of integers corresponding to the symbols in the text - ''' - sequence = [] - - clean_text = _clean_text(text, cleaner_names) - for symbol in clean_text: - if symbol not in _symbol_to_id.keys(): - continue - symbol_id = _symbol_to_id[symbol] - sequence += [symbol_id] - return sequence - - -def cleaned_text_to_sequence(cleaned_text): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - Returns: - List of integers corresponding to the symbols in the text - ''' - sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()] - return sequence - - -def sequence_to_text(sequence): - '''Converts a sequence of IDs back to a string''' - result = '' - for symbol_id in sequence: - s = _id_to_symbol[symbol_id] - result += s - return result - - -def _clean_text(text, cleaner_names): - for name in cleaner_names: - cleaner = getattr(cleaners, name) - if not cleaner: - raise Exception('Unknown cleaner: %s' % name) - text = cleaner(text) - return text diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/index/sources.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/index/sources.py deleted file mode 100644 index cd9cb8d40f135d1da7d2517630816605a0805fe7..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/index/sources.py +++ /dev/null @@ -1,223 +0,0 @@ -import logging -import mimetypes -import os -import pathlib -from typing import Callable, Iterable, Optional, Tuple - -from pip._internal.models.candidate import InstallationCandidate -from pip._internal.models.link import Link -from pip._internal.utils.urls import path_to_url, url_to_path -from pip._internal.vcs import is_url - -logger = logging.getLogger(__name__) - -FoundCandidates = Iterable[InstallationCandidate] -FoundLinks = Iterable[Link] -CandidatesFromPage = Callable[[Link], Iterable[InstallationCandidate]] -PageValidator = Callable[[Link], bool] - - -class LinkSource: - @property - def link(self) -> Optional[Link]: - """Returns the underlying link, if there's one.""" - raise NotImplementedError() - - def page_candidates(self) -> FoundCandidates: - """Candidates found by parsing an archive listing HTML file.""" - raise NotImplementedError() - - def file_links(self) -> FoundLinks: - """Links found by specifying archives directly.""" - raise NotImplementedError() - - -def _is_html_file(file_url: str) -> bool: - return mimetypes.guess_type(file_url, strict=False)[0] == "text/html" - - -class _FlatDirectorySource(LinkSource): - """Link source specified by ``--find-links=``. - - This looks the content of the directory, and returns: - - * ``page_candidates``: Links listed on each HTML file in the directory. - * ``file_candidates``: Archives in the directory. 
- """ - - def __init__( - self, - candidates_from_page: CandidatesFromPage, - path: str, - ) -> None: - self._candidates_from_page = candidates_from_page - self._path = pathlib.Path(os.path.realpath(path)) - - @property - def link(self) -> Optional[Link]: - return None - - def page_candidates(self) -> FoundCandidates: - for path in self._path.iterdir(): - url = path_to_url(str(path)) - if not _is_html_file(url): - continue - yield from self._candidates_from_page(Link(url)) - - def file_links(self) -> FoundLinks: - for path in self._path.iterdir(): - url = path_to_url(str(path)) - if _is_html_file(url): - continue - yield Link(url) - - -class _LocalFileSource(LinkSource): - """``--find-links=`` or ``--[extra-]index-url=``. - - If a URL is supplied, it must be a ``file:`` URL. If a path is supplied to - the option, it is converted to a URL first. This returns: - - * ``page_candidates``: Links listed on an HTML file. - * ``file_candidates``: The non-HTML file. - """ - - def __init__( - self, - candidates_from_page: CandidatesFromPage, - link: Link, - ) -> None: - self._candidates_from_page = candidates_from_page - self._link = link - - @property - def link(self) -> Optional[Link]: - return self._link - - def page_candidates(self) -> FoundCandidates: - if not _is_html_file(self._link.url): - return - yield from self._candidates_from_page(self._link) - - def file_links(self) -> FoundLinks: - if _is_html_file(self._link.url): - return - yield self._link - - -class _RemoteFileSource(LinkSource): - """``--find-links=`` or ``--[extra-]index-url=``. - - This returns: - - * ``page_candidates``: Links listed on an HTML file. - * ``file_candidates``: The non-HTML file. - """ - - def __init__( - self, - candidates_from_page: CandidatesFromPage, - page_validator: PageValidator, - link: Link, - ) -> None: - self._candidates_from_page = candidates_from_page - self._page_validator = page_validator - self._link = link - - @property - def link(self) -> Optional[Link]: - return self._link - - def page_candidates(self) -> FoundCandidates: - if not self._page_validator(self._link): - return - yield from self._candidates_from_page(self._link) - - def file_links(self) -> FoundLinks: - yield self._link - - -class _IndexDirectorySource(LinkSource): - """``--[extra-]index-url=``. - - This is treated like a remote URL; ``candidates_from_page`` contains logic - for this by appending ``index.html`` to the link. - """ - - def __init__( - self, - candidates_from_page: CandidatesFromPage, - link: Link, - ) -> None: - self._candidates_from_page = candidates_from_page - self._link = link - - @property - def link(self) -> Optional[Link]: - return self._link - - def page_candidates(self) -> FoundCandidates: - yield from self._candidates_from_page(self._link) - - def file_links(self) -> FoundLinks: - return () - - -def build_source( - location: str, - *, - candidates_from_page: CandidatesFromPage, - page_validator: PageValidator, - expand_dir: bool, - cache_link_parsing: bool, -) -> Tuple[Optional[str], Optional[LinkSource]]: - path: Optional[str] = None - url: Optional[str] = None - if os.path.exists(location): # Is a local path. - url = path_to_url(location) - path = location - elif location.startswith("file:"): # A file: URL. - url = location - path = url_to_path(location) - elif is_url(location): - url = location - - if url is None: - msg = ( - "Location '%s' is ignored: " - "it is either a non-existing path or lacks a specific scheme." 
- ) - logger.warning(msg, location) - return (None, None) - - if path is None: - source: LinkSource = _RemoteFileSource( - candidates_from_page=candidates_from_page, - page_validator=page_validator, - link=Link(url, cache_link_parsing=cache_link_parsing), - ) - return (url, source) - - if os.path.isdir(path): - if expand_dir: - source = _FlatDirectorySource( - candidates_from_page=candidates_from_page, - path=path, - ) - else: - source = _IndexDirectorySource( - candidates_from_page=candidates_from_page, - link=Link(url, cache_link_parsing=cache_link_parsing), - ) - return (url, source) - elif os.path.isfile(path): - source = _LocalFileSource( - candidates_from_page=candidates_from_page, - link=Link(url, cache_link_parsing=cache_link_parsing), - ) - return (url, source) - logger.warning( - "Location '%s' is ignored: it is neither a file nor a directory.", - location, - ) - return (url, None) diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/__init__.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/__init__.py deleted file mode 100644 index bdd994b49294485c27610772f97f177741f5518f..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. - -from .utils.env import setup_environment - -setup_environment() - - -# This line will be programatically read/write by setup.py. -# Leave them at the bottom of this file and don't touch them. -__version__ = "0.6" diff --git a/spaces/BAAI/dreambooth-altdiffusion/convertosd.py b/spaces/BAAI/dreambooth-altdiffusion/convertosd.py deleted file mode 100644 index e4bec6cbe894dd74b24f633cc66346d687d3f802..0000000000000000000000000000000000000000 --- a/spaces/BAAI/dreambooth-altdiffusion/convertosd.py +++ /dev/null @@ -1,226 +0,0 @@ -# Script for converting a HF Diffusers saved pipeline to a Stable Diffusion checkpoint. -# *Only* converts the UNet, VAE, and Text Encoder. -# Does not convert optimizer state or any other thing. -# Written by jachiam - -import argparse -import os.path as osp - -import torch -import gc - -# =================# -# UNet Conversion # -# =================# - -unet_conversion_map = [ - # (stable-diffusion, HF Diffusers) - ("time_embed.0.weight", "time_embedding.linear_1.weight"), - ("time_embed.0.bias", "time_embedding.linear_1.bias"), - ("time_embed.2.weight", "time_embedding.linear_2.weight"), - ("time_embed.2.bias", "time_embedding.linear_2.bias"), - ("input_blocks.0.0.weight", "conv_in.weight"), - ("input_blocks.0.0.bias", "conv_in.bias"), - ("out.0.weight", "conv_norm_out.weight"), - ("out.0.bias", "conv_norm_out.bias"), - ("out.2.weight", "conv_out.weight"), - ("out.2.bias", "conv_out.bias"), -] - -unet_conversion_map_resnet = [ - # (stable-diffusion, HF Diffusers) - ("in_layers.0", "norm1"), - ("in_layers.2", "conv1"), - ("out_layers.0", "norm2"), - ("out_layers.3", "conv2"), - ("emb_layers.1", "time_emb_proj"), - ("skip_connection", "conv_shortcut"), -] - -unet_conversion_map_layer = [] -# hardcoded number of downblocks and resnets/attentions... -# would need smarter logic for other networks. -for i in range(4): - # loop over downblocks/upblocks - - for j in range(2): - # loop over resnets/attentions for downblocks - hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}." - sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0." 
- unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix)) - - if i < 3: - # no attention layers in down_blocks.3 - hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}." - sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1." - unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix)) - - for j in range(3): - # loop over resnets/attentions for upblocks - hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}." - sd_up_res_prefix = f"output_blocks.{3*i + j}.0." - unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix)) - - if i > 0: - # no attention layers in up_blocks.0 - hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}." - sd_up_atn_prefix = f"output_blocks.{3*i + j}.1." - unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix)) - - if i < 3: - # no downsample in down_blocks.3 - hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv." - sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op." - unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix)) - - # no upsample in up_blocks.3 - hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0." - sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}." - unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix)) - -hf_mid_atn_prefix = "mid_block.attentions.0." -sd_mid_atn_prefix = "middle_block.1." -unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) - -for j in range(2): - hf_mid_res_prefix = f"mid_block.resnets.{j}." - sd_mid_res_prefix = f"middle_block.{2*j}." - unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix)) - - -def convert_unet_state_dict(unet_state_dict): - # buyer beware: this is a *brittle* function, - # and correct output requires that all of these pieces interact in - # the exact order in which I have arranged them. - mapping = {k: k for k in unet_state_dict.keys()} - for sd_name, hf_name in unet_conversion_map: - mapping[hf_name] = sd_name - for k, v in mapping.items(): - if "resnets" in k: - for sd_part, hf_part in unet_conversion_map_resnet: - v = v.replace(hf_part, sd_part) - mapping[k] = v - for k, v in mapping.items(): - for sd_part, hf_part in unet_conversion_map_layer: - v = v.replace(hf_part, sd_part) - mapping[k] = v - new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()} - return new_state_dict - - -# ================# -# VAE Conversion # -# ================# - -vae_conversion_map = [ - # (stable-diffusion, HF Diffusers) - ("nin_shortcut", "conv_shortcut"), - ("norm_out", "conv_norm_out"), - ("mid.attn_1.", "mid_block.attentions.0."), -] - -for i in range(4): - # down_blocks have two resnets - for j in range(2): - hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}." - sd_down_prefix = f"encoder.down.{i}.block.{j}." - vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) - - if i < 3: - hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0." - sd_downsample_prefix = f"down.{i}.downsample." - vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix)) - - hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0." - sd_upsample_prefix = f"up.{3-i}.upsample." - vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) - - # up_blocks have three resnets - # also, up blocks in hf are numbered in reverse from sd - for j in range(3): - hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}." - sd_up_prefix = f"decoder.up.{3-i}.block.{j}." 
- vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) - -# this part accounts for mid blocks in both the encoder and the decoder -for i in range(2): - hf_mid_res_prefix = f"mid_block.resnets.{i}." - sd_mid_res_prefix = f"mid.block_{i+1}." - vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) - - -vae_conversion_map_attn = [ - # (stable-diffusion, HF Diffusers) - ("norm.", "group_norm."), - ("q.", "query."), - ("k.", "key."), - ("v.", "value."), - ("proj_out.", "proj_attn."), -] - - -def reshape_weight_for_sd(w): - # convert HF linear weights to SD conv2d weights - return w.reshape(*w.shape, 1, 1) - - -def convert_vae_state_dict(vae_state_dict): - mapping = {k: k for k in vae_state_dict.keys()} - for k, v in mapping.items(): - for sd_part, hf_part in vae_conversion_map: - v = v.replace(hf_part, sd_part) - mapping[k] = v - for k, v in mapping.items(): - if "attentions" in k: - for sd_part, hf_part in vae_conversion_map_attn: - v = v.replace(hf_part, sd_part) - mapping[k] = v - new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()} - weights_to_convert = ["q", "k", "v", "proj_out"] - print("Converting to CKPT ...") - for k, v in new_state_dict.items(): - for weight_name in weights_to_convert: - if f"mid.attn_1.{weight_name}.weight" in k: - new_state_dict[k] = reshape_weight_for_sd(v) - return new_state_dict - - -# =========================# -# Text Encoder Conversion # -# =========================# -# pretty much a no-op - - -def convert_text_enc_state_dict(text_enc_dict): - return text_enc_dict - - -def convert(model_path, checkpoint_path): - unet_path = osp.join(model_path, "unet", "diffusion_pytorch_model.bin") - vae_path = osp.join(model_path, "vae", "diffusion_pytorch_model.bin") - text_enc_path = osp.join(model_path, "text_encoder", "pytorch_model.bin") - - # Convert the UNet model - unet_state_dict = torch.load(unet_path, map_location='cpu') - unet_state_dict = convert_unet_state_dict(unet_state_dict) - unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()} - - # Convert the VAE model - vae_state_dict = torch.load(vae_path, map_location='cpu') - vae_state_dict = convert_vae_state_dict(vae_state_dict) - vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()} - - # Convert the text encoder model - text_enc_dict = torch.load(text_enc_path, map_location='cpu') - text_enc_dict = convert_text_enc_state_dict(text_enc_dict) - text_enc_dict = {"cond_stage_model.transformer." 
+ k: v for k, v in text_enc_dict.items()} - - # Put together new checkpoint - state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict} - - state_dict = {k:v.half() for k,v in state_dict.items()} - state_dict = {"state_dict": state_dict} - torch.save(state_dict, checkpoint_path) - del state_dict, text_enc_dict, vae_state_dict, unet_state_dict - torch.cuda.empty_cache() - gc.collect() diff --git a/spaces/BLACKHOST/Date/README.md b/spaces/BLACKHOST/Date/README.md deleted file mode 100644 index 3d564c01507471d525f2867735baafdb2bc54a00..0000000000000000000000000000000000000000 --- a/spaces/BLACKHOST/Date/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Date -emoji: 💩 -colorFrom: blue -colorTo: purple -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Balalaxmi/JarvisAIchatbox/README.md b/spaces/Balalaxmi/JarvisAIchatbox/README.md deleted file mode 100644 index b1f7d7e847147de99b90a092ac6759a10bf24969..0000000000000000000000000000000000000000 --- a/spaces/Balalaxmi/JarvisAIchatbox/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: JarvisAIchatbox -emoji: 📚 -colorFrom: pink -colorTo: red -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Benson/text-generation/Examples/Arco Iris Seis Sitio Mvil Apk Ios.md b/spaces/Benson/text-generation/Examples/Arco Iris Seis Sitio Mvil Apk Ios.md deleted file mode 100644 index 0421a7994f9e6421cce19e76f88ab00e3db8176d..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Arco Iris Seis Sitio Mvil Apk Ios.md +++ /dev/null @@ -1,84 +0,0 @@ -
    -

    Rainbow Six Siege Mobile: Todo lo que necesitas saber

    -

    Si eres un fan de los shooters tácticos, es posible que hayas oído hablar de Rainbow Six Siege, uno de los juegos más populares y aclamados del género. ¿Pero sabías que también puedes reproducirlo en tu teléfono? Así es, Rainbow Six Siege Mobile es un juego gratuito que trae la experiencia emocionante e inmersiva de Rainbow Six Siege a tu dispositivo móvil. En este artículo, te contaremos todo lo que necesitas saber sobre Rainbow Six Siege Mobile, incluyendo cómo descargarlo y jugarlo, cuáles son sus características y modos, y cuáles son algunos consejos y trucos para mejorar tu juego.

    -

    arco iris seis sitio móvil apk ios


    Download Ziphttps://bltlly.com/2v6LJr



    -

    ¿Qué es Rainbow Six Siege Mobile?

    -

    Rainbow Six Siege Mobile es un juego de disparos en primera persona competitivo y multijugador basado en la aclamada franquicia Rainbow Six. Está desarrollado por Ubisoft Montreal y publicado por Ubisoft. Fue lanzado para dispositivos iOS y Android el 30 de junio de 2021.

    -

    Un juego de disparos competitivo gratuito en su teléfono

    -

    Rainbow Six Siege Mobile es un juego gratuito que no requiere ningún pago por adelantado o suscripción para jugar. Puedes descargarlo desde la App Store o Google Play Store y disfrutarlo todo lo que quieras. Sin embargo, el juego ofrece compras opcionales dentro del juego que pueden mejorar tu experiencia de juego, como artículos cosméticos, potenciadores o divisas premium. También puedes ganar estos objetos jugando el juego regularmente y completando desafíos.

    -

    Una adaptación fiel de la versión para PC y consola

    - -

    Una experiencia de cross-play y cross-progression

    -

    Una de las mejores cosas de Rainbow Six Siege Mobile es que soporta cross-play y cross-progression. Esto significa que puede jugar con o contra otros jugadores que utilizan diferentes dispositivos, como iOS, Android, PC o consola. También puede cambiar entre dispositivos sin perder su progreso o elementos. Todo lo que necesita es una cuenta de Ubisoft que vincule sus dispositivos. De esta manera, puedes disfrutar de Rainbow Six Siege Mobile en cualquier momento, en cualquier lugar y con cualquier persona.

    -

    ¿Cómo descargar y jugar a Rainbow Six Siege Mobile?

    -

    Descargar y jugar a Rainbow Six Siege Mobile es muy fácil. Estos son los pasos que debes seguir:

    -

    Disponible para dispositivos iOS y Android

    -

Rainbow Six Siege Mobile está disponible para dispositivos iOS y Android que cumplen con los requisitos mínimos del sistema. Para dispositivos iOS, necesitas un iPhone 6S o más reciente, un iPad Air 2 o más reciente, o un iPod Touch de 7.ª generación o más reciente. Para dispositivos Android, necesitas un dispositivo que se ejecute en Android 6.0 o superior y que cumpla con los requisitos mínimos de memoria y almacenamiento del juego.

    Requiere una cuenta de Ubisoft y una conexión a Internet

    -

    Para jugar a Rainbow Six Siege Mobile, necesitas tener una cuenta de Ubisoft y una conexión a Internet. Una cuenta de Ubisoft es gratuita para crear y te permite acceder a varias características y beneficios, como cross-play, cross-progression, recompensas y más. Puede crear una cuenta de Ubisoft pulsando el botón "Crear cuenta" en el menú principal del juego o visitando el sitio web de Ubisoft. Se requiere una conexión a Internet para jugar Rainbow Six Siege Mobile porque es un juego multijugador en línea que se basa en servidores y matchmaking. No se puede jugar el juego sin conexión o en el modo de un solo jugador. Puedes usar Wi-Fi o datos móviles para jugar, pero asegúrate de tener una conexión estable y rápida para evitar problemas de retardo o desconexión.

    -

    -

    Soporta varios controladores y chat de voz

    - -

    ¿Cuáles son las características y modos de Rainbow Six Siege Mobile?

    -

    Rainbow Six Siege Mobile ofrece una variedad de características y modos que lo convierten en un juego divertido y atractivo para jugar. Estos son algunos de ellos:

    -

    Más de 60 operadores con capacidades y cargas únicas

    -

Rainbow Six Siege Mobile cuenta con más de 60 operadores entre los que puedes elegir, cada uno con sus propias habilidades y cargas únicas. Los operadores se dividen en dos categorías: atacantes y defensores. Los atacantes son los que tratan de completar el objetivo, como colocar una bomba o rescatar a un rehén, mientras que los defensores son los que tratan de detenerlos. Los operadores también pertenecen a diferentes unidades o facciones, como SAS, FBI SWAT, GIGN, Spetsnaz, GSG 9, JTF2, Navy SEALs, BOPE, SAT, GEO, SDU, GROM, 707th SMB, CBRN, GSUTR, Delta Force, SASR, Jaeger Corps, Nighthaven y REU, además de las unidades propias de ciertos operadores, como el NIGHTHAVEN Special Intervention Group (NSIG) de Ace, la Fuerza de Seguridad Privada de Aruni (APSF), las Operaciones Especiales de las Naciones Nakoda de Thunderbird (NNSO), el I+D de Nighthaven de Osa (NRD), las Fuerzas Especiales de Flores (FE), Nighthaven de Kali (NH), la REU de Iana, el Grupo de Trabajo Inkaba de Melusi (ITF) y la Delta Force de Zero (DF). Cada operador tiene una habilidad única que puede darles una ventaja en el combate, como el despliegue de trampas, gadgets, drones, escudos, cámaras, refuerzos o armas. Cada operador también tiene un arma primaria, un arma secundaria y un gadget que pueden personalizar con archivos adjuntos y skins.

    -

    Ataque clásico vs. modos de juego de defensa

    - -
      -
    • Bomb: Los atacantes deben localizar y desactivar una de las dos bombas colocadas por los defensores dentro de un límite de tiempo. Los defensores deben impedir que lo hagan eliminándolos o corriendo el reloj.
    • -
    • Rehén: Los atacantes deben localizar y extraer un rehén retenido por los defensores dentro de un límite de tiempo. Los defensores deben impedir que lo hagan eliminándolos o corriendo el reloj.
    • -
    • Secure Área: Los atacantes deben localizar y asegurar un contenedor de riesgo biológico retenido por los defensores dentro de un límite de tiempo. Los defensores deben impedir que lo hagan eliminándolos o corriendo el reloj.
    • -
    -

Cada modo de juego tiene diferentes reglas y objetivos que requieren diferentes tácticas y trabajo en equipo. Puede elegir el modo de juego que desea jugar tocando "Play" en el menú principal del juego.

    Entornos destructibles y gadgets tácticos

    -

    Una de las características más distintivas de Rainbow Six Siege Mobile son los entornos destructibles y los gadgets tácticos. El juego te permite interactuar con el entorno de varias maneras, como rompiendo paredes, puertas, ventanas, pisos, techos u objetos. Puede usar esto para crear nuevas líneas de visión, puntos de entrada o cobertura. También puede utilizar varios gadgets para mejorar su juego, como cargos por violación, flashbangs, granadas de humo, claymores, alambre de púas, escudos desplegables o cámaras antibalas. Puedes usar estos aparatos para violar, cegar, distraer, atrapar o defenderte a ti mismo o a tus compañeros de equipo. Sin embargo, también debes tener cuidado con los artilugios y trampas del enemigo, como las células nitrogenadas, granadas de impacto, esteras de hielo, trampas kapkan o minas gu. Necesitas ser consciente de tu entorno y usar tus artilugios sabiamente para obtener una ventaja en combate.

    -

    Clasificado, Juego rápido, y modos de tierra de entrenamiento

    -

    Rainbow Six Siege Mobile ofrece diferentes modos para diferentes estilos de juego y preferencias. Puedes elegir entre:

    -
      - -
    • Juego rápido: Este es el modo casual donde puedes jugar contra otros jugadores de cualquier nivel de habilidad y divertirte. Puedes ganar recompensas y experiencia jugando partidas rápidas. Los partidos de juego rápido tienen reglas y configuraciones más relajadas que los partidos clasificados, como rondas más cortas, más mapas y fuego amigo.
    • -
    • Training Ground: Este es el modo en solitario donde puedes practicar tus habilidades y aprender la mecánica del juego. Puedes jugar contra enemigos u objetivos de IA en varios escenarios y desafíos. También puede personalizar la configuración y el nivel de dificultad para satisfacer sus necesidades.
    • -
    -

    Puedes acceder a estos modos tocando "Jugar" en el menú principal del juego y seleccionando el modo que quieres jugar.

    -

    ¿Cuáles son algunos consejos y trucos para mejorar tu juego en Rainbow Six Siege Mobile?

    -

    Rainbow Six Siege Mobile es un juego que requiere habilidad, estrategia y trabajo en equipo para ganar. Aquí hay algunos consejos y trucos que pueden ayudarte a mejorar tu juego:

    -

    Conozca los diseños del mapa y las ubicaciones de la cámara

    -

    Uno de los aspectos más importantes de Rainbow Six Siege Mobile es el conocimiento del mapa. Usted necesita aprender el diseño de cada mapa, tales como las habitaciones, pasillos, escaleras, ventanas, puertas, escotillas, objetivos, puntos de desove y escondites. También debes conocer la ubicación de cada cámara en cada mapa, tanto para los atacantes como para los defensores. Las cámaras son vitales para reunir información y detectar enemigos. Puedes usar tu dron o cámaras para escanear el entorno y marcar a los enemigos para tu equipo. También puede disparar o hackear cámaras enemigas para negarles información. Puedes aprender los mapas jugando en el modo de campo de entrenamiento o viendo tutoriales o videos en línea.

    -

    Comunícate y coordina con tu equipo

    -

    Utilice su drone y cámaras para reunir intel

    - -

    Sé paciente y estratégico con tus movimientos

    -

    Rainbow Six Siege Mobile es un juego que recompensa la paciencia y la estrategia sobre la precipitación y la imprudencia. Debes tener cuidado con tus movimientos y acciones, ya que cada decisión puede tener consecuencias. Es necesario tener en cuenta factores como el ruido, la visibilidad, la cubierta, los ángulos y el tiempo cuando se mueve alrededor del mapa. También debes ser consciente de los movimientos y acciones del enemigo, ya que pueden sorprenderte o flanquearte. Necesitas usar tus señales de sonido y visión para detectar y localizar enemigos, como pasos, disparos, explosiones o sombras. También necesita usar su mapa y brújula para orientarse y navegar por el mapa. Puede acceder a su mapa pulsando el icono del mapa en la esquina superior izquierda de la pantalla.

    -

    Experimenta con diferentes operadores y estrategias

    -

    Rainbow Six Siege Mobile es un juego que ofrece mucha variedad y diversidad en términos de operadores y estrategias. Puede experimentar con diferentes operadores y estrategias para encontrar lo que se adapte a su estilo de juego y preferencias. También puedes adaptar tus operadores y estrategias a diferentes situaciones y escenarios, dependiendo del mapa, modo, objetivo, composición del equipo y comportamiento del enemigo. Puedes probar diferentes combinaciones de habilidades, cargas, gadgets y roles para crear sinergias y contrajuegos con tu equipo o contra el enemigo. También puedes probar diferentes tácticas y enfoques para atacar o defender el objetivo, como sigiloso o agresivo, directo o indirecto, vertical u horizontal.

    -

    Conclusión

    -

En resumen, puedes mejorar tu juego conociendo los mapas y las ubicaciones de las cámaras, comunicándote y coordinándote con tu equipo, usando tu dron y tus cámaras para reunir información, siendo paciente y estratégico con tus movimientos, y experimentando con diferentes operadores y estrategias. Rainbow Six Siege Mobile es un juego que te desafiará, te entretendrá y te mantendrá enganchado durante horas. Si estás listo para unirte a la acción, descarga Rainbow Six Siege Mobile hoy y disfruta del mejor shooter táctico en tu teléfono.

    -

    Preguntas frecuentes

    - - - -Pregunta -Respuesta - - -¿Rainbow Six Siege Mobile es lo mismo que la extracción de Rainbow Six? -No, Rainbow Six Siege Mobile es un juego diferente de Rainbow Six Extraction. Rainbow Six Extraction es un juego cooperativo de JcE que enfrenta a un equipo de operadores contra una amenaza alienígena. Rainbow Six Siege Mobile es un juego competitivo de JcJ que enfrenta a dos equipos de operadores entre sí. - - -¿Puedo jugar a Rainbow Six Siege Mobile sin conexión? -No, no puedes jugar sin conexión a Rainbow Six Siege Mobile. Necesitas una conexión a Internet para jugar, ya que es un juego multijugador en línea que se basa en servidores y matchmaking. - - -¿Cómo puedo obtener más operadores en Rainbow Six Siege Mobile? -Puedes obtener más operadores en Rainbow Six Siege Mobile al ganar o comprar créditos. Los créditos son la moneda del juego que puedes usar para desbloquear operadores. Puedes ganar créditos jugando el juego regularmente y completando desafíos. También puede comprar créditos con dinero real o moneda premium. - - -¿Cómo puedo personalizar mi operador en Rainbow Six Siege Mobile? -Puede personalizar su operador en Rainbow Six Siege Mobile cambiando su carga, archivos adjuntos, pieles, encantos, sombreros, uniformes o conjuntos de élite. Puede acceder al menú de personalización pulsando el botón "Operadores" en el menú principal del juego y seleccionando el operador que desea personalizar. - - -¿Cómo puedo reportar un error o un tramposo en Rainbow Six Siege Mobile? -Puedes reportar un error o un tramposo en Rainbow Six Siege Mobile usando el sistema de informes del juego. Puede acceder al sistema de informes pulsando el botón "Informe" en la pantalla de final de partido o en el perfil del jugador. También puede ponerse en contacto con el soporte de Ubisoft a través de su sitio web o canales de redes sociales. - -

    64aa2da5cf
    -
    -
    \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Descargar Fifa 2022 Apk Mod Y Obb.md b/spaces/Benson/text-generation/Examples/Descargar Fifa 2022 Apk Mod Y Obb.md deleted file mode 100644 index 2bc0089e475c3f82db8b134698359c376cd43f5b..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descargar Fifa 2022 Apk Mod Y Obb.md +++ /dev/null @@ -1,86 +0,0 @@ - -

    Descargar FIFA 2022 APK Mod y OBB para Android

    -

    Si eres un fanático de los juegos de fútbol, es posible que hayas oído hablar de FIFA, la serie de juegos de simulación de fútbol más popular y realista desarrollada por EA Sports. FIFA 2022 es la última entrega de la serie, y se espera que sea lanzado en octubre de 2021 para varias plataformas, incluyendo Android. Sin embargo, si quieres disfrutar del juego antes de su lanzamiento oficial, puedes descargar FIFA 2022 APK Mod y OBB para dispositivos Android.

    -

    ¿Qué es FIFA 2022 APK Mod y OBB?

    -

    FIFA 2022 APK Mod y OBB son versiones modificadas de los archivos de juegos originales de FIFA 2022 que le permiten jugar el juego en su dispositivo Android sin restricciones. APK significa Android Package Kit, que es el formato de archivo utilizado para instalar aplicaciones en dispositivos Android. OBB significa Opaque Binary Blob, que es un formato de archivo utilizado para almacenar grandes cantidades de datos, como gráficos, sonidos y videos.

    -

    descargar fifa 2022 apk mod y obb


    Download === https://bltlly.com/2v6MlY



    -

    Al descargar FIFA 2022 APK Mod y OBB, se puede disfrutar de todas las características del juego sin tener que esperar a su lanzamiento oficial o pagar por él. También puedes acceder a algunas funciones exclusivas que no están disponibles en el juego original, como monedas y puntos ilimitados, jugadores y equipos desbloqueados y más.

    -

    Características de FIFA 2022 APK Mod y OBB

    -

    Gráficos realistas y jugabilidad

    -

    Una de las principales atracciones de FIFA 2022 APK Mod y OBB es su gráfica realista y jugabilidad. El juego utiliza el motor Frostbite, que es conocido por sus impresionantes efectos visuales y la física. Puedes ver las caras detalladas, expresiones, movimientos y animaciones de los jugadores, así como los estadios realistas, multitudes, clima y efectos de iluminación. El juego también presenta física realista del balón, IA del jugador, tácticas, formaciones, habilidades y celebraciones.

    -

    Equipos y jugadores actualizados

    - -

    Nuevos modos y torneos

    -

    FIFA 2022 APK Mod y OBB también ofrece nuevos modos y torneos para que usted disfrute. Puedes jugar en el modo carrera, donde puedes crear tu propio jugador o manager y llevar a tu equipo a la gloria. También puedes jugar en el Ultimate Team Mode, donde puedes construir el equipo de tus sueños desde cero usando jugadores de diferentes ligas y naciones. También puedes participar en varios torneos, como la UEFA Champions League, la UEFA Europa League, la UEFA Conference League, la FIFA Club World Cup, la Copa Libertadores, la Copa Sudamericana y más.

    -

    Monedas y puntos ilimitados

    -

    Otra característica de FIFA 2022 APK Mod y OBB es que le da monedas y puntos ilimitados, que son las monedas utilizadas en el juego para comprar jugadores, paquetes, artículos y mejoras. Puede utilizar estas monedas y puntos para obtener los mejores jugadores y equipos en el juego, así como personalizar su equipo, kits, insignias y estadios. También puedes usarlas para desbloquear algunas funciones premium, como el VIP Pass, que te da acceso a recompensas y beneficios exclusivos.

    -

    Cómo descargar e instalar FIFA 2022 APK Mod y OBB

    -

    Requisitos

    -

    Antes de descargar e instalar FIFA 2022 APK Mod y OBB, es necesario asegurarse de que su dispositivo Android cumple con los siguientes requisitos:

    -

    -
      -
    • Versión para Android: 5.0 o superior
    • -
    • RAM: 2 GB o más
    • -
    • Espacio de almacenamiento: 4 GB o más
    • -
    • Conexión a Internet: necesaria para las funciones en línea
    • -
    • Permiso: permitir la instalación desde fuentes desconocidas
    • -
    -

    Pasos

    -

    Después de haber comprobado los requisitos, puede seguir estos pasos para descargar e instalar FIFA 2022 APK Mod y OBB:

    -
      -
    1. Descargar los archivos FIFA 2022 APK Mod y OBB de una fuente de confianza. Puede encontrar muchos sitios web que ofrecen estos archivos, pero tenga cuidado con el malware y los virus. Puede utilizar este enlace como ejemplo, pero no está avalado por nosotros.
    2. - -
    3. Instalar el archivo APK tocando en él y siguiendo las instrucciones. No abra el juego todavía.
    4. -
    5. Iniciar el juego desde el cajón de la aplicación o la pantalla de inicio. Es posible que tenga que verificar el dispositivo completando un captcha o una encuesta corta. Esto es para prevenir bots y spam.
    6. -
    7. Disfruta jugando FIFA 2022 APK Mod y OBB en tu dispositivo Android.
    8. -
    -

    Cómo jugar FIFA 2022 APK Mod y OBB

    -

    Elige tu equipo y modo

    -

    Una vez que hayas lanzado el juego, puedes elegir tu equipo y modo desde el menú principal. Puedes seleccionar entre varias opciones, como Quick Match, Career Mode, Ultimate Team Mode, Tournament Mode, Online Mode y más. También puede cambiar la configuración, como idioma, dificultad, controles, sonido y gráficos.

    -

    Controla a tus jugadores y marca goles

    -

    Después de haber elegido su equipo y modo, puede comenzar a jugar el juego. Puede controlar sus reproductores utilizando los botones virtuales de la pantalla o un controlador compatible. También puede utilizar gestos, como deslizar, tocar y arrastrar, para realizar acciones, como pasar, disparar, driblar, abordar y correr. Su objetivo es anotar más goles que su oponente en el momento dado.

    -

    Personaliza tus ajustes y opciones

    -

    Si desea personalizar la configuración y las opciones, puede acceder a ellos desde el menú de pausa o el menú principal. Puedes cambiar varios aspectos del juego, como el ángulo de la cámara, comentarios, sustituciones, formaciones, tácticas, habilidades y más. También puedes ver tus estadísticas, logros, recompensas y tablas de clasificación.

    -

    Pros y contras de FIFA 2022 APK Mod y OBB

    -

    Pros

    -

    FIFA 2022 APK Mod y OBB tiene muchas ventajas sobre el juego original, tales como:

    -
      -
    • Puedes jugar el juego antes de su lanzamiento oficial o sin pagar por él.
    • -
    • Puedes acceder a algunas funciones exclusivas que no están disponibles en el juego original, como monedas y puntos ilimitados, jugadores y equipos desbloqueados y más.
    • - -
    • Puedes jugar con más de 700 equipos de más de 30 ligas de todo el mundo.
    • -
    • Puedes participar en varios modos y torneos, como la UEFA Champions League, la UEFA Europa League, la UEFA Conference League, la FIFA Club World Cup, la Copa Libertadores, la Copa Sudamericana y más.
    • -
    -

    Contras

    -

    FIFA 2022 APK Mod y OBB también tiene algunas desventajas sobre el juego original, tales como:

    -
      -
• Puede encontrar algunos errores, fallos o bloqueos durante el juego.
    • -
    • Es posible que necesite verificar su dispositivo completando un captcha o una breve encuesta antes de descargar el juego. Esto es para prevenir bots y spam, pero puede ser molesto y consumir mucho tiempo.
    • -
    • Es posible que no pueda jugar en línea con otros jugadores que tienen el juego original o una versión diferente del juego.
    • -
    • Es posible que no reciba actualizaciones o parches de los desarrolladores, lo que puede afectar el rendimiento y la compatibilidad del juego.
    • -
    • Puede violar los términos y condiciones de EA Sports, lo que puede resultar en acciones legales o prohibiciones.
    • -
    -

    Conclusión

    -

FIFA 2022 APK Mod y OBB es una versión modificada del juego original de FIFA 2022 que le permite jugar el juego en su dispositivo Android sin restricciones. Puedes disfrutar de gráficos y jugabilidad realistas, equipos y jugadores actualizados, nuevos modos y torneos, monedas y puntos ilimitados y más. Sin embargo, también es necesario ser consciente de los riesgos y desventajas de descargar e instalar FIFA 2022 APK Mod y OBB, tales como errores, fallos, bloqueos, verificación obligatoria, problemas en línea, falta de actualizaciones y parches, y posibles acciones legales. Por lo tanto, debe descargar e instalar FIFA 2022 APK Mod y OBB a su discreción y responsabilidad.

    -

    Preguntas frecuentes

    -

    Aquí hay algunas preguntas frecuentes sobre FIFA 2022 APK Mod y OBB:

    -
      -
    1. Q: ¿Es FIFA 2022 APK Mod y OBB seguro para descargar e instalar?
    2. - -
    3. Q: ¿FIFA 2022 APK Mod y OBB es compatible con mi dispositivo?
    4. -
    5. A: FIFA 2022 APK Mod y OBB requiere Android 5.0 o superior, 2 GB de RAM o más, 4 GB de espacio de almacenamiento o más, y una conexión a Internet para funcionar correctamente. Si su dispositivo cumple con estos requisitos, usted debe ser capaz de jugar FIFA 2022 APK Mod y OBB sin ningún problema. Sin embargo, algunos dispositivos pueden no ser compatibles con FIFA 2022 APK Mod y OBB debido a diferentes especificaciones o modelos.
    6. -
    7. Q: ¿Cómo puedo actualizar FIFA 2022 APK Mod y OBB?
    8. -
    9. A: FIFA 2022 APK Mod y OBB no recibe actualizaciones o parches de EA Sports, por lo que no puede ser capaz de actualizar el juego a la última versión o corregir cualquier error o errores. Es posible que tenga que descargar una nueva versión de FIFA 2022 APK Mod y OBB desde la misma fuente o una fuente diferente si hay una disponible. Sin embargo, esto puede no garantizar que el juego funcionará correctamente o tendrá todas las características que desee.
    10. -
    11. Q: ¿Cómo puedo desinstalar FIFA 2022 APK Mod y OBB?
    12. -
    13. A: Si desea desinstalar FIFA 2022 APK Mod y OBB desde su dispositivo, puede seguir estos pasos:
    14. -
        -
• Ve a la configuración de tu dispositivo y toca Aplicaciones.
      • -
• Encuentra FIFA 2022 APK Mod y OBB en la lista de aplicaciones y tócalo.
      • -
      • Toque en Desinstalar y confirme su acción.
      • -
      • Elimine la carpeta com.ea.gp.fifa22 del directorio Android/OBB en el almacenamiento interno de su dispositivo.
      • -
      -
    15. Q: ¿Dónde puedo obtener más información sobre FIFA 2022 APK Mod y OBB?
    16. -
    17. A: Usted puede obtener más información sobre FIFA 2022 APK Mod y OBB de varias fuentes en línea, tales como blogs, foros, comentarios, vídeos, y más. Sin embargo, debe tener cuidado con la información falsa o engañosa que puede dañar su dispositivo o experiencia. También debes consultar el sitio web oficial de EA Sports para obtener las últimas noticias y actualizaciones sobre FIFA 2022.
    18. -

    64aa2da5cf
    -
    -
    \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Descargar Fondo De Pantalla Scorpion Mortal Kombat.md b/spaces/Benson/text-generation/Examples/Descargar Fondo De Pantalla Scorpion Mortal Kombat.md deleted file mode 100644 index bc60b425d28e7e96944a79860f324cc02f1621ed..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descargar Fondo De Pantalla Scorpion Mortal Kombat.md +++ /dev/null @@ -1,72 +0,0 @@ -
    -

Download Scorpion Mortal Kombat Wallpaper: How to Customize Your Desktop with the Iconic Ninja

Introduction

If you are a fan of the Mortal Kombat franchise, you probably know who Scorpion is. He is one of the most popular and recognizable characters in the series, as well as the mascot of the games. He is a resurrected ninja who seeks revenge for the murder of his family and clan by the sorcerer Quan Chi. He is known for his signature move of throwing a rope-bound kunai at his opponents and pulling them in while shouting "Get over here!" or "Come here!". He is also famous for his "Toasty!" fatality, in which he removes his mask to reveal a flaming skull and breathes fire at his enemies.

download scorpion mortal kombat wallpaper

Download File: https://bltlly.com/2v6Mk5

Scorpion is not only a tough fighter but also a great character to have as your desktop background. Whether you want to show your love for Mortal Kombat or simply like his design and style, you can find many high-quality Scorpion wallpapers online to suit your taste. In this article, we will show you how to download and set Scorpion wallpapers as your desktop background, and why you should choose them.

Why Choose Scorpion Wallpapers?

Who Is Scorpion?

Before we get into the details of downloading and setting Scorpion wallpapers, let's look at who Scorpion is and what makes him so special. Scorpion's real name is Hanzo Hasashi, and he was a ninja of the Shirai Ryu clan in Japan. He was killed by Bi-Han, the elder Sub-Zero of the rival Lin Kuei clan, during a tournament organized by Shang Tsung. He was later resurrected by Quan Chi, who tricked him into believing that Sub-Zero was responsible for the massacre of his family and clan. Scorpion then became Quan Chi's loyal servant and assassin, until he learned the truth about Quan Chi's deception and betrayal.

Why Choose Scorpion Wallpapers?

There are many reasons why you might want Scorpion wallpapers for your desktop background. Here are some of them:

• Scorpion wallpapers are visually stunning and appealing. They show Scorpion in various poses, outfits, and settings, highlighting his skills, weapons, and personality. They are also colorful, vibrant, and dynamic, adding life and energy to your desktop.
• Scorpion wallpapers are inspiring and motivating. They remind you of Scorpion's strength, determination, and resilience, as well as his quest for justice and redemption. They can help you push through challenges, or simply lift your mood and confidence.
• Scorpion wallpapers are fun and entertaining. They let you express your fandom and passion for Mortal Kombat, or your admiration for Scorpion as a character. They can also spark conversations with other fans or friends who share your interest.

How to Download Scorpion Wallpapers

Best Websites for Downloading Scorpion Wallpapers

Many websites offer Scorpion wallpapers for free download, but not all of them are reliable and safe. Some may contain viruses, malware, or unwanted ads that can harm your computer or compromise your privacy. Be careful and selective about where you download Scorpion wallpapers. Here are some of the best websites we recommend (a short download script follows this section):

DeviantArt Wallpapers

InterfaceLIFT

InterfaceLIFT is a website that provides high-quality wallpapers for a wide range of devices and screen resolutions. You can find a variety of Scorpion wallpapers that are professionally designed and optimized for your desktop. You can also filter wallpapers by resolution, rating, date, or popularity. To download Scorpion wallpapers from InterfaceLIFT, simply click the download button and choose the resolution that matches your device.

WallHaven

WallHaven is a website that collects and curates wallpapers from many sources and categories. You can find a large number of stunning Scorpion wallpapers submitted by users or gathered from other websites. You can also sort wallpapers by relevance, views, favorites, or at random. To download Scorpion wallpapers from WallHaven, simply right-click the image and save it to your computer.

Unsplash Wallpapers

Unsplash is a website that offers free, high-resolution, royalty-free stock photos. You can use them for any personal or commercial purpose without attribution. You can also find some beautiful Scorpion wallpapers taken by professional photographers or edited by creative artists, and you can browse related collections or keywords to find more. To download Scorpion wallpapers from Unsplash, simply click the download button and save the image to your computer.
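If you would rather script the download step than save images by hand, here is a minimal Python sketch. The URL and file name are placeholders rather than real wallpaper links, and it assumes the requests package is installed.

```python
import requests

# Placeholder URL: paste the direct image link copied from one of the sites above.
WALLPAPER_URL = "https://example.com/scorpion-wallpaper.jpg"
OUTPUT_FILE = "scorpion_wallpaper.jpg"

def download_wallpaper(url: str, path: str) -> None:
    """Stream an image to disk in chunks so large wallpapers do not sit in memory."""
    response = requests.get(url, stream=True, timeout=30)
    response.raise_for_status()  # fail loudly on 403/404 instead of saving an error page
    with open(path, "wb") as fh:
        for chunk in response.iter_content(chunk_size=8192):
            fh.write(chunk)

if __name__ == "__main__":
    download_wallpaper(WALLPAPER_URL, OUTPUT_FILE)
    print(f"Saved wallpaper to {OUTPUT_FILE}")
```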

How to Set Scorpion Wallpapers as Your Desktop Background

After you have downloaded your favorite Scorpion wallpapers from the websites above, you need to set them as your desktop background. The exact process depends on your operating system and device, but here are the general steps:

For Windows 10 Users

• Locate the folder where you saved your Scorpion wallpapers.
• Select the wallpaper you want to use as your desktop background.
• Right-click the wallpaper and choose "Set as desktop background".
• You can also go to Settings > Personalization > Background and choose "Picture" as your background option, then click "Browse" and select your Scorpion wallpaper from the folder. A short Python sketch for scripting this step follows the list.
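For users who prefer to script this step, here is a minimal Python sketch that changes the Windows wallpaper through the SystemParametersInfoW API exposed by the standard ctypes module. The image path is a placeholder; pass the file you actually downloaded.

```python
import ctypes
import os

SPI_SETDESKWALLPAPER = 20  # Windows constant for changing the desktop wallpaper

def set_windows_wallpaper(image_path: str) -> None:
    """Set the Windows desktop wallpaper; the path must point to an existing image."""
    absolute_path = os.path.abspath(image_path)
    # Flags 3 = SPIF_UPDATEINIFILE | SPIF_SENDCHANGE, so the change persists and is broadcast.
    ok = ctypes.windll.user32.SystemParametersInfoW(
        SPI_SETDESKWALLPAPER, 0, absolute_path, 3
    )
    if not ok:
        raise OSError("SystemParametersInfoW failed to set the wallpaper")

if __name__ == "__main__":
    set_windows_wallpaper("scorpion_wallpaper.jpg")  # placeholder file name
```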

For Mac Users

• Locate the folder where you saved your Scorpion wallpapers.
• Select the wallpaper you want to use as your desktop background.
• Right-click the wallpaper and choose "Set Desktop Picture".
• You can also go to System Preferences > Desktop & Screen Saver, choose the "Desktop" tab, then click "+" and select your Scorpion wallpaper from the folder. A scripted alternative is sketched after this list.
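The macOS step can be scripted as well. This minimal Python sketch shells out to osascript and asks Finder to change the desktop picture; the image path is a placeholder, and the approach assumes AppleScript automation is permitted on your machine.

```python
import subprocess
from pathlib import Path

def set_macos_wallpaper(image_path: str) -> None:
    """Set the macOS desktop picture by delegating to AppleScript via osascript."""
    absolute_path = Path(image_path).resolve()
    script = (
        'tell application "Finder" to set desktop picture to POSIX file '
        f'"{absolute_path}"'
    )
    # osascript exits with a non-zero status if the AppleScript fails.
    subprocess.run(["osascript", "-e", script], check=True)

if __name__ == "__main__":
    set_macos_wallpaper("scorpion_wallpaper.jpg")  # placeholder file name
```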

Conclusion

Summary of the Main Points

In this article, we have shown you how to download and set Scorpion wallpapers as your desktop background, and why you should choose them. We have also recommended some of the best websites that offer high-quality Scorpion wallpapers for free download. Here are the main points we covered:

• Scorpion is one of the most popular and iconic characters in Mortal Kombat: a resurrected ninja who seeks revenge for his family and clan.
• Scorpion wallpapers are visually stunning, inspiring, and entertaining, featuring Scorpion in various poses, outfits, and settings.
• You can download Scorpion wallpapers from websites such as DeviantArt Wallpapers, InterfaceLIFT, WallHaven, or Unsplash Wallpapers, which are reliable and safe.
• You can set a Scorpion wallpaper as your desktop background by right-clicking it and choosing "Set as desktop background", or by going to your settings and selecting "Picture" as your background option.

Call to Action

What are you waiting for? Download a Scorpion Mortal Kombat wallpaper today and enjoy the view!
Frequently Asked Questions

Here are some of the most frequently asked questions about Scorpion wallpapers:

1. Q: What is the best resolution for Scorpion wallpapers?

A: The best resolution for Scorpion wallpapers depends on the size and quality of your screen. A general rule is to choose a resolution that matches or exceeds your screen's native resolution. For example, if your screen is 1920 x 1080 pixels, pick a wallpaper that is at least that resolution or higher. This ensures the wallpaper looks sharp and clear on your desktop. (The short Pillow sketch after this FAQ shows one way to fit a downloaded image to your screen size.)

2. Q: How can I make my own Scorpion wallpaper?

A: If you want to make your own Scorpion wallpaper, you will need some photo-editing tools and skills. You can use software such as Photoshop, GIMP, or Paint.NET to create a Scorpion wallpaper from scratch or by modifying an existing image. You can also use online tools such as Canva, PicMonkey, or Fotor to build one from templates, stickers, filters, and fonts, or use your own photos or drawings as a starting point.

3. Q: How can I share my Scorpion wallpaper with others?

A: If you want to share your Scorpion wallpaper, you can upload it to websites such as DeviantArt Wallpapers, InterfaceLIFT, WallHaven, or Unsplash Wallpapers, where other users can download and use it. You can also share it on social media platforms such as Facebook, Twitter, Instagram, or Pinterest and tag friends or followers who might be interested, or send it to your contacts by e-mail or through messaging apps such as WhatsApp, Telegram, or Signal.

4. Q: How can I change my Scorpion wallpaper periodically?

5. Q: Where can I find more information about Scorpion or Mortal Kombat?

A: You can visit the official Mortal Kombat website, where you will find news, updates, videos, and articles about the franchise. You can also visit the Mortal Kombat Wiki, a fan-made encyclopedia covering the characters, games, movies, comics, and more. You can also join online forums or communities such as Reddit, where you can discuss and share your opinions and experiences with other fans.
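As a small illustration of the resolution advice in the first question above, here is a minimal Python sketch that center-crops and resizes a downloaded image so it exactly fills a 1920 x 1080 screen. It assumes the Pillow package is installed, and the file names are placeholders.

```python
from PIL import Image, ImageOps

def fit_to_screen(src: str, dst: str, size=(1920, 1080)) -> None:
    """Center-crop and resize an image so it exactly fills the target screen size."""
    with Image.open(src) as img:
        fitted = ImageOps.fit(img, size)  # crops to the target aspect ratio, then resizes
        fitted.save(dst)

if __name__ == "__main__":
    fit_to_screen("scorpion_wallpaper.jpg", "scorpion_1920x1080.jpg")
```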

    64aa2da5cf
    -
    -
    \ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pyparsing/exceptions.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pyparsing/exceptions.py deleted file mode 100644 index a38447bb05bd5d503a32651d6046ff8667785c0c..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pyparsing/exceptions.py +++ /dev/null @@ -1,267 +0,0 @@ -# exceptions.py - -import re -import sys -import typing - -from .util import col, line, lineno, _collapse_string_to_ranges -from .unicode import pyparsing_unicode as ppu - - -class ExceptionWordUnicode(ppu.Latin1, ppu.LatinA, ppu.LatinB, ppu.Greek, ppu.Cyrillic): - pass - - -_extract_alphanums = _collapse_string_to_ranges(ExceptionWordUnicode.alphanums) -_exception_word_extractor = re.compile("([" + _extract_alphanums + "]{1,16})|.") - - -class ParseBaseException(Exception): - """base exception class for all parsing runtime exceptions""" - - # Performance tuning: we construct a *lot* of these, so keep this - # constructor as small and fast as possible - def __init__( - self, - pstr: str, - loc: int = 0, - msg: typing.Optional[str] = None, - elem=None, - ): - self.loc = loc - if msg is None: - self.msg = pstr - self.pstr = "" - else: - self.msg = msg - self.pstr = pstr - self.parser_element = self.parserElement = elem - self.args = (pstr, loc, msg) - - @staticmethod - def explain_exception(exc, depth=16): - """ - Method to take an exception and translate the Python internal traceback into a list - of the pyparsing expressions that caused the exception to be raised. - - Parameters: - - - exc - exception raised during parsing (need not be a ParseException, in support - of Python exceptions that might be raised in a parse action) - - depth (default=16) - number of levels back in the stack trace to list expression - and function names; if None, the full stack trace names will be listed; if 0, only - the failing input line, marker, and exception string will be shown - - Returns a multi-line string listing the ParserElements and/or function names in the - exception's stack trace. 
- """ - import inspect - from .core import ParserElement - - if depth is None: - depth = sys.getrecursionlimit() - ret = [] - if isinstance(exc, ParseBaseException): - ret.append(exc.line) - ret.append(" " * (exc.column - 1) + "^") - ret.append("{}: {}".format(type(exc).__name__, exc)) - - if depth > 0: - callers = inspect.getinnerframes(exc.__traceback__, context=depth) - seen = set() - for i, ff in enumerate(callers[-depth:]): - frm = ff[0] - - f_self = frm.f_locals.get("self", None) - if isinstance(f_self, ParserElement): - if frm.f_code.co_name not in ("parseImpl", "_parseNoCache"): - continue - if id(f_self) in seen: - continue - seen.add(id(f_self)) - - self_type = type(f_self) - ret.append( - "{}.{} - {}".format( - self_type.__module__, self_type.__name__, f_self - ) - ) - - elif f_self is not None: - self_type = type(f_self) - ret.append("{}.{}".format(self_type.__module__, self_type.__name__)) - - else: - code = frm.f_code - if code.co_name in ("wrapper", ""): - continue - - ret.append("{}".format(code.co_name)) - - depth -= 1 - if not depth: - break - - return "\n".join(ret) - - @classmethod - def _from_exception(cls, pe): - """ - internal factory method to simplify creating one type of ParseException - from another - avoids having __init__ signature conflicts among subclasses - """ - return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement) - - @property - def line(self) -> str: - """ - Return the line of text where the exception occurred. - """ - return line(self.loc, self.pstr) - - @property - def lineno(self) -> int: - """ - Return the 1-based line number of text where the exception occurred. - """ - return lineno(self.loc, self.pstr) - - @property - def col(self) -> int: - """ - Return the 1-based column on the line of text where the exception occurred. - """ - return col(self.loc, self.pstr) - - @property - def column(self) -> int: - """ - Return the 1-based column on the line of text where the exception occurred. - """ - return col(self.loc, self.pstr) - - def __str__(self) -> str: - if self.pstr: - if self.loc >= len(self.pstr): - foundstr = ", found end of text" - else: - # pull out next word at error location - found_match = _exception_word_extractor.match(self.pstr, self.loc) - if found_match is not None: - found = found_match.group(0) - else: - found = self.pstr[self.loc : self.loc + 1] - foundstr = (", found %r" % found).replace(r"\\", "\\") - else: - foundstr = "" - return "{}{} (at char {}), (line:{}, col:{})".format( - self.msg, foundstr, self.loc, self.lineno, self.column - ) - - def __repr__(self): - return str(self) - - def mark_input_line(self, marker_string: str = None, *, markerString=">!<") -> str: - """ - Extracts the exception line from the input string, and marks - the location of the exception with a special symbol. - """ - markerString = marker_string if marker_string is not None else markerString - line_str = self.line - line_column = self.column - 1 - if markerString: - line_str = "".join( - (line_str[:line_column], markerString, line_str[line_column:]) - ) - return line_str.strip() - - def explain(self, depth=16) -> str: - """ - Method to translate the Python internal traceback into a list - of the pyparsing expressions that caused the exception to be raised. 
- - Parameters: - - - depth (default=16) - number of levels back in the stack trace to list expression - and function names; if None, the full stack trace names will be listed; if 0, only - the failing input line, marker, and exception string will be shown - - Returns a multi-line string listing the ParserElements and/or function names in the - exception's stack trace. - - Example:: - - expr = pp.Word(pp.nums) * 3 - try: - expr.parse_string("123 456 A789") - except pp.ParseException as pe: - print(pe.explain(depth=0)) - - prints:: - - 123 456 A789 - ^ - ParseException: Expected W:(0-9), found 'A' (at char 8), (line:1, col:9) - - Note: the diagnostic output will include string representations of the expressions - that failed to parse. These representations will be more helpful if you use `set_name` to - give identifiable names to your expressions. Otherwise they will use the default string - forms, which may be cryptic to read. - - Note: pyparsing's default truncation of exception tracebacks may also truncate the - stack of expressions that are displayed in the ``explain`` output. To get the full listing - of parser expressions, you may have to set ``ParserElement.verbose_stacktrace = True`` - """ - return self.explain_exception(self, depth) - - markInputline = mark_input_line - - -class ParseException(ParseBaseException): - """ - Exception thrown when a parse expression doesn't match the input string - - Example:: - - try: - Word(nums).set_name("integer").parse_string("ABC") - except ParseException as pe: - print(pe) - print("column: {}".format(pe.column)) - - prints:: - - Expected integer (at char 0), (line:1, col:1) - column: 1 - - """ - - -class ParseFatalException(ParseBaseException): - """ - User-throwable exception thrown when inconsistent parse content - is found; stops all parsing immediately - """ - - -class ParseSyntaxException(ParseFatalException): - """ - Just like :class:`ParseFatalException`, but thrown internally - when an :class:`ErrorStop` ('-' operator) indicates - that parsing is to stop immediately because an unbacktrackable - syntax error has been found. - """ - - -class RecursiveGrammarException(Exception): - """ - Exception thrown by :class:`ParserElement.validate` if the - grammar could be left-recursive; parser may need to enable - left recursion using :class:`ParserElement.enable_left_recursion` - """ - - def __init__(self, parseElementList): - self.parseElementTrace = parseElementList - - def __str__(self) -> str: - return "RecursiveGrammarException: {}".format(self.parseElementTrace) diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/columns.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/columns.py deleted file mode 100644 index 669a3a7074f9a9e1af29cb4bc78b05851df67959..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/columns.py +++ /dev/null @@ -1,187 +0,0 @@ -from collections import defaultdict -from itertools import chain -from operator import itemgetter -from typing import Dict, Iterable, List, Optional, Tuple - -from .align import Align, AlignMethod -from .console import Console, ConsoleOptions, RenderableType, RenderResult -from .constrain import Constrain -from .measure import Measurement -from .padding import Padding, PaddingDimensions -from .table import Table -from .text import TextType -from .jupyter import JupyterMixin - - -class Columns(JupyterMixin): - """Display renderables in neat columns. 
- - Args: - renderables (Iterable[RenderableType]): Any number of Rich renderables (including str). - width (int, optional): The desired width of the columns, or None to auto detect. Defaults to None. - padding (PaddingDimensions, optional): Optional padding around cells. Defaults to (0, 1). - expand (bool, optional): Expand columns to full width. Defaults to False. - equal (bool, optional): Arrange in to equal sized columns. Defaults to False. - column_first (bool, optional): Align items from top to bottom (rather than left to right). Defaults to False. - right_to_left (bool, optional): Start column from right hand side. Defaults to False. - align (str, optional): Align value ("left", "right", or "center") or None for default. Defaults to None. - title (TextType, optional): Optional title for Columns. - """ - - def __init__( - self, - renderables: Optional[Iterable[RenderableType]] = None, - padding: PaddingDimensions = (0, 1), - *, - width: Optional[int] = None, - expand: bool = False, - equal: bool = False, - column_first: bool = False, - right_to_left: bool = False, - align: Optional[AlignMethod] = None, - title: Optional[TextType] = None, - ) -> None: - self.renderables = list(renderables or []) - self.width = width - self.padding = padding - self.expand = expand - self.equal = equal - self.column_first = column_first - self.right_to_left = right_to_left - self.align: Optional[AlignMethod] = align - self.title = title - - def add_renderable(self, renderable: RenderableType) -> None: - """Add a renderable to the columns. - - Args: - renderable (RenderableType): Any renderable object. - """ - self.renderables.append(renderable) - - def __rich_console__( - self, console: Console, options: ConsoleOptions - ) -> RenderResult: - render_str = console.render_str - renderables = [ - render_str(renderable) if isinstance(renderable, str) else renderable - for renderable in self.renderables - ] - if not renderables: - return - _top, right, _bottom, left = Padding.unpack(self.padding) - width_padding = max(left, right) - max_width = options.max_width - widths: Dict[int, int] = defaultdict(int) - column_count = len(renderables) - - get_measurement = Measurement.get - renderable_widths = [ - get_measurement(console, options, renderable).maximum - for renderable in renderables - ] - if self.equal: - renderable_widths = [max(renderable_widths)] * len(renderable_widths) - - def iter_renderables( - column_count: int, - ) -> Iterable[Tuple[int, Optional[RenderableType]]]: - item_count = len(renderables) - if self.column_first: - width_renderables = list(zip(renderable_widths, renderables)) - - column_lengths: List[int] = [item_count // column_count] * column_count - for col_no in range(item_count % column_count): - column_lengths[col_no] += 1 - - row_count = (item_count + column_count - 1) // column_count - cells = [[-1] * column_count for _ in range(row_count)] - row = col = 0 - for index in range(item_count): - cells[row][col] = index - column_lengths[col] -= 1 - if column_lengths[col]: - row += 1 - else: - col += 1 - row = 0 - for index in chain.from_iterable(cells): - if index == -1: - break - yield width_renderables[index] - else: - yield from zip(renderable_widths, renderables) - # Pad odd elements with spaces - if item_count % column_count: - for _ in range(column_count - (item_count % column_count)): - yield 0, None - - table = Table.grid(padding=self.padding, collapse_padding=True, pad_edge=False) - table.expand = self.expand - table.title = self.title - - if self.width is not None: - 
column_count = (max_width) // (self.width + width_padding) - for _ in range(column_count): - table.add_column(width=self.width) - else: - while column_count > 1: - widths.clear() - column_no = 0 - for renderable_width, _ in iter_renderables(column_count): - widths[column_no] = max(widths[column_no], renderable_width) - total_width = sum(widths.values()) + width_padding * ( - len(widths) - 1 - ) - if total_width > max_width: - column_count = len(widths) - 1 - break - else: - column_no = (column_no + 1) % column_count - else: - break - - get_renderable = itemgetter(1) - _renderables = [ - get_renderable(_renderable) - for _renderable in iter_renderables(column_count) - ] - if self.equal: - _renderables = [ - None - if renderable is None - else Constrain(renderable, renderable_widths[0]) - for renderable in _renderables - ] - if self.align: - align = self.align - _Align = Align - _renderables = [ - None if renderable is None else _Align(renderable, align) - for renderable in _renderables - ] - - right_to_left = self.right_to_left - add_row = table.add_row - for start in range(0, len(_renderables), column_count): - row = _renderables[start : start + column_count] - if right_to_left: - row = row[::-1] - add_row(*row) - yield table - - -if __name__ == "__main__": # pragma: no cover - import os - - console = Console() - - files = [f"{i} {s}" for i, s in enumerate(sorted(os.listdir()))] - columns = Columns(files, padding=(0, 1), expand=False, equal=False) - console.print(columns) - console.rule() - columns.column_first = True - console.print(columns) - columns.right_to_left = True - console.rule() - console.print(columns) diff --git a/spaces/Boops88/gsdf-Counterfeit-V2.5/app.py b/spaces/Boops88/gsdf-Counterfeit-V2.5/app.py deleted file mode 100644 index 3e61c8452c0bc94ea6cf8e7fd4fab00c30fccba4..0000000000000000000000000000000000000000 --- a/spaces/Boops88/gsdf-Counterfeit-V2.5/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/gsdf/Counterfeit-V2.5").launch() \ No newline at end of file diff --git a/spaces/Brasd99/JustClothify/helpers/processor.py b/spaces/Brasd99/JustClothify/helpers/processor.py deleted file mode 100644 index b094dca3b63d98d9bd382f1c52162f51e9e427fc..0000000000000000000000000000000000000000 --- a/spaces/Brasd99/JustClothify/helpers/processor.py +++ /dev/null @@ -1,174 +0,0 @@ -import io -import cv2 -import imageio -import numpy as np -import torch -from typing import Dict, List -from fvcore.common.config import CfgNode -from detectron2.config import get_cfg -from detectron2.engine.defaults import DefaultPredictor -from detectron2.structures.instances import Instances -from densepose import add_densepose_config -from densepose.vis.base import CompoundVisualizer -from densepose.vis.densepose_outputs_vertex import get_texture_atlases -from densepose.vis.densepose_results_textures import DensePoseResultsVisualizerWithTexture as dp_iuv_texture -from densepose.vis.extractor import CompoundExtractor, create_extractor, DensePoseResultExtractor - -class TextureProcessor: - def __init__(self, config: str, weights: str) -> None: - self.config = self.get_config(config, weights) - self.predictor = DefaultPredictor(self.config) - self.extractor = DensePoseResultExtractor() - - def process_texture(self, image: np.ndarray) -> np.ndarray: - image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) - output = self.execute(image) - if 'pred_densepose' in output: - texture = self.create_iuv(output, image) - atlas_texture_bytes = io.BytesIO() - 
imageio.imwrite(atlas_texture_bytes, texture, format='PNG') - texture_atlas_array = np.frombuffer(atlas_texture_bytes.getvalue(), dtype=np.uint8) - texture_atlas = cv2.imdecode(texture_atlas_array, cv2.IMREAD_COLOR) - texture_atlas = cv2.cvtColor(texture_atlas, cv2.COLOR_BGR2RGB) - return texture_atlas - else: - raise Exception('Clothes not found') - - def extract(self, person_img, model_img): - texture_atlas = self.process_texture(model_img) - return self.overlay_texture(texture_atlas, person_img) - - def overlay_texture(self, texture_atlas: np.ndarray, original_image: np.ndarray) -> np.ndarray: - texture_atlas[:, :, :3] = texture_atlas[:, :, 2::-1] - texture_atlases_dict = get_texture_atlases(None) - vis = dp_iuv_texture( - cfg=self.config, - texture_atlas=texture_atlas, - texture_atlases_dict=texture_atlases_dict - ) - - extractor = create_extractor(vis) - - visualizer = CompoundVisualizer([vis]) - extractor = CompoundExtractor([extractor]) - - with torch.no_grad(): - outputs = self.predictor(original_image)['instances'] - - image = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY) - image = np.tile(image[:, :, np.newaxis], [1, 1, 3]) - data = extractor(outputs) - image_vis = visualizer.visualize(image, data) - - return image_vis - - def parse_iuv(self, result: Dict) -> np.ndarray: - i = result['pred_densepose'][0].labels.cpu().numpy().astype(float) - uv = (result['pred_densepose'][0].uv.cpu().numpy() * 255.0).astype(float) - iuv = np.stack((uv[1, :, :], uv[0, :, :], i)) - iuv = np.transpose(iuv, (1, 2, 0)) - return iuv - - def parse_bbox(self, result: Dict) -> np.ndarray: - return result['pred_boxes_XYXY'][0].cpu().numpy() - - def interpolate_tex(self, tex: np.ndarray) -> np.ndarray: - valid_mask = np.array((tex.sum(0) != 0) * 1, dtype='uint8') - radius_increase = 10 - kernel = np.ones((radius_increase, radius_increase), np.uint8) - dilated_mask = cv2.dilate(valid_mask, kernel, iterations=1) - - invalid_region = 1 - valid_mask - actual_part_max = tex.max() - actual_part_min = tex.min() - actual_part_uint = np.array((tex - actual_part_min) / (actual_part_max - actual_part_min) * 255, dtype='uint8') - - actual_part_uint = cv2.inpaint(actual_part_uint.transpose((1, 2, 0)), invalid_region, 1, cv2.INPAINT_TELEA).transpose((2, 0, 1)) - - actual_part = (actual_part_uint / 255.0) * (actual_part_max - actual_part_min) + actual_part_min - actual_part = actual_part * dilated_mask - - return actual_part - - def concat_textures(self, array: List[np.ndarray]) -> np.ndarray: - texture_rows = [np.concatenate(array[i:i+6], axis=1) for i in range(0, 24, 6)] - texture = np.concatenate(texture_rows, axis=0) - return texture - - def get_texture( - self, - im: np.ndarray, - iuv: np.ndarray, - bbox: List[int], - tex_part_size: int = 200) -> np.ndarray: - - im = im.transpose(2, 1, 0) / 255 - image_w, image_h = im.shape[1], im.shape[2] - bbox[2] = bbox[2] - bbox[0] - bbox[3] = bbox[3] - bbox[1] - x, y, w, h = [int(v) for v in bbox] - bg = np.zeros((image_h, image_w, 3)) - bg[y:y + h, x:x + w, :] = iuv - iuv = bg - iuv = iuv.transpose((2, 1, 0)) - i, u, v = iuv[2], iuv[1], iuv[0] - - n_parts = 22 - texture = np.zeros((n_parts, 3, tex_part_size, tex_part_size)) - - for part_id in range(1, n_parts + 1): - generated = np.zeros((3, tex_part_size, tex_part_size)) - - x, y = u[i == part_id], v[i == part_id] - - tex_u_coo = (x * (tex_part_size - 1) / 255).astype(int) - tex_v_coo = (y * (tex_part_size - 1) / 255).astype(int) - - tex_u_coo = np.clip(tex_u_coo, 0, tex_part_size - 1) - tex_v_coo = np.clip(tex_v_coo, 0, 
tex_part_size - 1) - - for channel in range(3): - generated[channel][tex_v_coo, tex_u_coo] = im[channel][i == part_id] - - if np.sum(generated) > 0: - generated = self.interpolate_tex(generated) - - texture[part_id - 1] = generated[:, ::-1, :] - - tex_concat = np.zeros((24, tex_part_size, tex_part_size, 3)) - for i in range(texture.shape[0]): - tex_concat[i] = texture[i].transpose(2, 1, 0) - tex = self.concat_textures(tex_concat) - - return tex - - def create_iuv(self, results: Dict, image: np.ndarray) -> np.ndarray: - iuv = self.parse_iuv(results) - bbox = self.parse_bbox(results) - uv_texture = self.get_texture(image, iuv, bbox) - uv_texture = uv_texture.transpose([1, 0, 2]) - return uv_texture - - def get_config(self, config_fpath: str, model_fpath: str) -> CfgNode: - cfg = get_cfg() - add_densepose_config(cfg) - cfg.merge_from_file(config_fpath) - cfg.MODEL.WEIGHTS = model_fpath - cfg.MODEL.DEVICE = 'cpu' - cfg.freeze() - return cfg - - def execute(self, image: np.ndarray) -> Dict: - with torch.no_grad(): - outputs = self.predictor(image)['instances'] - return self.execute_on_outputs(outputs) - - def execute_on_outputs(self, outputs: Instances) -> Dict: - result = {} - if outputs.has('scores'): - result['scores'] = outputs.get('scores').cpu() - if outputs.has('pred_boxes'): - result['pred_boxes_XYXY'] = outputs.get('pred_boxes').tensor.cpu() - if outputs.has('pred_densepose'): - result['pred_densepose'] = self.extractor(outputs)[0] - return result \ No newline at end of file diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/replace.h b/spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/replace.h deleted file mode 100644 index c6ae90664ad9538e73febfde86c334011de417c8..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/replace.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#pragma once - -#include - -// this system has no special version of this algorithm - diff --git a/spaces/CVPR/WALT/mmdet/models/dense_heads/__init__.py b/spaces/CVPR/WALT/mmdet/models/dense_heads/__init__.py deleted file mode 100644 index f004dd95d97df16167f932587b3ce73b05b04a37..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/mmdet/models/dense_heads/__init__.py +++ /dev/null @@ -1,41 +0,0 @@ -from .anchor_free_head import AnchorFreeHead -from .anchor_head import AnchorHead -from .atss_head import ATSSHead -from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead -from .centripetal_head import CentripetalHead -from .corner_head import CornerHead -from .embedding_rpn_head import EmbeddingRPNHead -from .fcos_head import FCOSHead -from .fovea_head import FoveaHead -from .free_anchor_retina_head import FreeAnchorRetinaHead -from .fsaf_head import FSAFHead -from .ga_retina_head import GARetinaHead -from .ga_rpn_head import GARPNHead -from .gfl_head import GFLHead -from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead -from .ld_head import LDHead -from .nasfcos_head import NASFCOSHead -from .paa_head import PAAHead -from .pisa_retinanet_head import PISARetinaHead -from .pisa_ssd_head import PISASSDHead -from .reppoints_head import RepPointsHead -from .retina_head import RetinaHead -from .retina_sepbn_head import RetinaSepBNHead -from .rpn_head import RPNHead -from .sabl_retina_head import SABLRetinaHead -from .ssd_head import SSDHead -from .transformer_head import TransformerHead -from .vfnet_head import VFNetHead -from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead -from .yolo_head import YOLOV3Head - -__all__ = [ - 'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption', - 'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead', - 'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead', - 'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead', - 'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead', - 'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', - 'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'TransformerHead', - 'StageCascadeRPNHead', 'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead' -] diff --git a/spaces/CVPR/WALT/mmdet/models/necks/fpn.py b/spaces/CVPR/WALT/mmdet/models/necks/fpn.py deleted file mode 100644 index 5e5dfe685964f06e7a66b63a13e66162e63fcafd..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/mmdet/models/necks/fpn.py +++ /dev/null @@ -1,221 +0,0 @@ -import warnings - -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule, xavier_init -from mmcv.runner import auto_fp16 - -from ..builder import NECKS - - -@NECKS.register_module() -class FPN(nn.Module): - r"""Feature Pyramid Network. - - This is an implementation of paper `Feature Pyramid Networks for Object - Detection `_. - - Args: - in_channels (List[int]): Number of input channels per scale. - out_channels (int): Number of output channels (used at each scale) - num_outs (int): Number of output scales. - start_level (int): Index of the start input backbone level used to - build the feature pyramid. Default: 0. - end_level (int): Index of the end input backbone level (exclusive) to - build the feature pyramid. Default: -1, which means the last level. - add_extra_convs (bool | str): If bool, it decides whether to add conv - layers on top of the original feature maps. Default to False. - If True, its actual mode is specified by `extra_convs_on_inputs`. 
- If str, it specifies the source feature map of the extra convs. - Only the following options are allowed - - - 'on_input': Last feat map of neck inputs (i.e. backbone feature). - - 'on_lateral': Last feature map after lateral convs. - - 'on_output': The last output feature map after fpn convs. - extra_convs_on_inputs (bool, deprecated): Whether to apply extra convs - on the original feature from the backbone. If True, - it is equivalent to `add_extra_convs='on_input'`. If False, it is - equivalent to set `add_extra_convs='on_output'`. Default to True. - relu_before_extra_convs (bool): Whether to apply relu before the extra - conv. Default: False. - no_norm_on_lateral (bool): Whether to apply norm on lateral. - Default: False. - conv_cfg (dict): Config dict for convolution layer. Default: None. - norm_cfg (dict): Config dict for normalization layer. Default: None. - act_cfg (str): Config dict for activation layer in ConvModule. - Default: None. - upsample_cfg (dict): Config dict for interpolate layer. - Default: `dict(mode='nearest')` - - Example: - >>> import torch - >>> in_channels = [2, 3, 5, 7] - >>> scales = [340, 170, 84, 43] - >>> inputs = [torch.rand(1, c, s, s) - ... for c, s in zip(in_channels, scales)] - >>> self = FPN(in_channels, 11, len(in_channels)).eval() - >>> outputs = self.forward(inputs) - >>> for i in range(len(outputs)): - ... print(f'outputs[{i}].shape = {outputs[i].shape}') - outputs[0].shape = torch.Size([1, 11, 340, 340]) - outputs[1].shape = torch.Size([1, 11, 170, 170]) - outputs[2].shape = torch.Size([1, 11, 84, 84]) - outputs[3].shape = torch.Size([1, 11, 43, 43]) - """ - - def __init__(self, - in_channels, - out_channels, - num_outs, - start_level=0, - end_level=-1, - add_extra_convs=False, - extra_convs_on_inputs=True, - relu_before_extra_convs=False, - no_norm_on_lateral=False, - conv_cfg=None, - norm_cfg=None, - act_cfg=None, - upsample_cfg=dict(mode='nearest')): - super(FPN, self).__init__() - assert isinstance(in_channels, list) - self.in_channels = in_channels - self.out_channels = out_channels - self.num_ins = len(in_channels) - self.num_outs = num_outs - self.relu_before_extra_convs = relu_before_extra_convs - self.no_norm_on_lateral = no_norm_on_lateral - self.fp16_enabled = False - self.upsample_cfg = upsample_cfg.copy() - - if end_level == -1: - self.backbone_end_level = self.num_ins - assert num_outs >= self.num_ins - start_level - else: - # if end_level < inputs, no extra level is allowed - self.backbone_end_level = end_level - assert end_level <= len(in_channels) - assert num_outs == end_level - start_level - self.start_level = start_level - self.end_level = end_level - self.add_extra_convs = add_extra_convs - assert isinstance(add_extra_convs, (str, bool)) - if isinstance(add_extra_convs, str): - # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output' - assert add_extra_convs in ('on_input', 'on_lateral', 'on_output') - elif add_extra_convs: # True - if extra_convs_on_inputs: - # TODO: deprecate `extra_convs_on_inputs` - warnings.simplefilter('once') - warnings.warn( - '"extra_convs_on_inputs" will be deprecated in v2.9.0,' - 'Please use "add_extra_convs"', DeprecationWarning) - self.add_extra_convs = 'on_input' - else: - self.add_extra_convs = 'on_output' - - self.lateral_convs = nn.ModuleList() - self.fpn_convs = nn.ModuleList() - - for i in range(self.start_level, self.backbone_end_level): - l_conv = ConvModule( - in_channels[i], - out_channels, - 1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg if not self.no_norm_on_lateral else 
None, - act_cfg=act_cfg, - inplace=False) - fpn_conv = ConvModule( - out_channels, - out_channels, - 3, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg, - inplace=False) - - self.lateral_convs.append(l_conv) - self.fpn_convs.append(fpn_conv) - - # add extra conv layers (e.g., RetinaNet) - extra_levels = num_outs - self.backbone_end_level + self.start_level - if self.add_extra_convs and extra_levels >= 1: - for i in range(extra_levels): - if i == 0 and self.add_extra_convs == 'on_input': - in_channels = self.in_channels[self.backbone_end_level - 1] - else: - in_channels = out_channels - extra_fpn_conv = ConvModule( - in_channels, - out_channels, - 3, - stride=2, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg, - inplace=False) - self.fpn_convs.append(extra_fpn_conv) - - # default init_weights for conv(msra) and norm in ConvModule - def init_weights(self): - """Initialize the weights of FPN module.""" - for m in self.modules(): - if isinstance(m, nn.Conv2d): - xavier_init(m, distribution='uniform') - - @auto_fp16() - def forward(self, inputs): - """Forward function.""" - assert len(inputs) == len(self.in_channels) - - # build laterals - laterals = [ - lateral_conv(inputs[i + self.start_level]) - for i, lateral_conv in enumerate(self.lateral_convs) - ] - - # build top-down path - used_backbone_levels = len(laterals) - for i in range(used_backbone_levels - 1, 0, -1): - # In some cases, fixing `scale factor` (e.g. 2) is preferred, but - # it cannot co-exist with `size` in `F.interpolate`. - if 'scale_factor' in self.upsample_cfg: - laterals[i - 1] += F.interpolate(laterals[i], - **self.upsample_cfg) - else: - prev_shape = laterals[i - 1].shape[2:] - laterals[i - 1] += F.interpolate( - laterals[i], size=prev_shape, **self.upsample_cfg) - - # build outputs - # part 1: from original levels - outs = [ - self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels) - ] - # part 2: add extra levels - if self.num_outs > len(outs): - # use max pool to get more levels on top of outputs - # (e.g., Faster R-CNN, Mask R-CNN) - if not self.add_extra_convs: - for i in range(self.num_outs - used_backbone_levels): - outs.append(F.max_pool2d(outs[-1], 1, stride=2)) - # add conv layers on top of original feature maps (RetinaNet) - else: - if self.add_extra_convs == 'on_input': - extra_source = inputs[self.backbone_end_level - 1] - elif self.add_extra_convs == 'on_lateral': - extra_source = laterals[-1] - elif self.add_extra_convs == 'on_output': - extra_source = outs[-1] - else: - raise NotImplementedError - outs.append(self.fpn_convs[used_backbone_levels](extra_source)) - for i in range(used_backbone_levels + 1, self.num_outs): - if self.relu_before_extra_convs: - outs.append(self.fpn_convs[i](F.relu(outs[-1]))) - else: - outs.append(self.fpn_convs[i](outs[-1])) - return tuple(outs) diff --git a/spaces/CVPR/lama-example/bin/sample_from_dataset.py b/spaces/CVPR/lama-example/bin/sample_from_dataset.py deleted file mode 100644 index 31593b3212454dd0b6f74a39195a34b489df20a1..0000000000000000000000000000000000000000 --- a/spaces/CVPR/lama-example/bin/sample_from_dataset.py +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env python3 - -import os - -import numpy as np -import tqdm -from skimage import io -from skimage.segmentation import mark_boundaries - -from saicinpainting.evaluation.data import InpaintingDataset -from saicinpainting.evaluation.vis import save_item_for_vis - -def save_mask_for_sidebyside(item, out_file): - mask = item['mask']# > 0.5 - if mask.ndim 
== 3: - mask = mask[0] - mask = np.clip(mask * 255, 0, 255).astype('uint8') - io.imsave(out_file, mask) - -def save_img_for_sidebyside(item, out_file): - img = np.transpose(item['image'], (1, 2, 0)) - img = np.clip(img * 255, 0, 255).astype('uint8') - io.imsave(out_file, img) - -def save_masked_img_for_sidebyside(item, out_file): - mask = item['mask'] - img = item['image'] - - img = (1-mask) * img + mask - img = np.transpose(img, (1, 2, 0)) - - img = np.clip(img * 255, 0, 255).astype('uint8') - io.imsave(out_file, img) - -def main(args): - dataset = InpaintingDataset(args.datadir, img_suffix='.png') - - area_bins = np.linspace(0, 1, args.area_bins + 1) - - heights = [] - widths = [] - image_areas = [] - hole_areas = [] - hole_area_percents = [] - area_bins_count = np.zeros(args.area_bins) - area_bin_titles = [f'{area_bins[i] * 100:.0f}-{area_bins[i + 1] * 100:.0f}' for i in range(args.area_bins)] - - bin2i = [[] for _ in range(args.area_bins)] - - for i, item in enumerate(tqdm.tqdm(dataset)): - h, w = item['image'].shape[1:] - heights.append(h) - widths.append(w) - full_area = h * w - image_areas.append(full_area) - hole_area = (item['mask'] == 1).sum() - hole_areas.append(hole_area) - hole_percent = hole_area / full_area - hole_area_percents.append(hole_percent) - bin_i = np.clip(np.searchsorted(area_bins, hole_percent) - 1, 0, len(area_bins_count) - 1) - area_bins_count[bin_i] += 1 - bin2i[bin_i].append(i) - - os.makedirs(args.outdir, exist_ok=True) - - for bin_i in range(args.area_bins): - bindir = os.path.join(args.outdir, area_bin_titles[bin_i]) - os.makedirs(bindir, exist_ok=True) - bin_idx = bin2i[bin_i] - for sample_i in np.random.choice(bin_idx, size=min(len(bin_idx), args.samples_n), replace=False): - item = dataset[sample_i] - path = os.path.join(bindir, dataset.img_filenames[sample_i].split('/')[-1]) - save_masked_img_for_sidebyside(item, path) - - -if __name__ == '__main__': - import argparse - - aparser = argparse.ArgumentParser() - aparser.add_argument('--datadir', type=str, - help='Path to folder with images and masks (output of gen_mask_dataset.py)') - aparser.add_argument('--outdir', type=str, help='Where to put results') - aparser.add_argument('--samples-n', type=int, default=10, - help='Number of sample images with masks to copy for visualization for each area bin') - aparser.add_argument('--area-bins', type=int, default=10, help='How many area bins to have') - - main(aparser.parse_args()) diff --git a/spaces/CVPR/lama-example/saicinpainting/training/trainers/base.py b/spaces/CVPR/lama-example/saicinpainting/training/trainers/base.py deleted file mode 100644 index f1b1c66fc96e7edfba7b1ee193272f92b5db7438..0000000000000000000000000000000000000000 --- a/spaces/CVPR/lama-example/saicinpainting/training/trainers/base.py +++ /dev/null @@ -1,291 +0,0 @@ -import copy -import logging -from typing import Dict, Tuple - -import pandas as pd -import pytorch_lightning as ptl -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.utils.data import DistributedSampler - -from saicinpainting.evaluation import make_evaluator -from saicinpainting.training.data.datasets import make_default_train_dataloader, make_default_val_dataloader -from saicinpainting.training.losses.adversarial import make_discrim_loss -from saicinpainting.training.losses.perceptual import PerceptualLoss, ResNetPL -from saicinpainting.training.modules import make_generator, make_discriminator -from saicinpainting.training.visualizers import make_visualizer -from saicinpainting.utils import 
add_prefix_to_keys, average_dicts, set_requires_grad, flatten_dict, \ - get_has_ddp_rank - -LOGGER = logging.getLogger(__name__) - - -def make_optimizer(parameters, kind='adamw', **kwargs): - if kind == 'adam': - optimizer_class = torch.optim.Adam - elif kind == 'adamw': - optimizer_class = torch.optim.AdamW - else: - raise ValueError(f'Unknown optimizer kind {kind}') - return optimizer_class(parameters, **kwargs) - - -def update_running_average(result: nn.Module, new_iterate_model: nn.Module, decay=0.999): - with torch.no_grad(): - res_params = dict(result.named_parameters()) - new_params = dict(new_iterate_model.named_parameters()) - - for k in res_params.keys(): - res_params[k].data.mul_(decay).add_(new_params[k].data, alpha=1 - decay) - - -def make_multiscale_noise(base_tensor, scales=6, scale_mode='bilinear'): - batch_size, _, height, width = base_tensor.shape - cur_height, cur_width = height, width - result = [] - align_corners = False if scale_mode in ('bilinear', 'bicubic') else None - for _ in range(scales): - cur_sample = torch.randn(batch_size, 1, cur_height, cur_width, device=base_tensor.device) - cur_sample_scaled = F.interpolate(cur_sample, size=(height, width), mode=scale_mode, align_corners=align_corners) - result.append(cur_sample_scaled) - cur_height //= 2 - cur_width //= 2 - return torch.cat(result, dim=1) - - -class BaseInpaintingTrainingModule(ptl.LightningModule): - def __init__(self, config, use_ddp, *args, predict_only=False, visualize_each_iters=100, - average_generator=False, generator_avg_beta=0.999, average_generator_start_step=30000, - average_generator_period=10, store_discr_outputs_for_vis=False, - **kwargs): - super().__init__(*args, **kwargs) - LOGGER.info('BaseInpaintingTrainingModule init called') - - self.config = config - - self.generator = make_generator(config, **self.config.generator) - self.use_ddp = use_ddp - - if not get_has_ddp_rank(): - LOGGER.info(f'Generator\n{self.generator}') - - if not predict_only: - self.save_hyperparameters(self.config) - self.discriminator = make_discriminator(**self.config.discriminator) - self.adversarial_loss = make_discrim_loss(**self.config.losses.adversarial) - self.visualizer = make_visualizer(**self.config.visualizer) - self.val_evaluator = make_evaluator(**self.config.evaluator) - self.test_evaluator = make_evaluator(**self.config.evaluator) - - if not get_has_ddp_rank(): - LOGGER.info(f'Discriminator\n{self.discriminator}') - - extra_val = self.config.data.get('extra_val', ()) - if extra_val: - self.extra_val_titles = list(extra_val) - self.extra_evaluators = nn.ModuleDict({k: make_evaluator(**self.config.evaluator) - for k in extra_val}) - else: - self.extra_evaluators = {} - - self.average_generator = average_generator - self.generator_avg_beta = generator_avg_beta - self.average_generator_start_step = average_generator_start_step - self.average_generator_period = average_generator_period - self.generator_average = None - self.last_generator_averaging_step = -1 - self.store_discr_outputs_for_vis = store_discr_outputs_for_vis - - if self.config.losses.get("l1", {"weight_known": 0})['weight_known'] > 0: - self.loss_l1 = nn.L1Loss(reduction='none') - - if self.config.losses.get("mse", {"weight": 0})['weight'] > 0: - self.loss_mse = nn.MSELoss(reduction='none') - - if self.config.losses.perceptual.weight > 0: - self.loss_pl = PerceptualLoss() - - if self.config.losses.get("resnet_pl", {"weight": 0})['weight'] > 0: - self.loss_resnet_pl = ResNetPL(**self.config.losses.resnet_pl) - else: - self.loss_resnet_pl = 
None - - self.visualize_each_iters = visualize_each_iters - LOGGER.info('BaseInpaintingTrainingModule init done') - - def configure_optimizers(self): - discriminator_params = list(self.discriminator.parameters()) - return [ - dict(optimizer=make_optimizer(self.generator.parameters(), **self.config.optimizers.generator)), - dict(optimizer=make_optimizer(discriminator_params, **self.config.optimizers.discriminator)), - ] - - def train_dataloader(self): - kwargs = dict(self.config.data.train) - if self.use_ddp: - kwargs['ddp_kwargs'] = dict(num_replicas=self.trainer.num_nodes * self.trainer.num_processes, - rank=self.trainer.global_rank, - shuffle=True) - dataloader = make_default_train_dataloader(**self.config.data.train) - return dataloader - - def val_dataloader(self): - res = [make_default_val_dataloader(**self.config.data.val)] - - if self.config.data.visual_test is not None: - res = res + [make_default_val_dataloader(**self.config.data.visual_test)] - else: - res = res + res - - extra_val = self.config.data.get('extra_val', ()) - if extra_val: - res += [make_default_val_dataloader(**extra_val[k]) for k in self.extra_val_titles] - - return res - - def training_step(self, batch, batch_idx, optimizer_idx=None): - self._is_training_step = True - return self._do_step(batch, batch_idx, mode='train', optimizer_idx=optimizer_idx) - - def validation_step(self, batch, batch_idx, dataloader_idx): - extra_val_key = None - if dataloader_idx == 0: - mode = 'val' - elif dataloader_idx == 1: - mode = 'test' - else: - mode = 'extra_val' - extra_val_key = self.extra_val_titles[dataloader_idx - 2] - self._is_training_step = False - return self._do_step(batch, batch_idx, mode=mode, extra_val_key=extra_val_key) - - def training_step_end(self, batch_parts_outputs): - if self.training and self.average_generator \ - and self.global_step >= self.average_generator_start_step \ - and self.global_step >= self.last_generator_averaging_step + self.average_generator_period: - if self.generator_average is None: - self.generator_average = copy.deepcopy(self.generator) - else: - update_running_average(self.generator_average, self.generator, decay=self.generator_avg_beta) - self.last_generator_averaging_step = self.global_step - - full_loss = (batch_parts_outputs['loss'].mean() - if torch.is_tensor(batch_parts_outputs['loss']) # loss is not tensor when no discriminator used - else torch.tensor(batch_parts_outputs['loss']).float().requires_grad_(True)) - log_info = {k: v.mean() for k, v in batch_parts_outputs['log_info'].items()} - self.log_dict(log_info, on_step=True, on_epoch=False) - return full_loss - - def validation_epoch_end(self, outputs): - outputs = [step_out for out_group in outputs for step_out in out_group] - averaged_logs = average_dicts(step_out['log_info'] for step_out in outputs) - self.log_dict({k: v.mean() for k, v in averaged_logs.items()}) - - pd.set_option('display.max_columns', 500) - pd.set_option('display.width', 1000) - - # standard validation - val_evaluator_states = [s['val_evaluator_state'] for s in outputs if 'val_evaluator_state' in s] - val_evaluator_res = self.val_evaluator.evaluation_end(states=val_evaluator_states) - val_evaluator_res_df = pd.DataFrame(val_evaluator_res).stack(1).unstack(0) - val_evaluator_res_df.dropna(axis=1, how='all', inplace=True) - LOGGER.info(f'Validation metrics after epoch #{self.current_epoch}, ' - f'total {self.global_step} iterations:\n{val_evaluator_res_df}') - - for k, v in flatten_dict(val_evaluator_res).items(): - self.log(f'val_{k}', v) - - # standard 
visual test - test_evaluator_states = [s['test_evaluator_state'] for s in outputs - if 'test_evaluator_state' in s] - test_evaluator_res = self.test_evaluator.evaluation_end(states=test_evaluator_states) - test_evaluator_res_df = pd.DataFrame(test_evaluator_res).stack(1).unstack(0) - test_evaluator_res_df.dropna(axis=1, how='all', inplace=True) - LOGGER.info(f'Test metrics after epoch #{self.current_epoch}, ' - f'total {self.global_step} iterations:\n{test_evaluator_res_df}') - - for k, v in flatten_dict(test_evaluator_res).items(): - self.log(f'test_{k}', v) - - # extra validations - if self.extra_evaluators: - for cur_eval_title, cur_evaluator in self.extra_evaluators.items(): - cur_state_key = f'extra_val_{cur_eval_title}_evaluator_state' - cur_states = [s[cur_state_key] for s in outputs if cur_state_key in s] - cur_evaluator_res = cur_evaluator.evaluation_end(states=cur_states) - cur_evaluator_res_df = pd.DataFrame(cur_evaluator_res).stack(1).unstack(0) - cur_evaluator_res_df.dropna(axis=1, how='all', inplace=True) - LOGGER.info(f'Extra val {cur_eval_title} metrics after epoch #{self.current_epoch}, ' - f'total {self.global_step} iterations:\n{cur_evaluator_res_df}') - for k, v in flatten_dict(cur_evaluator_res).items(): - self.log(f'extra_val_{cur_eval_title}_{k}', v) - - def _do_step(self, batch, batch_idx, mode='train', optimizer_idx=None, extra_val_key=None): - if optimizer_idx == 0: # step for generator - set_requires_grad(self.generator, True) - set_requires_grad(self.discriminator, False) - elif optimizer_idx == 1: # step for discriminator - set_requires_grad(self.generator, False) - set_requires_grad(self.discriminator, True) - - batch = self(batch) - - total_loss = 0 - metrics = {} - - if optimizer_idx is None or optimizer_idx == 0: # step for generator - total_loss, metrics = self.generator_loss(batch) - - elif optimizer_idx is None or optimizer_idx == 1: # step for discriminator - if self.config.losses.adversarial.weight > 0: - total_loss, metrics = self.discriminator_loss(batch) - - if self.get_ddp_rank() in (None, 0) and (batch_idx % self.visualize_each_iters == 0 or mode == 'test'): - if self.config.losses.adversarial.weight > 0: - if self.store_discr_outputs_for_vis: - with torch.no_grad(): - self.store_discr_outputs(batch) - vis_suffix = f'_{mode}' - if mode == 'extra_val': - vis_suffix += f'_{extra_val_key}' - self.visualizer(self.current_epoch, batch_idx, batch, suffix=vis_suffix) - - metrics_prefix = f'{mode}_' - if mode == 'extra_val': - metrics_prefix += f'{extra_val_key}_' - result = dict(loss=total_loss, log_info=add_prefix_to_keys(metrics, metrics_prefix)) - if mode == 'val': - result['val_evaluator_state'] = self.val_evaluator.process_batch(batch) - elif mode == 'test': - result['test_evaluator_state'] = self.test_evaluator.process_batch(batch) - elif mode == 'extra_val': - result[f'extra_val_{extra_val_key}_evaluator_state'] = self.extra_evaluators[extra_val_key].process_batch(batch) - - return result - - def get_current_generator(self, no_average=False): - if not no_average and not self.training and self.average_generator and self.generator_average is not None: - return self.generator_average - return self.generator - - def forward(self, batch: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]: - """Pass data through generator and obtain at leas 'predicted_image' and 'inpainted' keys""" - raise NotImplementedError() - - def generator_loss(self, batch) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]: - raise NotImplementedError() - - def 
discriminator_loss(self, batch) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]: - raise NotImplementedError() - - def store_discr_outputs(self, batch): - out_size = batch['image'].shape[2:] - discr_real_out, _ = self.discriminator(batch['image']) - discr_fake_out, _ = self.discriminator(batch['predicted_image']) - batch['discr_output_real'] = F.interpolate(discr_real_out, size=out_size, mode='nearest') - batch['discr_output_fake'] = F.interpolate(discr_fake_out, size=out_size, mode='nearest') - batch['discr_output_diff'] = batch['discr_output_real'] - batch['discr_output_fake'] - - def get_ddp_rank(self): - return self.trainer.global_rank if (self.trainer.num_nodes * self.trainer.num_processes) > 1 else None diff --git a/spaces/Caoyunkang/Segment-Any-Anomaly/SAM/segment_anything/utils/onnx.py b/spaces/Caoyunkang/Segment-Any-Anomaly/SAM/segment_anything/utils/onnx.py deleted file mode 100644 index 4297b31291e036700d6ad0b818afb7dd72da3054..0000000000000000000000000000000000000000 --- a/spaces/Caoyunkang/Segment-Any-Anomaly/SAM/segment_anything/utils/onnx.py +++ /dev/null @@ -1,144 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import torch -import torch.nn as nn -from torch.nn import functional as F - -from typing import Tuple - -from ..modeling import Sam -from .amg import calculate_stability_score - - -class SamOnnxModel(nn.Module): - """ - This model should not be called directly, but is used in ONNX export. - It combines the prompt encoder, mask decoder, and mask postprocessing of Sam, - with some functions modified to enable model tracing. Also supports extra - options controlling what information. See the ONNX export script for details. 
- """ - - def __init__( - self, - model: Sam, - return_single_mask: bool, - use_stability_score: bool = False, - return_extra_metrics: bool = False, - ) -> None: - super().__init__() - self.mask_decoder = model.mask_decoder - self.model = model - self.img_size = model.image_encoder.img_size - self.return_single_mask = return_single_mask - self.use_stability_score = use_stability_score - self.stability_score_offset = 1.0 - self.return_extra_metrics = return_extra_metrics - - @staticmethod - def resize_longest_image_size( - input_image_size: torch.Tensor, longest_side: int - ) -> torch.Tensor: - input_image_size = input_image_size.to(torch.float32) - scale = longest_side / torch.max(input_image_size) - transformed_size = scale * input_image_size - transformed_size = torch.floor(transformed_size + 0.5).to(torch.int64) - return transformed_size - - def _embed_points(self, point_coords: torch.Tensor, point_labels: torch.Tensor) -> torch.Tensor: - point_coords = point_coords + 0.5 - point_coords = point_coords / self.img_size - point_embedding = self.model.prompt_encoder.pe_layer._pe_encoding(point_coords) - point_labels = point_labels.unsqueeze(-1).expand_as(point_embedding) - - point_embedding = point_embedding * (point_labels != -1) - point_embedding = point_embedding + self.model.prompt_encoder.not_a_point_embed.weight * ( - point_labels == -1 - ) - - for i in range(self.model.prompt_encoder.num_point_embeddings): - point_embedding = point_embedding + self.model.prompt_encoder.point_embeddings[ - i - ].weight * (point_labels == i) - - return point_embedding - - def _embed_masks(self, input_mask: torch.Tensor, has_mask_input: torch.Tensor) -> torch.Tensor: - mask_embedding = has_mask_input * self.model.prompt_encoder.mask_downscaling(input_mask) - mask_embedding = mask_embedding + ( - 1 - has_mask_input - ) * self.model.prompt_encoder.no_mask_embed.weight.reshape(1, -1, 1, 1) - return mask_embedding - - def mask_postprocessing(self, masks: torch.Tensor, orig_im_size: torch.Tensor) -> torch.Tensor: - masks = F.interpolate( - masks, - size=(self.img_size, self.img_size), - mode="bilinear", - align_corners=False, - ) - - prepadded_size = self.resize_longest_image_size(orig_im_size, self.img_size) - masks = masks[..., : int(prepadded_size[0]), : int(prepadded_size[1])] - - orig_im_size = orig_im_size.to(torch.int64) - h, w = orig_im_size[0], orig_im_size[1] - masks = F.interpolate(masks, size=(h, w), mode="bilinear", align_corners=False) - return masks - - def select_masks( - self, masks: torch.Tensor, iou_preds: torch.Tensor, num_points: int - ) -> Tuple[torch.Tensor, torch.Tensor]: - # Determine if we should return the multiclick mask or not from the number of points. - # The reweighting is used to avoid control flow. 
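        # score_reweight is [1000, 0, ..., 0], so (num_points - 2.5) * score_reweight
        # shifts only the score of mask token 0, the decoder's single-mask output.
        # With three or more prompt points the shift is +500 or larger and argmax
        # returns token 0; with one or two points it is strongly negative, so one of
        # the multimask tokens wins instead, with no if-statement in the traced graph.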
- score_reweight = torch.tensor( - [[1000] + [0] * (self.model.mask_decoder.num_mask_tokens - 1)] - ).to(iou_preds.device) - score = iou_preds + (num_points - 2.5) * score_reweight - best_idx = torch.argmax(score, dim=1) - masks = masks[torch.arange(masks.shape[0]), best_idx, :, :].unsqueeze(1) - iou_preds = iou_preds[torch.arange(masks.shape[0]), best_idx].unsqueeze(1) - - return masks, iou_preds - - @torch.no_grad() - def forward( - self, - image_embeddings: torch.Tensor, - point_coords: torch.Tensor, - point_labels: torch.Tensor, - mask_input: torch.Tensor, - has_mask_input: torch.Tensor, - orig_im_size: torch.Tensor, - ): - sparse_embedding = self._embed_points(point_coords, point_labels) - dense_embedding = self._embed_masks(mask_input, has_mask_input) - - masks, scores = self.model.mask_decoder.predict_masks( - image_embeddings=image_embeddings, - image_pe=self.model.prompt_encoder.get_dense_pe(), - sparse_prompt_embeddings=sparse_embedding, - dense_prompt_embeddings=dense_embedding, - ) - - if self.use_stability_score: - scores = calculate_stability_score( - masks, self.model.mask_threshold, self.stability_score_offset - ) - - if self.return_single_mask: - masks, scores = self.select_masks(masks, scores, point_coords.shape[1]) - - upscaled_masks = self.mask_postprocessing(masks, orig_im_size) - - if self.return_extra_metrics: - stability_scores = calculate_stability_score( - upscaled_masks, self.model.mask_threshold, self.stability_score_offset - ) - areas = (upscaled_masks > self.model.mask_threshold).sum(-1).sum(-1) - return upscaled_masks, scores, stability_scores, areas, masks - - return upscaled_masks, scores, masks diff --git a/spaces/Chukwuka/Dog_Breed_ImageWoof/README.md b/spaces/Chukwuka/Dog_Breed_ImageWoof/README.md deleted file mode 100644 index 9ae3144729664b114e47e78e37b6bc9ebb6b74af..0000000000000000000000000000000000000000 --- a/spaces/Chukwuka/Dog_Breed_ImageWoof/README.md +++ /dev/null @@ -1,400 +0,0 @@ ---- -title: Dog Breed ImageWoof -emoji: ⚡ -colorFrom: blue -colorTo: indigo -sdk: gradio -sdk_version: 3.17.0 -app_file: app.py -pinned: false -license: mit ---- -# ImageWoof Classification -![](https://miro.medium.com/max/2240/1*e84otk0ul3xS_65l94qWeA.png) - -Click to visit the Github Repo -## Problem Statement And Description -A subset of 10 harder to classify classes from Imagenet (all dog breeds): Australian terrier, Border terrier, Samoyed, beagle, Shih-Tzu, English foxhound, Rhodesian ridgeback, dingo, golden retriever, Old English sheepdog. -An EfficientNetB2 feature extractor computer vision model to classify images of Dog breeds was created. -summary(eff_b2, (3,224,224),device='cpu') -
    -----------------------------------------------------------------
    -        Layer (type)               Output Shape         Param #
    -================================================================
    -            Conv2d-1         [-1, 32, 112, 112]             864
    -       BatchNorm2d-2         [-1, 32, 112, 112]              64
    -              SiLU-3         [-1, 32, 112, 112]               0
    -            Conv2d-4         [-1, 32, 112, 112]             288
    -       BatchNorm2d-5         [-1, 32, 112, 112]              64
    -              SiLU-6         [-1, 32, 112, 112]               0
    - AdaptiveAvgPool2d-7             [-1, 32, 1, 1]               0
    -            Conv2d-8              [-1, 8, 1, 1]             264
    -              SiLU-9              [-1, 8, 1, 1]               0
    -           Conv2d-10             [-1, 32, 1, 1]             288
    -          Sigmoid-11             [-1, 32, 1, 1]               0
    -SqueezeExcitation-12         [-1, 32, 112, 112]               0
    -           Conv2d-13         [-1, 16, 112, 112]             512
    -      BatchNorm2d-14         [-1, 16, 112, 112]              32
    -           MBConv-15         [-1, 16, 112, 112]               0
    -           Conv2d-16         [-1, 16, 112, 112]             144
    -      BatchNorm2d-17         [-1, 16, 112, 112]              32
    -             SiLU-18         [-1, 16, 112, 112]               0
    -AdaptiveAvgPool2d-19             [-1, 16, 1, 1]               0
    -           Conv2d-20              [-1, 4, 1, 1]              68
    -             SiLU-21              [-1, 4, 1, 1]               0
    -           Conv2d-22             [-1, 16, 1, 1]              80
    -          Sigmoid-23             [-1, 16, 1, 1]               0
    -SqueezeExcitation-24         [-1, 16, 112, 112]               0
    -           Conv2d-25         [-1, 16, 112, 112]             256
    -      BatchNorm2d-26         [-1, 16, 112, 112]              32
    -  StochasticDepth-27         [-1, 16, 112, 112]               0
    -           MBConv-28         [-1, 16, 112, 112]               0
    -           Conv2d-29         [-1, 96, 112, 112]           1,536
    -      BatchNorm2d-30         [-1, 96, 112, 112]             192
    -             SiLU-31         [-1, 96, 112, 112]               0
    -           Conv2d-32           [-1, 96, 56, 56]             864
    -      BatchNorm2d-33           [-1, 96, 56, 56]             192
    -             SiLU-34           [-1, 96, 56, 56]               0
    -AdaptiveAvgPool2d-35             [-1, 96, 1, 1]               0
    -           Conv2d-36              [-1, 4, 1, 1]             388
    -             SiLU-37              [-1, 4, 1, 1]               0
    -           Conv2d-38             [-1, 96, 1, 1]             480
    -          Sigmoid-39             [-1, 96, 1, 1]               0
    -SqueezeExcitation-40           [-1, 96, 56, 56]               0
    -           Conv2d-41           [-1, 24, 56, 56]           2,304
    -      BatchNorm2d-42           [-1, 24, 56, 56]              48
    -           MBConv-43           [-1, 24, 56, 56]               0
    -           Conv2d-44          [-1, 144, 56, 56]           3,456
    -      BatchNorm2d-45          [-1, 144, 56, 56]             288
    -             SiLU-46          [-1, 144, 56, 56]               0
    -           Conv2d-47          [-1, 144, 56, 56]           1,296
    -      BatchNorm2d-48          [-1, 144, 56, 56]             288
    -             SiLU-49          [-1, 144, 56, 56]               0
    -AdaptiveAvgPool2d-50            [-1, 144, 1, 1]               0
    -           Conv2d-51              [-1, 6, 1, 1]             870
    -             SiLU-52              [-1, 6, 1, 1]               0
    -           Conv2d-53            [-1, 144, 1, 1]           1,008
    -          Sigmoid-54            [-1, 144, 1, 1]               0
    -SqueezeExcitation-55          [-1, 144, 56, 56]               0
    -           Conv2d-56           [-1, 24, 56, 56]           3,456
    -      BatchNorm2d-57           [-1, 24, 56, 56]              48
    -  StochasticDepth-58           [-1, 24, 56, 56]               0
    -           MBConv-59           [-1, 24, 56, 56]               0
    -           Conv2d-60          [-1, 144, 56, 56]           3,456
    -      BatchNorm2d-61          [-1, 144, 56, 56]             288
    -             SiLU-62          [-1, 144, 56, 56]               0
    -           Conv2d-63          [-1, 144, 56, 56]           1,296
    -      BatchNorm2d-64          [-1, 144, 56, 56]             288
    -             SiLU-65          [-1, 144, 56, 56]               0
    -AdaptiveAvgPool2d-66            [-1, 144, 1, 1]               0
    -           Conv2d-67              [-1, 6, 1, 1]             870
    -             SiLU-68              [-1, 6, 1, 1]               0
    -           Conv2d-69            [-1, 144, 1, 1]           1,008
    -          Sigmoid-70            [-1, 144, 1, 1]               0
    -SqueezeExcitation-71          [-1, 144, 56, 56]               0
    -           Conv2d-72           [-1, 24, 56, 56]           3,456
    -      BatchNorm2d-73           [-1, 24, 56, 56]              48
    -  StochasticDepth-74           [-1, 24, 56, 56]               0
    -           MBConv-75           [-1, 24, 56, 56]               0
    -           Conv2d-76          [-1, 144, 56, 56]           3,456
    -      BatchNorm2d-77          [-1, 144, 56, 56]             288
    -             SiLU-78          [-1, 144, 56, 56]               0
    -           Conv2d-79          [-1, 144, 28, 28]           3,600
    -      BatchNorm2d-80          [-1, 144, 28, 28]             288
    -             SiLU-81          [-1, 144, 28, 28]               0
    -AdaptiveAvgPool2d-82            [-1, 144, 1, 1]               0
    -           Conv2d-83              [-1, 6, 1, 1]             870
    -             SiLU-84              [-1, 6, 1, 1]               0
    -           Conv2d-85            [-1, 144, 1, 1]           1,008
    -          Sigmoid-86            [-1, 144, 1, 1]               0
    -SqueezeExcitation-87          [-1, 144, 28, 28]               0
    -           Conv2d-88           [-1, 48, 28, 28]           6,912
    -      BatchNorm2d-89           [-1, 48, 28, 28]              96
    -           MBConv-90           [-1, 48, 28, 28]               0
    -           Conv2d-91          [-1, 288, 28, 28]          13,824
    -      BatchNorm2d-92          [-1, 288, 28, 28]             576
    -             SiLU-93          [-1, 288, 28, 28]               0
    -           Conv2d-94          [-1, 288, 28, 28]           7,200
    -      BatchNorm2d-95          [-1, 288, 28, 28]             576
    -             SiLU-96          [-1, 288, 28, 28]               0
    -AdaptiveAvgPool2d-97            [-1, 288, 1, 1]               0
    -           Conv2d-98             [-1, 12, 1, 1]           3,468
    -             SiLU-99             [-1, 12, 1, 1]               0
    -          Conv2d-100            [-1, 288, 1, 1]           3,744
    -         Sigmoid-101            [-1, 288, 1, 1]               0
    -SqueezeExcitation-102          [-1, 288, 28, 28]               0
    -          Conv2d-103           [-1, 48, 28, 28]          13,824
    -     BatchNorm2d-104           [-1, 48, 28, 28]              96
    - StochasticDepth-105           [-1, 48, 28, 28]               0
    -          MBConv-106           [-1, 48, 28, 28]               0
    -          Conv2d-107          [-1, 288, 28, 28]          13,824
    -     BatchNorm2d-108          [-1, 288, 28, 28]             576
    -            SiLU-109          [-1, 288, 28, 28]               0
    -          Conv2d-110          [-1, 288, 28, 28]           7,200
    -     BatchNorm2d-111          [-1, 288, 28, 28]             576
    -            SiLU-112          [-1, 288, 28, 28]               0
    -AdaptiveAvgPool2d-113            [-1, 288, 1, 1]               0
    -          Conv2d-114             [-1, 12, 1, 1]           3,468
    -            SiLU-115             [-1, 12, 1, 1]               0
    -          Conv2d-116            [-1, 288, 1, 1]           3,744
    -         Sigmoid-117            [-1, 288, 1, 1]               0
    -SqueezeExcitation-118          [-1, 288, 28, 28]               0
    -          Conv2d-119           [-1, 48, 28, 28]          13,824
    -     BatchNorm2d-120           [-1, 48, 28, 28]              96
    - StochasticDepth-121           [-1, 48, 28, 28]               0
    -          MBConv-122           [-1, 48, 28, 28]               0
    -          Conv2d-123          [-1, 288, 28, 28]          13,824
    -     BatchNorm2d-124          [-1, 288, 28, 28]             576
    -            SiLU-125          [-1, 288, 28, 28]               0
    -          Conv2d-126          [-1, 288, 14, 14]           2,592
    -     BatchNorm2d-127          [-1, 288, 14, 14]             576
    -            SiLU-128          [-1, 288, 14, 14]               0
    -AdaptiveAvgPool2d-129            [-1, 288, 1, 1]               0
    -          Conv2d-130             [-1, 12, 1, 1]           3,468
    -            SiLU-131             [-1, 12, 1, 1]               0
    -          Conv2d-132            [-1, 288, 1, 1]           3,744
    -         Sigmoid-133            [-1, 288, 1, 1]               0
    -SqueezeExcitation-134          [-1, 288, 14, 14]               0
    -          Conv2d-135           [-1, 88, 14, 14]          25,344
    -     BatchNorm2d-136           [-1, 88, 14, 14]             176
    -          MBConv-137           [-1, 88, 14, 14]               0
    -          Conv2d-138          [-1, 528, 14, 14]          46,464
    -     BatchNorm2d-139          [-1, 528, 14, 14]           1,056
    -            SiLU-140          [-1, 528, 14, 14]               0
    -          Conv2d-141          [-1, 528, 14, 14]           4,752
    -     BatchNorm2d-142          [-1, 528, 14, 14]           1,056
    -            SiLU-143          [-1, 528, 14, 14]               0
    -AdaptiveAvgPool2d-144            [-1, 528, 1, 1]               0
    -          Conv2d-145             [-1, 22, 1, 1]          11,638
    -            SiLU-146             [-1, 22, 1, 1]               0
    -          Conv2d-147            [-1, 528, 1, 1]          12,144
    -         Sigmoid-148            [-1, 528, 1, 1]               0
    -SqueezeExcitation-149          [-1, 528, 14, 14]               0
    -          Conv2d-150           [-1, 88, 14, 14]          46,464
    -     BatchNorm2d-151           [-1, 88, 14, 14]             176
    - StochasticDepth-152           [-1, 88, 14, 14]               0
    -          MBConv-153           [-1, 88, 14, 14]               0
    -          Conv2d-154          [-1, 528, 14, 14]          46,464
    -     BatchNorm2d-155          [-1, 528, 14, 14]           1,056
    -            SiLU-156          [-1, 528, 14, 14]               0
    -          Conv2d-157          [-1, 528, 14, 14]           4,752
    -     BatchNorm2d-158          [-1, 528, 14, 14]           1,056
    -            SiLU-159          [-1, 528, 14, 14]               0
    -AdaptiveAvgPool2d-160            [-1, 528, 1, 1]               0
    -          Conv2d-161             [-1, 22, 1, 1]          11,638
    -            SiLU-162             [-1, 22, 1, 1]               0
    -          Conv2d-163            [-1, 528, 1, 1]          12,144
    -         Sigmoid-164            [-1, 528, 1, 1]               0
    -SqueezeExcitation-165          [-1, 528, 14, 14]               0
    -          Conv2d-166           [-1, 88, 14, 14]          46,464
    -     BatchNorm2d-167           [-1, 88, 14, 14]             176
    - StochasticDepth-168           [-1, 88, 14, 14]               0
    -          MBConv-169           [-1, 88, 14, 14]               0
    -          Conv2d-170          [-1, 528, 14, 14]          46,464
    -     BatchNorm2d-171          [-1, 528, 14, 14]           1,056
    -            SiLU-172          [-1, 528, 14, 14]               0
    -          Conv2d-173          [-1, 528, 14, 14]           4,752
    -     BatchNorm2d-174          [-1, 528, 14, 14]           1,056
    -            SiLU-175          [-1, 528, 14, 14]               0
    -AdaptiveAvgPool2d-176            [-1, 528, 1, 1]               0
    -          Conv2d-177             [-1, 22, 1, 1]          11,638
    -            SiLU-178             [-1, 22, 1, 1]               0
    -          Conv2d-179            [-1, 528, 1, 1]          12,144
    -         Sigmoid-180            [-1, 528, 1, 1]               0
    -SqueezeExcitation-181          [-1, 528, 14, 14]               0
    -          Conv2d-182           [-1, 88, 14, 14]          46,464
    -     BatchNorm2d-183           [-1, 88, 14, 14]             176
    - StochasticDepth-184           [-1, 88, 14, 14]               0
    -          MBConv-185           [-1, 88, 14, 14]               0
    -          Conv2d-186          [-1, 528, 14, 14]          46,464
    -     BatchNorm2d-187          [-1, 528, 14, 14]           1,056
    -            SiLU-188          [-1, 528, 14, 14]               0
    -          Conv2d-189          [-1, 528, 14, 14]          13,200
    -     BatchNorm2d-190          [-1, 528, 14, 14]           1,056
    -            SiLU-191          [-1, 528, 14, 14]               0
    -AdaptiveAvgPool2d-192            [-1, 528, 1, 1]               0
    -          Conv2d-193             [-1, 22, 1, 1]          11,638
    -            SiLU-194             [-1, 22, 1, 1]               0
    -          Conv2d-195            [-1, 528, 1, 1]          12,144
    -         Sigmoid-196            [-1, 528, 1, 1]               0
    -SqueezeExcitation-197          [-1, 528, 14, 14]               0
    -          Conv2d-198          [-1, 120, 14, 14]          63,360
    -     BatchNorm2d-199          [-1, 120, 14, 14]             240
    -          MBConv-200          [-1, 120, 14, 14]               0
    -          Conv2d-201          [-1, 720, 14, 14]          86,400
    -     BatchNorm2d-202          [-1, 720, 14, 14]           1,440
    -            SiLU-203          [-1, 720, 14, 14]               0
    -          Conv2d-204          [-1, 720, 14, 14]          18,000
    -     BatchNorm2d-205          [-1, 720, 14, 14]           1,440
    -            SiLU-206          [-1, 720, 14, 14]               0
    -AdaptiveAvgPool2d-207            [-1, 720, 1, 1]               0
    -          Conv2d-208             [-1, 30, 1, 1]          21,630
    -            SiLU-209             [-1, 30, 1, 1]               0
    -          Conv2d-210            [-1, 720, 1, 1]          22,320
    -         Sigmoid-211            [-1, 720, 1, 1]               0
    -SqueezeExcitation-212          [-1, 720, 14, 14]               0
    -          Conv2d-213          [-1, 120, 14, 14]          86,400
    -     BatchNorm2d-214          [-1, 120, 14, 14]             240
    - StochasticDepth-215          [-1, 120, 14, 14]               0
    -          MBConv-216          [-1, 120, 14, 14]               0
    -          Conv2d-217          [-1, 720, 14, 14]          86,400
    -     BatchNorm2d-218          [-1, 720, 14, 14]           1,440
    -            SiLU-219          [-1, 720, 14, 14]               0
    -          Conv2d-220          [-1, 720, 14, 14]          18,000
    -     BatchNorm2d-221          [-1, 720, 14, 14]           1,440
    -            SiLU-222          [-1, 720, 14, 14]               0
    -AdaptiveAvgPool2d-223            [-1, 720, 1, 1]               0
    -          Conv2d-224             [-1, 30, 1, 1]          21,630
    -            SiLU-225             [-1, 30, 1, 1]               0
    -          Conv2d-226            [-1, 720, 1, 1]          22,320
    -         Sigmoid-227            [-1, 720, 1, 1]               0
    -SqueezeExcitation-228          [-1, 720, 14, 14]               0
    -          Conv2d-229          [-1, 120, 14, 14]          86,400
    -     BatchNorm2d-230          [-1, 120, 14, 14]             240
    - StochasticDepth-231          [-1, 120, 14, 14]               0
    -          MBConv-232          [-1, 120, 14, 14]               0
    -          Conv2d-233          [-1, 720, 14, 14]          86,400
    -     BatchNorm2d-234          [-1, 720, 14, 14]           1,440
    -            SiLU-235          [-1, 720, 14, 14]               0
    -          Conv2d-236          [-1, 720, 14, 14]          18,000
    -     BatchNorm2d-237          [-1, 720, 14, 14]           1,440
    -            SiLU-238          [-1, 720, 14, 14]               0
    -AdaptiveAvgPool2d-239            [-1, 720, 1, 1]               0
    -          Conv2d-240             [-1, 30, 1, 1]          21,630
    -            SiLU-241             [-1, 30, 1, 1]               0
    -          Conv2d-242            [-1, 720, 1, 1]          22,320
    -         Sigmoid-243            [-1, 720, 1, 1]               0
    -SqueezeExcitation-244          [-1, 720, 14, 14]               0
    -          Conv2d-245          [-1, 120, 14, 14]          86,400
    -     BatchNorm2d-246          [-1, 120, 14, 14]             240
    - StochasticDepth-247          [-1, 120, 14, 14]               0
    -          MBConv-248          [-1, 120, 14, 14]               0
    -          Conv2d-249          [-1, 720, 14, 14]          86,400
    -     BatchNorm2d-250          [-1, 720, 14, 14]           1,440
    -            SiLU-251          [-1, 720, 14, 14]               0
    -          Conv2d-252            [-1, 720, 7, 7]          18,000
    -     BatchNorm2d-253            [-1, 720, 7, 7]           1,440
    -            SiLU-254            [-1, 720, 7, 7]               0
    -AdaptiveAvgPool2d-255            [-1, 720, 1, 1]               0
    -          Conv2d-256             [-1, 30, 1, 1]          21,630
    -            SiLU-257             [-1, 30, 1, 1]               0
    -          Conv2d-258            [-1, 720, 1, 1]          22,320
    -         Sigmoid-259            [-1, 720, 1, 1]               0
    -SqueezeExcitation-260            [-1, 720, 7, 7]               0
    -          Conv2d-261            [-1, 208, 7, 7]         149,760
    -     BatchNorm2d-262            [-1, 208, 7, 7]             416
    -          MBConv-263            [-1, 208, 7, 7]               0
    -          Conv2d-264           [-1, 1248, 7, 7]         259,584
    -     BatchNorm2d-265           [-1, 1248, 7, 7]           2,496
    -            SiLU-266           [-1, 1248, 7, 7]               0
    -          Conv2d-267           [-1, 1248, 7, 7]          31,200
    -     BatchNorm2d-268           [-1, 1248, 7, 7]           2,496
    -            SiLU-269           [-1, 1248, 7, 7]               0
    -AdaptiveAvgPool2d-270           [-1, 1248, 1, 1]               0
    -          Conv2d-271             [-1, 52, 1, 1]          64,948
    -            SiLU-272             [-1, 52, 1, 1]               0
    -          Conv2d-273           [-1, 1248, 1, 1]          66,144
    -         Sigmoid-274           [-1, 1248, 1, 1]               0
    -SqueezeExcitation-275           [-1, 1248, 7, 7]               0
    -          Conv2d-276            [-1, 208, 7, 7]         259,584
    -     BatchNorm2d-277            [-1, 208, 7, 7]             416
    - StochasticDepth-278            [-1, 208, 7, 7]               0
    -          MBConv-279            [-1, 208, 7, 7]               0
    -          Conv2d-280           [-1, 1248, 7, 7]         259,584
    -     BatchNorm2d-281           [-1, 1248, 7, 7]           2,496
    -            SiLU-282           [-1, 1248, 7, 7]               0
    -          Conv2d-283           [-1, 1248, 7, 7]          31,200
    -     BatchNorm2d-284           [-1, 1248, 7, 7]           2,496
    -            SiLU-285           [-1, 1248, 7, 7]               0
    -AdaptiveAvgPool2d-286           [-1, 1248, 1, 1]               0
    -          Conv2d-287             [-1, 52, 1, 1]          64,948
    -            SiLU-288             [-1, 52, 1, 1]               0
    -          Conv2d-289           [-1, 1248, 1, 1]          66,144
    -         Sigmoid-290           [-1, 1248, 1, 1]               0
    -SqueezeExcitation-291           [-1, 1248, 7, 7]               0
    -          Conv2d-292            [-1, 208, 7, 7]         259,584
    -     BatchNorm2d-293            [-1, 208, 7, 7]             416
    - StochasticDepth-294            [-1, 208, 7, 7]               0
    -          MBConv-295            [-1, 208, 7, 7]               0
    -          Conv2d-296           [-1, 1248, 7, 7]         259,584
    -     BatchNorm2d-297           [-1, 1248, 7, 7]           2,496
    -            SiLU-298           [-1, 1248, 7, 7]               0
    -          Conv2d-299           [-1, 1248, 7, 7]          31,200
    -     BatchNorm2d-300           [-1, 1248, 7, 7]           2,496
    -            SiLU-301           [-1, 1248, 7, 7]               0
    -AdaptiveAvgPool2d-302           [-1, 1248, 1, 1]               0
    -          Conv2d-303             [-1, 52, 1, 1]          64,948
    -            SiLU-304             [-1, 52, 1, 1]               0
    -          Conv2d-305           [-1, 1248, 1, 1]          66,144
    -         Sigmoid-306           [-1, 1248, 1, 1]               0
    -SqueezeExcitation-307           [-1, 1248, 7, 7]               0
    -          Conv2d-308            [-1, 208, 7, 7]         259,584
    -     BatchNorm2d-309            [-1, 208, 7, 7]             416
    - StochasticDepth-310            [-1, 208, 7, 7]               0
    -          MBConv-311            [-1, 208, 7, 7]               0
    -          Conv2d-312           [-1, 1248, 7, 7]         259,584
    -     BatchNorm2d-313           [-1, 1248, 7, 7]           2,496
    -            SiLU-314           [-1, 1248, 7, 7]               0
    -          Conv2d-315           [-1, 1248, 7, 7]          31,200
    -     BatchNorm2d-316           [-1, 1248, 7, 7]           2,496
    -            SiLU-317           [-1, 1248, 7, 7]               0
    -AdaptiveAvgPool2d-318           [-1, 1248, 1, 1]               0
    -          Conv2d-319             [-1, 52, 1, 1]          64,948
    -            SiLU-320             [-1, 52, 1, 1]               0
    -          Conv2d-321           [-1, 1248, 1, 1]          66,144
    -         Sigmoid-322           [-1, 1248, 1, 1]               0
    -SqueezeExcitation-323           [-1, 1248, 7, 7]               0
    -          Conv2d-324            [-1, 208, 7, 7]         259,584
    -     BatchNorm2d-325            [-1, 208, 7, 7]             416
    - StochasticDepth-326            [-1, 208, 7, 7]               0
    -          MBConv-327            [-1, 208, 7, 7]               0
    -          Conv2d-328           [-1, 1248, 7, 7]         259,584
    -     BatchNorm2d-329           [-1, 1248, 7, 7]           2,496
    -            SiLU-330           [-1, 1248, 7, 7]               0
    -          Conv2d-331           [-1, 1248, 7, 7]          11,232
    -     BatchNorm2d-332           [-1, 1248, 7, 7]           2,496
    -            SiLU-333           [-1, 1248, 7, 7]               0
    -AdaptiveAvgPool2d-334           [-1, 1248, 1, 1]               0
    -          Conv2d-335             [-1, 52, 1, 1]          64,948
    -            SiLU-336             [-1, 52, 1, 1]               0
    -          Conv2d-337           [-1, 1248, 1, 1]          66,144
    -         Sigmoid-338           [-1, 1248, 1, 1]               0
    -SqueezeExcitation-339           [-1, 1248, 7, 7]               0
    -          Conv2d-340            [-1, 352, 7, 7]         439,296
    -     BatchNorm2d-341            [-1, 352, 7, 7]             704
    -          MBConv-342            [-1, 352, 7, 7]               0
    -          Conv2d-343           [-1, 2112, 7, 7]         743,424
    -     BatchNorm2d-344           [-1, 2112, 7, 7]           4,224
    -            SiLU-345           [-1, 2112, 7, 7]               0
    -          Conv2d-346           [-1, 2112, 7, 7]          19,008
    -     BatchNorm2d-347           [-1, 2112, 7, 7]           4,224
    -            SiLU-348           [-1, 2112, 7, 7]               0
    -AdaptiveAvgPool2d-349           [-1, 2112, 1, 1]               0
    -          Conv2d-350             [-1, 88, 1, 1]         185,944
    -            SiLU-351             [-1, 88, 1, 1]               0
    -          Conv2d-352           [-1, 2112, 1, 1]         187,968
    -         Sigmoid-353           [-1, 2112, 1, 1]               0
    -SqueezeExcitation-354           [-1, 2112, 7, 7]               0
    -          Conv2d-355            [-1, 352, 7, 7]         743,424
    -     BatchNorm2d-356            [-1, 352, 7, 7]             704
    - StochasticDepth-357            [-1, 352, 7, 7]               0
    -          MBConv-358            [-1, 352, 7, 7]               0
    -          Conv2d-359           [-1, 1408, 7, 7]         495,616
    -     BatchNorm2d-360           [-1, 1408, 7, 7]           2,816
    -            SiLU-361           [-1, 1408, 7, 7]               0
    -AdaptiveAvgPool2d-362           [-1, 1408, 1, 1]               0
    -         Dropout-363                 [-1, 1408]               0
    -          Linear-364                   [-1, 10]          14,090
    -    EfficientNet-365                   [-1, 10]               0
    -================================================================
    -Total params: 7,715,084
    -Trainable params: 14,090
    -Non-trainable params: 7,700,994
    -----------------------------------------------------------------
    -Input size (MB): 0.57
    -Forward/backward pass size (MB): 257.42
    -Params size (MB): 29.43
    -Estimated Total Size (MB): 287.43
    -----------------------------------------------------------------
    -
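The 14,090 trainable parameters in the summary come from the new 10-way head on top of the frozen backbone (a 1408-to-10 linear layer plus its biases). The construction code is not shown in this README, so the snippet below is only a minimal sketch of how such a frozen EfficientNetB2 feature extractor could be assembled with torchvision; `eff_b2` matches the name used in the `summary` call above, and the dropout rate is torchvision's default for B2.

```python
import torch
from torch import nn
from torchvision import models

# Pretrained EfficientNet-B2 backbone (ImageNet weights).
eff_b2 = models.efficientnet_b2(weights=models.EfficientNet_B2_Weights.DEFAULT)

# Freeze the feature extractor so only the new head is trained.
for param in eff_b2.features.parameters():
    param.requires_grad = False

# Replace the 1000-class ImageNet head with a 10-class ImageWoof head
# (1408 features out of the backbone, as in Linear-364 above).
eff_b2.classifier = nn.Sequential(
    nn.Dropout(p=0.3, inplace=True),
    nn.Linear(in_features=1408, out_features=10),
)

# summary(eff_b2, (3, 224, 224), device='cpu') then reproduces the table above.
```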
    -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git "a/spaces/CikeyQI/Yunzai/Yunzai/plugins/example/\344\270\273\345\212\250\345\244\215\350\257\273.js" "b/spaces/CikeyQI/Yunzai/Yunzai/plugins/example/\344\270\273\345\212\250\345\244\215\350\257\273.js" deleted file mode 100644 index f57cfa8815c6b98daa06b60804abc69c04261086..0000000000000000000000000000000000000000 --- "a/spaces/CikeyQI/Yunzai/Yunzai/plugins/example/\344\270\273\345\212\250\345\244\215\350\257\273.js" +++ /dev/null @@ -1,37 +0,0 @@ -import plugin from '../../lib/plugins/plugin.js' - -export class example2 extends plugin { - constructor () { - super({ - name: '复读', - dsc: '复读用户发送的内容,然后撤回', - /** https://oicqjs.github.io/oicq/#events */ - event: 'message', - priority: 5000, - rule: [ - { - /** 命令正则匹配 */ - reg: '^#复读$', - /** 执行方法 */ - fnc: 'repeat' - } - ] - }) - } - - /** 复读 */ - async repeat () { - /** 设置上下文,后续接收到内容会执行doRep方法 */ - this.setContext('doRep') - /** 回复 */ - await this.reply('请发送要复读的内容', false, { at: true }) - } - - /** 接受内容 */ - doRep () { - /** 复读内容 */ - this.reply(this.e.message, false, { recallMsg: 5 }) - /** 结束上下文 */ - this.finish('doRep') - } -} diff --git a/spaces/ClassCat/wide-resnet-cifar10-classification/README.md b/spaces/ClassCat/wide-resnet-cifar10-classification/README.md deleted file mode 100644 index 0faeff6c03ded49c28bba4c22d4cf52e3bca1768..0000000000000000000000000000000000000000 --- a/spaces/ClassCat/wide-resnet-cifar10-classification/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Wide Resnet Cifar10 Classification -emoji: 📈 -colorFrom: blue -colorTo: purple -sdk: gradio -sdk_version: 3.16.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/layers/sigmoid_focal_loss.py b/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/layers/sigmoid_focal_loss.py deleted file mode 100644 index c42b4d69900e6222d972ee1296648eae97fec511..0000000000000000000000000000000000000000 --- a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/layers/sigmoid_focal_loss.py +++ /dev/null @@ -1,76 +0,0 @@ -import torch -from torch import nn -from torch.autograd import Function -from torch.autograd.function import once_differentiable - -from maskrcnn_benchmark import _C - -# TODO: Use JIT to replace CUDA implementation in the future. 
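# The code below implements the sigmoid focal loss of Lin et al.,
# FL(p_t) = -alpha_t * (1 - p_t)**gamma * log(p_t), where p_t is the predicted
# probability of the ground-truth class. _SigmoidFocalLoss wraps the compiled
# forward/backward kernels exposed by maskrcnn_benchmark._C for GPU inputs,
# while sigmoid_focal_loss_cpu computes the same loss with plain torch ops:
# term1 covers the positive class (weighted by alpha) and term2 the negatives
# (weighted by 1 - alpha).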
-class _SigmoidFocalLoss(Function): - @staticmethod - def forward(ctx, logits, targets, gamma, alpha): - ctx.save_for_backward(logits, targets) - num_classes = logits.shape[1] - ctx.num_classes = num_classes - ctx.gamma = gamma - ctx.alpha = alpha - - losses = _C.sigmoid_focalloss_forward( - logits, targets, num_classes, gamma, alpha - ) - return losses - - @staticmethod - @once_differentiable - def backward(ctx, d_loss): - logits, targets = ctx.saved_tensors - num_classes = ctx.num_classes - gamma = ctx.gamma - alpha = ctx.alpha - d_loss = d_loss.contiguous() - d_logits = _C.sigmoid_focalloss_backward( - logits, targets, d_loss, num_classes, gamma, alpha - ) - return d_logits, None, None, None, None - - -sigmoid_focal_loss_cuda = _SigmoidFocalLoss.apply - - -def sigmoid_focal_loss_cpu(logits, targets, gamma, alpha): - num_classes = logits.shape[1] - gamma = gamma[0] - alpha = alpha[0] - dtype = targets.dtype - device = targets.device - class_range = torch.arange(1, num_classes+1, dtype=dtype, device=device).unsqueeze(0) - - t = targets.unsqueeze(1) - p = torch.sigmoid(logits) - term1 = (1 - p) ** gamma * torch.log(p) - term2 = p ** gamma * torch.log(1 - p) - return -(t == class_range).float() * term1 * alpha - ((t != class_range) * (t >= 0)).float() * term2 * (1 - alpha) - - -class SigmoidFocalLoss(nn.Module): - def __init__(self, gamma, alpha): - super(SigmoidFocalLoss, self).__init__() - self.gamma = gamma - self.alpha = alpha - - def forward(self, logits, targets): - device = logits.device - if logits.is_cuda: - loss_func = sigmoid_focal_loss_cuda - else: - loss_func = sigmoid_focal_loss_cpu - - loss = loss_func(logits, targets, self.gamma, self.alpha) - return loss.sum() - - def __repr__(self): - tmpstr = self.__class__.__name__ + "(" - tmpstr += "gamma=" + str(self.gamma) - tmpstr += ", alpha=" + str(self.alpha) - tmpstr += ")" - return tmpstr diff --git a/spaces/DataDreamweavers/LegaWeaver/README.md b/spaces/DataDreamweavers/LegaWeaver/README.md deleted file mode 100644 index d09bcbb90104a832752f111efb505d0f6099047c..0000000000000000000000000000000000000000 --- a/spaces/DataDreamweavers/LegaWeaver/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: LegaWeaver -emoji: 🌍 -colorFrom: blue -colorTo: purple -sdk: streamlit -sdk_version: 1.25.0 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/metrics/linear_separability.py b/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/metrics/linear_separability.py deleted file mode 100644 index e50be5a0fea00eba7af2d05cccf74bacedbea1c3..0000000000000000000000000000000000000000 --- a/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/metrics/linear_separability.py +++ /dev/null @@ -1,177 +0,0 @@ -# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. -# -# This work is licensed under the Creative Commons Attribution-NonCommercial -# 4.0 International License. To view a copy of this license, visit -# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to -# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. 
- -"""Linear Separability (LS).""" - -from collections import defaultdict -import numpy as np -import sklearn.svm -import tensorflow as tf -import dnnlib.tflib as tflib - -from metrics import metric_base -from training import misc - -#---------------------------------------------------------------------------- - -classifier_urls = [ - 'https://drive.google.com/uc?id=1Q5-AI6TwWhCVM7Muu4tBM7rp5nG_gmCX', # celebahq-classifier-00-male.pkl - 'https://drive.google.com/uc?id=1Q5c6HE__ReW2W8qYAXpao68V1ryuisGo', # celebahq-classifier-01-smiling.pkl - 'https://drive.google.com/uc?id=1Q7738mgWTljPOJQrZtSMLxzShEhrvVsU', # celebahq-classifier-02-attractive.pkl - 'https://drive.google.com/uc?id=1QBv2Mxe7ZLvOv1YBTLq-T4DS3HjmXV0o', # celebahq-classifier-03-wavy-hair.pkl - 'https://drive.google.com/uc?id=1QIvKTrkYpUrdA45nf7pspwAqXDwWOLhV', # celebahq-classifier-04-young.pkl - 'https://drive.google.com/uc?id=1QJPH5rW7MbIjFUdZT7vRYfyUjNYDl4_L', # celebahq-classifier-05-5-o-clock-shadow.pkl - 'https://drive.google.com/uc?id=1QPZXSYf6cptQnApWS_T83sqFMun3rULY', # celebahq-classifier-06-arched-eyebrows.pkl - 'https://drive.google.com/uc?id=1QPgoAZRqINXk_PFoQ6NwMmiJfxc5d2Pg', # celebahq-classifier-07-bags-under-eyes.pkl - 'https://drive.google.com/uc?id=1QQPQgxgI6wrMWNyxFyTLSgMVZmRr1oO7', # celebahq-classifier-08-bald.pkl - 'https://drive.google.com/uc?id=1QcSphAmV62UrCIqhMGgcIlZfoe8hfWaF', # celebahq-classifier-09-bangs.pkl - 'https://drive.google.com/uc?id=1QdWTVwljClTFrrrcZnPuPOR4mEuz7jGh', # celebahq-classifier-10-big-lips.pkl - 'https://drive.google.com/uc?id=1QgvEWEtr2mS4yj1b_Y3WKe6cLWL3LYmK', # celebahq-classifier-11-big-nose.pkl - 'https://drive.google.com/uc?id=1QidfMk9FOKgmUUIziTCeo8t-kTGwcT18', # celebahq-classifier-12-black-hair.pkl - 'https://drive.google.com/uc?id=1QthrJt-wY31GPtV8SbnZQZ0_UEdhasHO', # celebahq-classifier-13-blond-hair.pkl - 'https://drive.google.com/uc?id=1QvCAkXxdYT4sIwCzYDnCL9Nb5TDYUxGW', # celebahq-classifier-14-blurry.pkl - 'https://drive.google.com/uc?id=1QvLWuwSuWI9Ln8cpxSGHIciUsnmaw8L0', # celebahq-classifier-15-brown-hair.pkl - 'https://drive.google.com/uc?id=1QxW6THPI2fqDoiFEMaV6pWWHhKI_OoA7', # celebahq-classifier-16-bushy-eyebrows.pkl - 'https://drive.google.com/uc?id=1R71xKw8oTW2IHyqmRDChhTBkW9wq4N9v', # celebahq-classifier-17-chubby.pkl - 'https://drive.google.com/uc?id=1RDn_fiLfEGbTc7JjazRXuAxJpr-4Pl67', # celebahq-classifier-18-double-chin.pkl - 'https://drive.google.com/uc?id=1RGBuwXbaz5052bM4VFvaSJaqNvVM4_cI', # celebahq-classifier-19-eyeglasses.pkl - 'https://drive.google.com/uc?id=1RIxOiWxDpUwhB-9HzDkbkLegkd7euRU9', # celebahq-classifier-20-goatee.pkl - 'https://drive.google.com/uc?id=1RPaNiEnJODdr-fwXhUFdoSQLFFZC7rC-', # celebahq-classifier-21-gray-hair.pkl - 'https://drive.google.com/uc?id=1RQH8lPSwOI2K_9XQCZ2Ktz7xm46o80ep', # celebahq-classifier-22-heavy-makeup.pkl - 'https://drive.google.com/uc?id=1RXZM61xCzlwUZKq-X7QhxOg0D2telPow', # celebahq-classifier-23-high-cheekbones.pkl - 'https://drive.google.com/uc?id=1RgASVHW8EWMyOCiRb5fsUijFu-HfxONM', # celebahq-classifier-24-mouth-slightly-open.pkl - 'https://drive.google.com/uc?id=1RkC8JLqLosWMaRne3DARRgolhbtg_wnr', # celebahq-classifier-25-mustache.pkl - 'https://drive.google.com/uc?id=1RqtbtFT2EuwpGTqsTYJDyXdnDsFCPtLO', # celebahq-classifier-26-narrow-eyes.pkl - 'https://drive.google.com/uc?id=1Rs7hU-re8bBMeRHR-fKgMbjPh-RIbrsh', # celebahq-classifier-27-no-beard.pkl - 'https://drive.google.com/uc?id=1RynDJQWdGOAGffmkPVCrLJqy_fciPF9E', # celebahq-classifier-28-oval-face.pkl - 
'https://drive.google.com/uc?id=1S0TZ_Hdv5cb06NDaCD8NqVfKy7MuXZsN', # celebahq-classifier-29-pale-skin.pkl - 'https://drive.google.com/uc?id=1S3JPhZH2B4gVZZYCWkxoRP11q09PjCkA', # celebahq-classifier-30-pointy-nose.pkl - 'https://drive.google.com/uc?id=1S3pQuUz-Jiywq_euhsfezWfGkfzLZ87W', # celebahq-classifier-31-receding-hairline.pkl - 'https://drive.google.com/uc?id=1S6nyIl_SEI3M4l748xEdTV2vymB_-lrY', # celebahq-classifier-32-rosy-cheeks.pkl - 'https://drive.google.com/uc?id=1S9P5WCi3GYIBPVYiPTWygrYIUSIKGxbU', # celebahq-classifier-33-sideburns.pkl - 'https://drive.google.com/uc?id=1SANviG-pp08n7AFpE9wrARzozPIlbfCH', # celebahq-classifier-34-straight-hair.pkl - 'https://drive.google.com/uc?id=1SArgyMl6_z7P7coAuArqUC2zbmckecEY', # celebahq-classifier-35-wearing-earrings.pkl - 'https://drive.google.com/uc?id=1SC5JjS5J-J4zXFO9Vk2ZU2DT82TZUza_', # celebahq-classifier-36-wearing-hat.pkl - 'https://drive.google.com/uc?id=1SDAQWz03HGiu0MSOKyn7gvrp3wdIGoj-', # celebahq-classifier-37-wearing-lipstick.pkl - 'https://drive.google.com/uc?id=1SEtrVK-TQUC0XeGkBE9y7L8VXfbchyKX', # celebahq-classifier-38-wearing-necklace.pkl - 'https://drive.google.com/uc?id=1SF_mJIdyGINXoV-I6IAxHB_k5dxiF6M-', # celebahq-classifier-39-wearing-necktie.pkl -] - -#---------------------------------------------------------------------------- - -def prob_normalize(p): - p = np.asarray(p).astype(np.float32) - assert len(p.shape) == 2 - return p / np.sum(p) - -def mutual_information(p): - p = prob_normalize(p) - px = np.sum(p, axis=1) - py = np.sum(p, axis=0) - result = 0.0 - for x in range(p.shape[0]): - p_x = px[x] - for y in range(p.shape[1]): - p_xy = p[x][y] - p_y = py[y] - if p_xy > 0.0: - result += p_xy * np.log2(p_xy / (p_x * p_y)) # get bits as output - return result - -def entropy(p): - p = prob_normalize(p) - result = 0.0 - for x in range(p.shape[0]): - for y in range(p.shape[1]): - p_xy = p[x][y] - if p_xy > 0.0: - result -= p_xy * np.log2(p_xy) - return result - -def conditional_entropy(p): - # H(Y|X) where X corresponds to axis 0, Y to axis 1 - # i.e., How many bits of additional information are needed to where we are on axis 1 if we know where we are on axis 0? - p = prob_normalize(p) - y = np.sum(p, axis=0, keepdims=True) # marginalize to calculate H(Y) - return max(0.0, entropy(y) - mutual_information(p)) # can slip just below 0 due to FP inaccuracies, clean those up. - -#---------------------------------------------------------------------------- - -class LS(metric_base.MetricBase): - def __init__(self, num_samples, num_keep, attrib_indices, minibatch_per_gpu, **kwargs): - assert num_keep <= num_samples - super().__init__(**kwargs) - self.num_samples = num_samples - self.num_keep = num_keep - self.attrib_indices = attrib_indices - self.minibatch_per_gpu = minibatch_per_gpu - - def _evaluate(self, Gs, num_gpus): - minibatch_size = num_gpus * self.minibatch_per_gpu - - # Construct TensorFlow graph for each GPU. - result_expr = [] - for gpu_idx in range(num_gpus): - with tf.device('/gpu:%d' % gpu_idx): - Gs_clone = Gs.clone() - - # Generate images. - latents = tf.random_normal([self.minibatch_per_gpu] + Gs_clone.input_shape[1:]) - dlatents = Gs_clone.components.mapping.get_output_for(latents, None, is_validation=True) - images = Gs_clone.components.synthesis.get_output_for(dlatents, is_validation=True, randomize_noise=True) - - # Downsample to 256x256. The attribute classifiers were built for 256x256. 
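                # The reshape splits each spatial axis into (size // factor, factor),
                # and reduce_mean over the two factor axes is an average-pooling
                # downsample by `factor`.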
- if images.shape[2] > 256: - factor = images.shape[2] // 256 - images = tf.reshape(images, [-1, images.shape[1], images.shape[2] // factor, factor, images.shape[3] // factor, factor]) - images = tf.reduce_mean(images, axis=[3, 5]) - - # Run classifier for each attribute. - result_dict = dict(latents=latents, dlatents=dlatents[:,-1]) - for attrib_idx in self.attrib_indices: - classifier = misc.load_pkl(classifier_urls[attrib_idx]) - logits = classifier.get_output_for(images, None) - predictions = tf.nn.softmax(tf.concat([logits, -logits], axis=1)) - result_dict[attrib_idx] = predictions - result_expr.append(result_dict) - - # Sampling loop. - results = [] - for _ in range(0, self.num_samples, minibatch_size): - results += tflib.run(result_expr) - results = {key: np.concatenate([value[key] for value in results], axis=0) for key in results[0].keys()} - - # Calculate conditional entropy for each attribute. - conditional_entropies = defaultdict(list) - for attrib_idx in self.attrib_indices: - # Prune the least confident samples. - pruned_indices = list(range(self.num_samples)) - pruned_indices = sorted(pruned_indices, key=lambda i: -np.max(results[attrib_idx][i])) - pruned_indices = pruned_indices[:self.num_keep] - - # Fit SVM to the remaining samples. - svm_targets = np.argmax(results[attrib_idx][pruned_indices], axis=1) - for space in ['latents', 'dlatents']: - svm_inputs = results[space][pruned_indices] - try: - svm = sklearn.svm.LinearSVC() - svm.fit(svm_inputs, svm_targets) - svm.score(svm_inputs, svm_targets) - svm_outputs = svm.predict(svm_inputs) - except: - svm_outputs = svm_targets # assume perfect prediction - - # Calculate conditional entropy. - p = [[np.mean([case == (row, col) for case in zip(svm_outputs, svm_targets)]) for col in (0, 1)] for row in (0, 1)] - conditional_entropies[space].append(conditional_entropy(p)) - - # Calculate separability scores. 
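        # entropy()/conditional_entropy() work in bits (log2), so 2**sum(...) is the
        # exponentiated total conditional entropy over all attributes, reported
        # separately for the Z (latents) and W (dlatents) spaces below.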
- scores = {key: 2**np.sum(values) for key, values in conditional_entropies.items()} - self._report_result(scores['latents'], suffix='_z') - self._report_result(scores['dlatents'], suffix='_w') - -#---------------------------------------------------------------------------- diff --git a/spaces/Dormin22/Proxy/Dockerfile b/spaces/Dormin22/Proxy/Dockerfile deleted file mode 100644 index cee9bcd0c69dbeb6e903c3f64531b2ff70f021f6..0000000000000000000000000000000000000000 --- a/spaces/Dormin22/Proxy/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM node:18-bullseye-slim -RUN apt-get update && \ - apt-get install -y git -RUN git clone https://gitlab.com/khanon/oai-proxy.git /app -WORKDIR /app -RUN npm install -COPY Dockerfile greeting.md* .env* ./ -RUN npm run build -EXPOSE 7860 -ENV NODE_ENV=production -CMD [ "npm", "start" ] \ No newline at end of file diff --git a/spaces/EDGAhab/Paimon-Talking/attentions.py b/spaces/EDGAhab/Paimon-Talking/attentions.py deleted file mode 100644 index 4e0b0c1fd48c962e21e1fbe60b23fc574927435c..0000000000000000000000000000000000000000 --- a/spaces/EDGAhab/Paimon-Talking/attentions.py +++ /dev/null @@ -1,303 +0,0 @@ -import copy -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -from modules import LayerNorm - - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, 
hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." 
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." - block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. 
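        # After the two pads, successive rows of the flattened buffer are offset by
        # one extra element, so viewing it as [length+1, 2*length-1] skews each row
        # by one position relative to the previous one. Keeping the first `length`
        # rows and dropping the first `length-1` columns leaves an [l, l] matrix
        # whose entry (i, j) is the score for relative position j - i.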
- x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. - Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/Eddycrack864/Applio-Inference/infer/lib/uvr5_pack/lib_v5/model_param_init.py b/spaces/Eddycrack864/Applio-Inference/infer/lib/uvr5_pack/lib_v5/model_param_init.py deleted file mode 100644 index b995c0bfb1194746187692e2ab1c2a6dbaaaec6c..0000000000000000000000000000000000000000 --- a/spaces/Eddycrack864/Applio-Inference/infer/lib/uvr5_pack/lib_v5/model_param_init.py +++ /dev/null @@ -1,69 +0,0 @@ -import json -import os -import pathlib - -default_param = {} -default_param["bins"] = 768 -default_param["unstable_bins"] = 9 # training only -default_param["reduction_bins"] = 762 # training only -default_param["sr"] = 44100 -default_param["pre_filter_start"] = 757 -default_param["pre_filter_stop"] = 768 -default_param["band"] = {} - - -default_param["band"][1] = { - "sr": 11025, - "hl": 128, - "n_fft": 960, - "crop_start": 0, - "crop_stop": 245, - "lpf_start": 61, # inference only - "res_type": "polyphase", -} - -default_param["band"][2] = { - "sr": 44100, - "hl": 512, - "n_fft": 1536, - "crop_start": 24, - "crop_stop": 547, - "hpf_start": 81, # inference only - 
"res_type": "sinc_best", -} - - -def int_keys(d): - r = {} - for k, v in d: - if k.isdigit(): - k = int(k) - r[k] = v - return r - - -class ModelParameters(object): - def __init__(self, config_path=""): - if ".pth" == pathlib.Path(config_path).suffix: - import zipfile - - with zipfile.ZipFile(config_path, "r") as zip: - self.param = json.loads( - zip.read("param.json"), object_pairs_hook=int_keys - ) - elif ".json" == pathlib.Path(config_path).suffix: - with open(config_path, "r") as f: - self.param = json.loads(f.read(), object_pairs_hook=int_keys) - else: - self.param = default_param - - for k in [ - "mid_side", - "mid_side_b", - "mid_side_b2", - "stereo_w", - "stereo_n", - "reverse", - ]: - if not k in self.param: - self.param[k] = False diff --git a/spaces/Eddycrack864/Applio-Inference/infer/modules/vc/pipeline.py b/spaces/Eddycrack864/Applio-Inference/infer/modules/vc/pipeline.py deleted file mode 100644 index 76e712c649b95e21f9bbe6416ae8b7050317b479..0000000000000000000000000000000000000000 --- a/spaces/Eddycrack864/Applio-Inference/infer/modules/vc/pipeline.py +++ /dev/null @@ -1,655 +0,0 @@ -import os -import sys -import traceback -import logging - -logger = logging.getLogger(__name__) - -from functools import lru_cache -from time import time as ttime -from torch import Tensor -import faiss -import librosa -import numpy as np -import parselmouth -import pyworld -import torch -import torch.nn.functional as F -import torchcrepe -from scipy import signal -from tqdm import tqdm - -import random -now_dir = os.getcwd() -sys.path.append(now_dir) -import re -from functools import partial -bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000) - -input_audio_path2wav = {} -from LazyImport import lazyload -torchcrepe = lazyload("torchcrepe") # Fork Feature. 
Crepe algo for training and preprocess -torch = lazyload("torch") -from infer.lib.rmvpe import RMVPE - -@lru_cache -def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period): - audio = input_audio_path2wav[input_audio_path] - f0, t = pyworld.harvest( - audio, - fs=fs, - f0_ceil=f0max, - f0_floor=f0min, - frame_period=frame_period, - ) - f0 = pyworld.stonemask(audio, f0, t, fs) - return f0 - - -def change_rms(data1, sr1, data2, sr2, rate): # 1是输入音频,2是输出音频,rate是2的占比 - # print(data1.max(),data2.max()) - rms1 = librosa.feature.rms( - y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2 - ) # 每半秒一个点 - rms2 = librosa.feature.rms(y=data2, frame_length=sr2 // 2 * 2, hop_length=sr2 // 2) - rms1 = torch.from_numpy(rms1) - rms1 = F.interpolate( - rms1.unsqueeze(0), size=data2.shape[0], mode="linear" - ).squeeze() - rms2 = torch.from_numpy(rms2) - rms2 = F.interpolate( - rms2.unsqueeze(0), size=data2.shape[0], mode="linear" - ).squeeze() - rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-6) - data2 *= ( - torch.pow(rms1, torch.tensor(1 - rate)) - * torch.pow(rms2, torch.tensor(rate - 1)) - ).numpy() - return data2 - - -class Pipeline(object): - def __init__(self, tgt_sr, config): - self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = ( - config.x_pad, - config.x_query, - config.x_center, - config.x_max, - config.is_half, - ) - self.sr = 16000 # hubert输入采样率 - self.window = 160 # 每帧点数 - self.t_pad = self.sr * self.x_pad # 每条前后pad时间 - self.t_pad_tgt = tgt_sr * self.x_pad - self.t_pad2 = self.t_pad * 2 - self.t_query = self.sr * self.x_query # 查询切点前后查询时间 - self.t_center = self.sr * self.x_center # 查询切点位置 - self.t_max = self.sr * self.x_max # 免查询时长阈值 - self.device = config.device - self.model_rmvpe = RMVPE("%s/rmvpe.pt" % os.environ["rmvpe_root"], is_half=self.is_half, device=self.device) - self.f0_method_dict = { - "pm": self.get_pm, - "harvest": self.get_harvest, - "dio": self.get_dio, - "rmvpe": self.get_rmvpe, - "rmvpe+": self.get_pitch_dependant_rmvpe, - "crepe": self.get_f0_official_crepe_computation, - "crepe-tiny": partial(self.get_f0_official_crepe_computation, model='model'), - "mangio-crepe": self.get_f0_crepe_computation, - "mangio-crepe-tiny": partial(self.get_f0_crepe_computation, model='model'), - - } - self.note_dict = [ - 65.41, 69.30, 73.42, 77.78, 82.41, 87.31, - 92.50, 98.00, 103.83, 110.00, 116.54, 123.47, - 130.81, 138.59, 146.83, 155.56, 164.81, 174.61, - 185.00, 196.00, 207.65, 220.00, 233.08, 246.94, - 261.63, 277.18, 293.66, 311.13, 329.63, 349.23, - 369.99, 392.00, 415.30, 440.00, 466.16, 493.88, - 523.25, 554.37, 587.33, 622.25, 659.25, 698.46, - 739.99, 783.99, 830.61, 880.00, 932.33, 987.77, - 1046.50, 1108.73, 1174.66, 1244.51, 1318.51, 1396.91, - 1479.98, 1567.98, 1661.22, 1760.00, 1864.66, 1975.53, - 2093.00, 2217.46, 2349.32, 2489.02, 2637.02, 2793.83, - 2959.96, 3135.96, 3322.44, 3520.00, 3729.31, 3951.07 - ] - - # Fork Feature: Get the best torch device to use for f0 algorithms that require a torch device. Will return the type (torch.device) - def get_optimal_torch_device(self, index: int = 0) -> torch.device: - if torch.cuda.is_available(): - return torch.device( - f"cuda:{index % torch.cuda.device_count()}" - ) # Very fast - elif torch.backends.mps.is_available(): - return torch.device("mps") - return torch.device("cpu") - - # Fork Feature: Compute f0 with the crepe method - def get_f0_crepe_computation( - self, - x, - f0_min, - f0_max, - p_len, - *args, # 512 before. 
Hop length changes the speed that the voice jumps to a different dramatic pitch. Lower hop lengths means more pitch accuracy but longer inference time. - **kwargs, # Either use crepe-tiny "tiny" or crepe "full". Default is full - ): - x = x.astype( - np.float32 - ) # fixes the F.conv2D exception. We needed to convert double to float. - x /= np.quantile(np.abs(x), 0.999) - torch_device = self.get_optimal_torch_device() - audio = torch.from_numpy(x).to(torch_device, copy=True) - audio = torch.unsqueeze(audio, dim=0) - if audio.ndim == 2 and audio.shape[0] > 1: - audio = torch.mean(audio, dim=0, keepdim=True).detach() - audio = audio.detach() - hop_length = kwargs.get('crepe_hop_length', 160) - model = kwargs.get('model', 'full') - print("Initiating prediction with a crepe_hop_length of: " + str(hop_length)) - pitch: Tensor = torchcrepe.predict( - audio, - self.sr, - hop_length, - f0_min, - f0_max, - model, - batch_size=hop_length * 2, - device=torch_device, - pad=True, - ) - p_len = p_len or x.shape[0] // hop_length - # Resize the pitch for final f0 - source = np.array(pitch.squeeze(0).cpu().float().numpy()) - source[source < 0.001] = np.nan - target = np.interp( - np.arange(0, len(source) * p_len, len(source)) / p_len, - np.arange(0, len(source)), - source, - ) - f0 = np.nan_to_num(target) - return f0 # Resized f0 - - def get_f0_official_crepe_computation( - self, - x, - f0_min, - f0_max, - *args, - **kwargs - ): - # Pick a batch size that doesn't cause memory errors on your gpu - batch_size = 512 - # Compute pitch using first gpu - audio = torch.tensor(np.copy(x))[None].float() - model = kwargs.get('model', 'full') - f0, pd = torchcrepe.predict( - audio, - self.sr, - self.window, - f0_min, - f0_max, - model, - batch_size=batch_size, - device=self.device, - return_periodicity=True, - ) - pd = torchcrepe.filter.median(pd, 3) - f0 = torchcrepe.filter.mean(f0, 3) - f0[pd < 0.1] = 0 - f0 = f0[0].cpu().numpy() - return f0 - - # Fork Feature: Compute pYIN f0 method - def get_f0_pyin_computation(self, x, f0_min, f0_max): - y, sr = librosa.load("saudio/Sidney.wav", self.sr, mono=True) - f0, _, _ = librosa.pyin(y, sr=self.sr, fmin=f0_min, fmax=f0_max) - f0 = f0[1:] # Get rid of extra first frame - return f0 - - def get_pm(self, x, p_len, *args, **kwargs): - f0 = parselmouth.Sound(x, self.sr).to_pitch_ac( - time_step=160 / 16000, - voicing_threshold=0.6, - pitch_floor=kwargs.get('f0_min'), - pitch_ceiling=kwargs.get('f0_max'), - ).selected_array["frequency"] - - return np.pad( - f0, - [[max(0, (p_len - len(f0) + 1) // 2), max(0, p_len - len(f0) - (p_len - len(f0) + 1) // 2)]], - mode="constant" - ) - - def get_harvest(self, x, *args, **kwargs): - f0_spectral = pyworld.harvest( - x.astype(np.double), - fs=self.sr, - f0_ceil=kwargs.get('f0_max'), - f0_floor=kwargs.get('f0_min'), - frame_period=1000 * kwargs.get('hop_length', 160) / self.sr, - ) - return pyworld.stonemask(x.astype(np.double), *f0_spectral, self.sr) - - def get_dio(self, x, *args, **kwargs): - f0_spectral = pyworld.dio( - x.astype(np.double), - fs=self.sr, - f0_ceil=kwargs.get('f0_max'), - f0_floor=kwargs.get('f0_min'), - frame_period=1000 * kwargs.get('hop_length', 160) / self.sr, - ) - return pyworld.stonemask(x.astype(np.double), *f0_spectral, self.sr) - - - def get_rmvpe(self, x, *args, **kwargs): - if not hasattr(self, "model_rmvpe"): - from infer.lib.rmvpe import RMVPE - - logger.info( - "Loading rmvpe model,%s" % "%s/rmvpe.pt" % os.environ["rmvpe_root"] - ) - self.model_rmvpe = RMVPE( - "%s/rmvpe.pt" % os.environ["rmvpe_root"], - 
is_half=self.is_half, - device=self.device, - ) - f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03) - - return f0 - - - def get_pitch_dependant_rmvpe(self, x, f0_min=1, f0_max=40000, *args, **kwargs): - return self.model_rmvpe.infer_from_audio_with_pitch(x, thred=0.03, f0_min=f0_min, f0_max=f0_max) - - def autotune_f0(self, f0): - autotuned_f0 = [] - for freq in f0: - closest_notes = [x for x in self.note_dict if abs(x - freq) == min(abs(n - freq) for n in self.note_dict)] - autotuned_f0.append(random.choice(closest_notes)) - return np.array(autotuned_f0, np.float64) - - # Fork Feature: Acquire median hybrid f0 estimation calculation - def get_f0_hybrid_computation( - self, - methods_str, - input_audio_path, - x, - f0_min, - f0_max, - p_len, - filter_radius, - crepe_hop_length, - time_step - ): - # Get various f0 methods from input to use in the computation stack - params = {'x': x, 'p_len': p_len, 'f0_min': f0_min, - 'f0_max': f0_max, 'time_step': time_step, 'filter_radius': filter_radius, - 'crepe_hop_length': crepe_hop_length, 'model': "full" - } - methods_str = re.search('hybrid\[(.+)\]', methods_str) - if methods_str: # Ensure a match was found - methods = [method.strip() for method in methods_str.group(1).split('+')] - f0_computation_stack = [] - - print(f"Calculating f0 pitch estimations for methods: {str(methods)}") - x = x.astype(np.float32) - x /= np.quantile(np.abs(x), 0.999) - # Get f0 calculations for all methods specified - - for method in methods: - if method not in self.f0_method_dict: - print(f"Method {method} not found.") - continue - f0 = self.f0_method_dict[method](**params) - if method == 'harvest' and filter_radius > 2: - f0 = signal.medfilt(f0, 3) - f0 = f0[1:] # Get rid of first frame. - f0_computation_stack.append(f0) - - for fc in f0_computation_stack: - print(len(fc)) - - print(f"Calculating hybrid median f0 from the stack of: {str(methods)}") - f0_median_hybrid = np.nanmedian(f0_computation_stack, axis=0) - return f0_median_hybrid - - def get_f0( - self, - input_audio_path, - x, - p_len, - f0_up_key, - f0_method, - filter_radius, - crepe_hop_length, - f0_autotune, - inp_f0=None, - f0_min=50, - f0_max=1100, - ): - global input_audio_path2wav - time_step = self.window / self.sr * 1000 - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - params = {'x': x, 'p_len': p_len, 'f0_up_key': f0_up_key, 'f0_min': f0_min, - 'f0_max': f0_max, 'time_step': time_step, 'filter_radius': filter_radius, - 'crepe_hop_length': crepe_hop_length, 'model': "full" - } - - if "hybrid" in f0_method: - # Perform hybrid median pitch estimation - input_audio_path2wav[input_audio_path] = x.astype(np.double) - f0 = self.get_f0_hybrid_computation( - f0_method,+ - input_audio_path, - x, - f0_min, - f0_max, - p_len, - filter_radius, - crepe_hop_length, - time_step, - ) - else: - f0 = self.f0_method_dict[f0_method](**params) - - if "privateuseone" in str(self.device): # clean ortruntime memory - del self.model_rmvpe.model - del self.model_rmvpe - logger.info("Cleaning ortruntime memory") - - if f0_autotune: - f0 = self.autotune_f0(f0) - - f0 *= pow(2, f0_up_key / 12) - # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - tf0 = self.sr // self.window # 每秒f0点数 - if inp_f0 is not None: - delta_t = np.round( - (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1 - ).astype("int16") - replace_f0 = np.interp( - list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1] - ) - shape = f0[self.x_pad * tf0 : 
self.x_pad * tf0 + len(replace_f0)].shape[0] - f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[ - :shape - ] - # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - f0bak = f0.copy() - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int32) - return f0_coarse, f0bak # 1-0 - - def vc( - self, - model, - net_g, - sid, - audio0, - pitch, - pitchf, - times, - index, - big_npy, - index_rate, - version, - protect, - ): # ,file_index,file_big_npy - feats = torch.from_numpy(audio0) - if self.is_half: - feats = feats.half() - else: - feats = feats.float() - if feats.dim() == 2: # double channels - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False) - - inputs = { - "source": feats.to(self.device), - "padding_mask": padding_mask, - "output_layer": 9 if version == "v1" else 12, - } - t0 = ttime() - with torch.no_grad(): - logits = model.extract_features(**inputs) - feats = model.final_proj(logits[0]) if version == "v1" else logits[0] - if protect < 0.5 and pitch is not None and pitchf is not None: - feats0 = feats.clone() - if ( - not isinstance(index, type(None)) - and not isinstance(big_npy, type(None)) - and index_rate != 0 - ): - npy = feats[0].cpu().numpy() - if self.is_half: - npy = npy.astype("float32") - - # _, I = index.search(npy, 1) - # npy = big_npy[I.squeeze()] - - score, ix = index.search(npy, k=8) - weight = np.square(1 / score) - weight /= weight.sum(axis=1, keepdims=True) - npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1) - - if self.is_half: - npy = npy.astype("float16") - feats = ( - torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate - + (1 - index_rate) * feats - ) - - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - if protect < 0.5 and pitch is not None and pitchf is not None: - feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute( - 0, 2, 1 - ) - t1 = ttime() - p_len = audio0.shape[0] // self.window - if feats.shape[1] < p_len: - p_len = feats.shape[1] - if pitch is not None and pitchf is not None: - pitch = pitch[:, :p_len] - pitchf = pitchf[:, :p_len] - - if protect < 0.5 and pitch is not None and pitchf is not None: - pitchff = pitchf.clone() - pitchff[pitchf > 0] = 1 - pitchff[pitchf < 1] = protect - pitchff = pitchff.unsqueeze(-1) - feats = feats * pitchff + feats0 * (1 - pitchff) - feats = feats.to(feats0.dtype) - p_len = torch.tensor([p_len], device=self.device).long() - with torch.no_grad(): - hasp = pitch is not None and pitchf is not None - arg = (feats, p_len, pitch, pitchf, sid) if hasp else (feats, p_len, sid) - audio1 = (net_g.infer(*arg)[0][0, 0]).data.cpu().float().numpy() - del hasp, arg - del feats, p_len, padding_mask - if torch.cuda.is_available(): - torch.cuda.empty_cache() - t2 = ttime() - times[0] += t1 - t0 - times[2] += t2 - t1 - return audio1 - def process_t(self, t, s, window, audio_pad, pitch, pitchf, times, index, big_npy, index_rate, version, protect, t_pad_tgt, if_f0, sid, model, net_g): - t = t // window * window - if if_f0 == 1: - return self.vc( - model, - net_g, - sid, - audio_pad[s : t + t_pad_tgt + window], - pitch[:, s // window : (t + t_pad_tgt) // window], - pitchf[:, s // window : (t + t_pad_tgt) // window], - 
times, - index, - big_npy, - index_rate, - version, - protect, - )[t_pad_tgt : -t_pad_tgt] - else: - return self.vc( - model, - net_g, - sid, - audio_pad[s : t + t_pad_tgt + window], - None, - None, - times, - index, - big_npy, - index_rate, - version, - protect, - )[t_pad_tgt : -t_pad_tgt] - - - def pipeline( - self, - model, - net_g, - sid, - audio, - input_audio_path, - times, - f0_up_key, - f0_method, - file_index, - index_rate, - if_f0, - filter_radius, - tgt_sr, - resample_sr, - rms_mix_rate, - version, - protect, - crepe_hop_length, - f0_autotune, - f0_file=None, - f0_min=50, - f0_max=1100 - ): - if ( - file_index != "" - # and file_big_npy != "" - # and os.path.exists(file_big_npy) == True - and os.path.exists(file_index) - and index_rate != 0 - ): - try: - index = faiss.read_index(file_index) - # big_npy = np.load(file_big_npy) - big_npy = index.reconstruct_n(0, index.ntotal) - except: - traceback.print_exc() - index = big_npy = None - else: - index = big_npy = None - audio = signal.filtfilt(bh, ah, audio) - audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect") - opt_ts = [] - if audio_pad.shape[0] > self.t_max: - audio_sum = np.zeros_like(audio) - for i in range(self.window): - audio_sum += audio_pad[i : i - self.window] - for t in range(self.t_center, audio.shape[0], self.t_center): - opt_ts.append( - t - - self.t_query - + np.where( - np.abs(audio_sum[t - self.t_query : t + self.t_query]) - == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min() - )[0][0] - ) - s = 0 - audio_opt = [] - t = None - t1 = ttime() - audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect") - p_len = audio_pad.shape[0] // self.window - inp_f0 = None - if hasattr(f0_file, "name"): - try: - with open(f0_file.name, "r") as f: - lines = f.read().strip("\n").split("\n") - inp_f0 = [] - for line in lines: - inp_f0.append([float(i) for i in line.split(",")]) - inp_f0 = np.array(inp_f0, dtype="float32") - except: - traceback.print_exc() - sid = torch.tensor(sid, device=self.device).unsqueeze(0).long() - pitch, pitchf = None, None - if if_f0: - pitch, pitchf = self.get_f0( - input_audio_path, - audio_pad, - p_len, - f0_up_key, - f0_method, - filter_radius, - crepe_hop_length, - f0_autotune, - inp_f0, - f0_min, - f0_max - ) - pitch = pitch[:p_len] - pitchf = pitchf[:p_len] - if self.device == "mps" or "xpu" in self.device: - pitchf = pitchf.astype(np.float32) - pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long() - pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float() - t2 = ttime() - times[1] += t2 - t1 - - with tqdm(total=len(opt_ts), desc="Processing", unit="window") as pbar: - for i, t in enumerate(opt_ts): - t = t // self.window * self.window - start = s - end = t + self.t_pad2 + self.window - audio_slice = audio_pad[start:end] - pitch_slice = pitch[:, start // self.window:end // self.window] if if_f0 else None - pitchf_slice = pitchf[:, start // self.window:end // self.window] if if_f0 else None - audio_opt.append(self.vc(model, net_g, sid, audio_slice, pitch_slice, pitchf_slice, times, index, big_npy, index_rate, version, protect)[self.t_pad_tgt : -self.t_pad_tgt]) - s = t - pbar.update(1) - pbar.refresh() - - audio_slice = audio_pad[t:] - pitch_slice = pitch[:, t // self.window:] if if_f0 and t is not None else pitch - pitchf_slice = pitchf[:, t // self.window:] if if_f0 and t is not None else pitchf - audio_opt.append(self.vc(model, net_g, sid, audio_slice, pitch_slice, pitchf_slice, times, index, big_npy, index_rate, version, 
protect)[self.t_pad_tgt : -self.t_pad_tgt]) - - audio_opt = np.concatenate(audio_opt) - if rms_mix_rate != 1: - audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate) - if tgt_sr != resample_sr >= 16000: - audio_opt = librosa.resample( - audio_opt, orig_sr=tgt_sr, target_sr=resample_sr - ) - audio_max = np.abs(audio_opt).max() / 0.99 - max_int16 = 32768 - if audio_max > 1: - max_int16 /= audio_max - audio_opt = (audio_opt * max_int16).astype(np.int16) - del pitch, pitchf, sid - if torch.cuda.is_available(): - torch.cuda.empty_cache() - - print("Returning completed audio...") - print("-------------------") - return audio_opt diff --git a/spaces/Felix0810/textgenerator/README.md b/spaces/Felix0810/textgenerator/README.md deleted file mode 100644 index d3d3bf03db58b85e3b8071fe4909188c868229e2..0000000000000000000000000000000000000000 --- a/spaces/Felix0810/textgenerator/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Textgenerator -emoji: 🐢 -colorFrom: yellow -colorTo: indigo -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Felix123456/bingo/cloudflare/worker.js b/spaces/Felix123456/bingo/cloudflare/worker.js deleted file mode 100644 index e0debd750615f1329b2c72fbce73e1b9291f7137..0000000000000000000000000000000000000000 --- a/spaces/Felix123456/bingo/cloudflare/worker.js +++ /dev/null @@ -1,18 +0,0 @@ -const TRAGET_HOST='hf4all-bingo.hf.space' // 请将此域名改成你自己的,域名信息在设置》站点域名查看。 - -export default { - async fetch(request) { - const uri = new URL(request.url); - if (uri.protocol === 'http:') { - uri.protocol = 'https:'; - return new Response('', { - status: 301, - headers: { - location: uri.toString(), - }, - }) - } - uri.host = TRAGET_HOST - return fetch(new Request(uri.toString(), request)); - }, -}; diff --git a/spaces/FunnyDannyG/VoiceFixer/app.py b/spaces/FunnyDannyG/VoiceFixer/app.py deleted file mode 100644 index 9aeebd5a3134fb40a9b7de333d65aaaf84118f0e..0000000000000000000000000000000000000000 --- a/spaces/FunnyDannyG/VoiceFixer/app.py +++ /dev/null @@ -1,24 +0,0 @@ -import os -os.system('pip install gradio==2.3.0a0') -os.system('pip install voicefixer --upgrade') -from voicefixer import VoiceFixer -import gradio as gr -voicefixer = VoiceFixer() -def inference(audio,mode): - voicefixer.restore(input=audio.name, # input wav file path - output="output.wav", # output wav file path - cuda=False, # whether to use gpu acceleration - mode = int(mode)) # You can try out mode 0, 1 to find out the best result - return 'output.wav' - -inputs = [gr.inputs.Audio(type="file", label="Input Audio"),gr.inputs.Radio(choices=['0','1','2'], type="value", default='0', label='mode')] -outputs = gr.outputs.Audio(type="file",label="Output Audio") - - -title = "Voice Fixer" -description = "Gradio demo for VoiceFixer: Toward General Speech Restoration With Neural Vocoder. To use it, simply add your audio, or click one of the examples to load them. Read more at the links below." -article = "

    VoiceFixer: Toward General Speech Restoration With Neural Vocoder | Github Repo

    " - -examples=[['bruce.wav','2']] - -gr.Interface(inference, inputs, outputs, title=title, description=description, article=article, examples=examples, enable_queue=True).launch() \ No newline at end of file diff --git a/spaces/GipAdonimus/Real-Time-Voice-Cloning/encoder/visualizations.py b/spaces/GipAdonimus/Real-Time-Voice-Cloning/encoder/visualizations.py deleted file mode 100644 index 980c74f95f1f7df41ebccc983600b2713c0b0502..0000000000000000000000000000000000000000 --- a/spaces/GipAdonimus/Real-Time-Voice-Cloning/encoder/visualizations.py +++ /dev/null @@ -1,178 +0,0 @@ -from encoder.data_objects.speaker_verification_dataset import SpeakerVerificationDataset -from datetime import datetime -from time import perf_counter as timer -import matplotlib.pyplot as plt -import numpy as np -# import webbrowser -import visdom -import umap - -colormap = np.array([ - [76, 255, 0], - [0, 127, 70], - [255, 0, 0], - [255, 217, 38], - [0, 135, 255], - [165, 0, 165], - [255, 167, 255], - [0, 255, 255], - [255, 96, 38], - [142, 76, 0], - [33, 0, 127], - [0, 0, 0], - [183, 183, 183], -], dtype=np.float) / 255 - - -class Visualizations: - def __init__(self, env_name=None, update_every=10, server="http://localhost", disabled=False): - # Tracking data - self.last_update_timestamp = timer() - self.update_every = update_every - self.step_times = [] - self.losses = [] - self.eers = [] - print("Updating the visualizations every %d steps." % update_every) - - # If visdom is disabled TODO: use a better paradigm for that - self.disabled = disabled - if self.disabled: - return - - # Set the environment name - now = str(datetime.now().strftime("%d-%m %Hh%M")) - if env_name is None: - self.env_name = now - else: - self.env_name = "%s (%s)" % (env_name, now) - - # Connect to visdom and open the corresponding window in the browser - try: - self.vis = visdom.Visdom(server, env=self.env_name, raise_exceptions=True) - except ConnectionError: - raise Exception("No visdom server detected. Run the command \"visdom\" in your CLI to " - "start it.") - # webbrowser.open("http://localhost:8097/env/" + self.env_name) - - # Create the windows - self.loss_win = None - self.eer_win = None - # self.lr_win = None - self.implementation_win = None - self.projection_win = None - self.implementation_string = "" - - def log_params(self): - if self.disabled: - return - from encoder import params_data - from encoder import params_model - param_string = "Model parameters:
    " - for param_name in (p for p in dir(params_model) if not p.startswith("__")): - value = getattr(params_model, param_name) - param_string += "\t%s: %s
    " % (param_name, value) - param_string += "Data parameters:
    " - for param_name in (p for p in dir(params_data) if not p.startswith("__")): - value = getattr(params_data, param_name) - param_string += "\t%s: %s
    " % (param_name, value) - self.vis.text(param_string, opts={"title": "Parameters"}) - - def log_dataset(self, dataset: SpeakerVerificationDataset): - if self.disabled: - return - dataset_string = "" - dataset_string += "Speakers: %s\n" % len(dataset.speakers) - dataset_string += "\n" + dataset.get_logs() - dataset_string = dataset_string.replace("\n", "
    ") - self.vis.text(dataset_string, opts={"title": "Dataset"}) - - def log_implementation(self, params): - if self.disabled: - return - implementation_string = "" - for param, value in params.items(): - implementation_string += "%s: %s\n" % (param, value) - implementation_string = implementation_string.replace("\n", "
    ") - self.implementation_string = implementation_string - self.implementation_win = self.vis.text( - implementation_string, - opts={"title": "Training implementation"} - ) - - def update(self, loss, eer, step): - # Update the tracking data - now = timer() - self.step_times.append(1000 * (now - self.last_update_timestamp)) - self.last_update_timestamp = now - self.losses.append(loss) - self.eers.append(eer) - print(".", end="") - - # Update the plots every steps - if step % self.update_every != 0: - return - time_string = "Step time: mean: %5dms std: %5dms" % \ - (int(np.mean(self.step_times)), int(np.std(self.step_times))) - print("\nStep %6d Loss: %.4f EER: %.4f %s" % - (step, np.mean(self.losses), np.mean(self.eers), time_string)) - if not self.disabled: - self.loss_win = self.vis.line( - [np.mean(self.losses)], - [step], - win=self.loss_win, - update="append" if self.loss_win else None, - opts=dict( - legend=["Avg. loss"], - xlabel="Step", - ylabel="Loss", - title="Loss", - ) - ) - self.eer_win = self.vis.line( - [np.mean(self.eers)], - [step], - win=self.eer_win, - update="append" if self.eer_win else None, - opts=dict( - legend=["Avg. EER"], - xlabel="Step", - ylabel="EER", - title="Equal error rate" - ) - ) - if self.implementation_win is not None: - self.vis.text( - self.implementation_string + ("%s" % time_string), - win=self.implementation_win, - opts={"title": "Training implementation"}, - ) - - # Reset the tracking - self.losses.clear() - self.eers.clear() - self.step_times.clear() - - def draw_projections(self, embeds, utterances_per_speaker, step, out_fpath=None, - max_speakers=10): - max_speakers = min(max_speakers, len(colormap)) - embeds = embeds[:max_speakers * utterances_per_speaker] - - n_speakers = len(embeds) // utterances_per_speaker - ground_truth = np.repeat(np.arange(n_speakers), utterances_per_speaker) - colors = [colormap[i] for i in ground_truth] - - reducer = umap.UMAP() - projected = reducer.fit_transform(embeds) - plt.scatter(projected[:, 0], projected[:, 1], c=colors) - plt.gca().set_aspect("equal", "datalim") - plt.title("UMAP projection (step %d)" % step) - if not self.disabled: - self.projection_win = self.vis.matplot(plt, win=self.projection_win) - if out_fpath is not None: - plt.savefig(out_fpath) - plt.clf() - - def save(self): - if not self.disabled: - self.vis.save([self.env_name]) - \ No newline at end of file diff --git a/spaces/GipAdonimus/Real-Time-Voice-Cloning/synthesizer_preprocess_embeds.py b/spaces/GipAdonimus/Real-Time-Voice-Cloning/synthesizer_preprocess_embeds.py deleted file mode 100644 index 94f864d5d3c36c6177b211f5818e7c920a41cd8c..0000000000000000000000000000000000000000 --- a/spaces/GipAdonimus/Real-Time-Voice-Cloning/synthesizer_preprocess_embeds.py +++ /dev/null @@ -1,25 +0,0 @@ -from synthesizer.preprocess import create_embeddings -from utils.argutils import print_args -from pathlib import Path -import argparse - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="Creates embeddings for the synthesizer from the LibriSpeech utterances.", - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - parser.add_argument("synthesizer_root", type=Path, help=\ - "Path to the synthesizer training data that contains the audios and the train.txt file. 
" - "If you let everything as default, it should be /SV2TTS/synthesizer/.") - parser.add_argument("-e", "--encoder_model_fpath", type=Path, - default="encoder/saved_models/pretrained.pt", help=\ - "Path your trained encoder model.") - parser.add_argument("-n", "--n_processes", type=int, default=4, help= \ - "Number of parallel processes. An encoder is created for each, so you may need to lower " - "this value on GPUs with low memory. Set it to 1 if CUDA is unhappy.") - args = parser.parse_args() - - # Preprocess the dataset - print_args(args, parser) - create_embeddings(**vars(args)) diff --git a/spaces/Gradio-Blocks/anime-colorization/test_danbooru.sh b/spaces/Gradio-Blocks/anime-colorization/test_danbooru.sh deleted file mode 100644 index 0512be74d28ea244b3d57559b29c9d43baf22382..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/anime-colorization/test_danbooru.sh +++ /dev/null @@ -1,6 +0,0 @@ - -MODEL_FLAGS="--image_size 32 --guide_size 128 --num_channels 128 --num_res_blocks 3 --learn_sigma True --dropout 0.0" -DIFFUSION_FLAGS="--diffusion_steps 4000 --noise_schedule cosine" -TEST_FLAGS="--batch_size 4" - -OPENAI_LOGDIR="./danbooru2017_guided_test_log" python scripts/pixel_guide_sample.py --data_dir data/danbooru2017/anime --guide_dir data/danbooru2017/anime_sketch --timestep_respacing ddim25 --use_ddim True --model_path danbooru2017_guided_log/ema_0.9999_360000.pt $MODEL_FLAGS $DIFFUSION_FLAGS $TEST_FLAGS diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/rpn/rpn_x101_64x4d_fpn_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/rpn/rpn_x101_64x4d_fpn_1x_coco.py deleted file mode 100644 index bb7f0a630b9f2e9263183e003c288a33eb972e71..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/rpn/rpn_x101_64x4d_fpn_1x_coco.py +++ /dev/null @@ -1,13 +0,0 @@ -_base_ = './rpn_r50_fpn_1x_coco.py' -model = dict( - pretrained='open-mmlab://resnext101_64x4d', - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch')) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/scnet/scnet_r50_fpn_20e_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/scnet/scnet_r50_fpn_20e_coco.py deleted file mode 100644 index 3b121a6a2836ac7626f7b383ada9508f8b9d972d..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/scnet/scnet_r50_fpn_20e_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './scnet_r50_fpn_1x_coco.py' -# learning policy -lr_config = dict(step=[16, 19]) -runner = dict(type='EpochBasedRunner', max_epochs=20) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/fastscnn/README.md b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/fastscnn/README.md deleted file mode 100644 index 9cea8d0fd0dd76f5322c3d53263d3d5faef539fa..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/fastscnn/README.md +++ /dev/null @@ -1,22 +0,0 @@ -# Fast-SCNN for Semantic Segmentation - -## Introduction - - - -```latex -@article{poudel2019fast, - title={Fast-scnn: Fast semantic segmentation network}, - author={Poudel, Rudra PK and Liwicki, Stephan and Cipolla, Roberto}, - journal={arXiv preprint arXiv:1902.04502}, - year={2019} -} -``` - -## Results and models - -### Cityscapes - -| Method | Backbone | Crop Size | Lr schd | Mem (GB) 
| Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | -| --------- | --------- | --------- | ------: | -------- | -------------- | ----: | ------------- | --------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Fast-SCNN | Fast-SCNN | 512x1024 | 80000 | 8.4 | 63.61 | 69.06 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fast_scnn.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fast_scnn/fast_scnn_4x8_80k_lr0.12_cityscapes-f5096c79.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/fast_scnn/fast_scnn_4x8_80k_lr0.12_cityscapes-20200807_165744.log.json) | diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr48_512x512_20k_voc12aug.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr48_512x512_20k_voc12aug.py deleted file mode 100644 index c2dd6d1158bd31ecdd7874827fd37bffb5d26db6..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr48_512x512_20k_voc12aug.py +++ /dev/null @@ -1,39 +0,0 @@ -_base_ = './ocrnet_hr18_512x512_20k_voc12aug.py' -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w48', - backbone=dict( - extra=dict( - stage2=dict(num_channels=(48, 96)), - stage3=dict(num_channels=(48, 96, 192)), - stage4=dict(num_channels=(48, 96, 192, 384)))), - decode_head=[ - dict( - type='FCNHead', - in_channels=[48, 96, 192, 384], - channels=sum([48, 96, 192, 384]), - input_transform='resize_concat', - in_index=(0, 1, 2, 3), - kernel_size=1, - num_convs=1, - norm_cfg=norm_cfg, - concat_input=False, - dropout_ratio=-1, - num_classes=21, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - dict( - type='OCRHead', - in_channels=[48, 96, 192, 384], - channels=512, - ocr_channels=256, - input_transform='resize_concat', - in_index=(0, 1, 2, 3), - norm_cfg=norm_cfg, - dropout_ratio=-1, - num_classes=21, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) - ]) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/decode_heads/psp_head.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/decode_heads/psp_head.py deleted file mode 100644 index bdbe2c8ac8dc2d21dd3e21aa5ed9f74504545c62..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/decode_heads/psp_head.py +++ /dev/null @@ -1,101 +0,0 @@ -import torch -import torch.nn as nn -from mmcv.cnn import ConvModule - -from mmseg.ops import resize -from ..builder import HEADS -from .decode_head import BaseDecodeHead - - -class PPM(nn.ModuleList): - """Pooling Pyramid Module used in PSPNet. - - Args: - pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid - Module. - in_channels (int): Input channels. - channels (int): Channels after modules, before conv_seg. - conv_cfg (dict|None): Config of conv layers. - norm_cfg (dict|None): Config of norm layers. - act_cfg (dict): Config of activation layers. - align_corners (bool): align_corners argument of F.interpolate. 
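    Example:
        A minimal shape check (an illustrative sketch that only assumes the
        imports already used in this file; the channel sizes are arbitrary)::

            >>> import torch
            >>> ppm = PPM(pool_scales=(1, 2, 3, 6), in_channels=2048,
            ...           channels=512, conv_cfg=None, norm_cfg=None,
            ...           act_cfg=dict(type='ReLU'), align_corners=False)
            >>> outs = ppm(torch.randn(2, 2048, 32, 32))
            >>> len(outs), outs[0].shape
            (4, torch.Size([2, 512, 32, 32]))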
- """ - - def __init__(self, pool_scales, in_channels, channels, conv_cfg, norm_cfg, - act_cfg, align_corners): - super(PPM, self).__init__() - self.pool_scales = pool_scales - self.align_corners = align_corners - self.in_channels = in_channels - self.channels = channels - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - for pool_scale in pool_scales: - self.append( - nn.Sequential( - nn.AdaptiveAvgPool2d(pool_scale), - ConvModule( - self.in_channels, - self.channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg))) - - def forward(self, x): - """Forward function.""" - ppm_outs = [] - for ppm in self: - ppm_out = ppm(x) - upsampled_ppm_out = resize( - ppm_out, - size=x.size()[2:], - mode='bilinear', - align_corners=self.align_corners) - ppm_outs.append(upsampled_ppm_out) - return ppm_outs - - -@HEADS.register_module() -class PSPHead(BaseDecodeHead): - """Pyramid Scene Parsing Network. - - This head is the implementation of - `PSPNet `_. - - Args: - pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid - Module. Default: (1, 2, 3, 6). - """ - - def __init__(self, pool_scales=(1, 2, 3, 6), **kwargs): - super(PSPHead, self).__init__(**kwargs) - assert isinstance(pool_scales, (list, tuple)) - self.pool_scales = pool_scales - self.psp_modules = PPM( - self.pool_scales, - self.in_channels, - self.channels, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg, - align_corners=self.align_corners) - self.bottleneck = ConvModule( - self.in_channels + len(pool_scales) * self.channels, - self.channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - def forward(self, inputs): - """Forward function.""" - x = self._transform_inputs(inputs) - psp_outs = [x] - psp_outs.extend(self.psp_modules(x)) - psp_outs = torch.cat(psp_outs, dim=1) - output = self.bottleneck(psp_outs) - output = self.cls_seg(output) - return output diff --git a/spaces/Grezz/generate_human_motion/pyrender/pyrender/platforms/pyglet_platform.py b/spaces/Grezz/generate_human_motion/pyrender/pyrender/platforms/pyglet_platform.py deleted file mode 100644 index a70cf7b659bc85a92f6c9c8ebcc360662a068507..0000000000000000000000000000000000000000 --- a/spaces/Grezz/generate_human_motion/pyrender/pyrender/platforms/pyglet_platform.py +++ /dev/null @@ -1,90 +0,0 @@ -from pyrender.constants import (TARGET_OPEN_GL_MAJOR, TARGET_OPEN_GL_MINOR, - MIN_OPEN_GL_MAJOR, MIN_OPEN_GL_MINOR) -from .base import Platform - -import OpenGL - - -__all__ = ['PygletPlatform'] - - -class PygletPlatform(Platform): - """Renders on-screen using a 1x1 hidden Pyglet window for getting - an OpenGL context. 
- """ - - def __init__(self, viewport_width, viewport_height): - super(PygletPlatform, self).__init__(viewport_width, viewport_height) - self._window = None - - def init_context(self): - import pyglet - pyglet.options['shadow_window'] = False - - try: - pyglet.lib.x11.xlib.XInitThreads() - except Exception: - pass - - self._window = None - confs = [pyglet.gl.Config(sample_buffers=1, samples=4, - depth_size=24, - double_buffer=True, - major_version=TARGET_OPEN_GL_MAJOR, - minor_version=TARGET_OPEN_GL_MINOR), - pyglet.gl.Config(depth_size=24, - double_buffer=True, - major_version=TARGET_OPEN_GL_MAJOR, - minor_version=TARGET_OPEN_GL_MINOR), - pyglet.gl.Config(sample_buffers=1, samples=4, - depth_size=24, - double_buffer=True, - major_version=MIN_OPEN_GL_MAJOR, - minor_version=MIN_OPEN_GL_MINOR), - pyglet.gl.Config(depth_size=24, - double_buffer=True, - major_version=MIN_OPEN_GL_MAJOR, - minor_version=MIN_OPEN_GL_MINOR)] - for conf in confs: - try: - self._window = pyglet.window.Window(config=conf, visible=False, - resizable=False, - width=1, height=1) - break - except pyglet.window.NoSuchConfigException as e: - pass - - if not self._window: - raise ValueError( - 'Failed to initialize Pyglet window with an OpenGL >= 3+ ' - 'context. If you\'re logged in via SSH, ensure that you\'re ' - 'running your script with vglrun (i.e. VirtualGL). The ' - 'internal error message was "{}"'.format(e) - ) - - def make_current(self): - if self._window: - self._window.switch_to() - - def make_uncurrent(self): - try: - import pyglet - pyglet.gl.xlib.glx.glXMakeContextCurrent(self._window.context.x_display, 0, 0, None) - except Exception: - pass - - def delete_context(self): - if self._window is not None: - self.make_current() - cid = OpenGL.contextdata.getContext() - try: - self._window.context.destroy() - self._window.close() - except Exception: - pass - self._window = None - OpenGL.contextdata.cleanupContext(cid) - del cid - - def supports_framebuffers(self): - return True diff --git a/spaces/Groenewaldt/stabilityai-stable-diffusion-xl-refiner-1.0/README.md b/spaces/Groenewaldt/stabilityai-stable-diffusion-xl-refiner-1.0/README.md deleted file mode 100644 index 9cfce3ee99de57827e9a024e794850d51cdb691d..0000000000000000000000000000000000000000 --- a/spaces/Groenewaldt/stabilityai-stable-diffusion-xl-refiner-1.0/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Stabilityai Stable Diffusion Xl Refiner 1.0 -emoji: 💻 -colorFrom: pink -colorTo: gray -sdk: gradio -sdk_version: 3.40.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/GuXiaoBei/wechat-chatbot/bot/bot.py b/spaces/GuXiaoBei/wechat-chatbot/bot/bot.py deleted file mode 100644 index 850ba3b1e4e31d8a7b079c9827fbd15bec32e9f3..0000000000000000000000000000000000000000 --- a/spaces/GuXiaoBei/wechat-chatbot/bot/bot.py +++ /dev/null @@ -1,13 +0,0 @@ -""" -Auto-replay chat robot abstract class -""" - - -class Bot(object): - def reply(self, query, context=None): - """ - bot auto-reply content - :param req: received message - :return: reply content - """ - raise NotImplementedError diff --git a/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/dpt/blocks.py b/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/dpt/blocks.py deleted file mode 100644 index 46b3fe3fffe17cae3c885491937bbb1f09a21e9d..0000000000000000000000000000000000000000 --- a/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/dpt/blocks.py +++ /dev/null @@ -1,383 
+0,0 @@ -import torch -import torch.nn as nn - -from .vit import ( - _make_pretrained_vitb_rn50_384, - _make_pretrained_vitl16_384, - _make_pretrained_vitb16_384, - forward_vit, -) - - -def _make_encoder( - backbone, - features, - use_pretrained, - groups=1, - expand=False, - exportable=True, - hooks=None, - use_vit_only=False, - use_readout="ignore", - enable_attention_hooks=False, -): - if backbone == "vitl16_384": - pretrained = _make_pretrained_vitl16_384( - use_pretrained, - hooks=hooks, - use_readout=use_readout, - enable_attention_hooks=enable_attention_hooks, - ) - scratch = _make_scratch( - [256, 512, 1024, 1024], features, groups=groups, expand=expand - ) # ViT-L/16 - 85.0% Top1 (backbone) - elif backbone == "vitb_rn50_384": - pretrained = _make_pretrained_vitb_rn50_384( - use_pretrained, - hooks=hooks, - use_vit_only=use_vit_only, - use_readout=use_readout, - enable_attention_hooks=enable_attention_hooks, - ) - scratch = _make_scratch( - [256, 512, 768, 768], features, groups=groups, expand=expand - ) # ViT-H/16 - 85.0% Top1 (backbone) - elif backbone == "vitb16_384": - pretrained = _make_pretrained_vitb16_384( - use_pretrained, - hooks=hooks, - use_readout=use_readout, - enable_attention_hooks=enable_attention_hooks, - ) - scratch = _make_scratch( - [96, 192, 384, 768], features, groups=groups, expand=expand - ) # ViT-B/16 - 84.6% Top1 (backbone) - elif backbone == "resnext101_wsl": - pretrained = _make_pretrained_resnext101_wsl(use_pretrained) - scratch = _make_scratch( - [256, 512, 1024, 2048], features, groups=groups, expand=expand - ) # efficientnet_lite3 - else: - print(f"Backbone '{backbone}' not implemented") - assert False - - return pretrained, scratch - - -def _make_scratch(in_shape, out_shape, groups=1, expand=False): - scratch = nn.Module() - - out_shape1 = out_shape - out_shape2 = out_shape - out_shape3 = out_shape - out_shape4 = out_shape - if expand == True: - out_shape1 = out_shape - out_shape2 = out_shape * 2 - out_shape3 = out_shape * 4 - out_shape4 = out_shape * 8 - - scratch.layer1_rn = nn.Conv2d( - in_shape[0], - out_shape1, - kernel_size=3, - stride=1, - padding=1, - bias=False, - groups=groups, - ) - scratch.layer2_rn = nn.Conv2d( - in_shape[1], - out_shape2, - kernel_size=3, - stride=1, - padding=1, - bias=False, - groups=groups, - ) - scratch.layer3_rn = nn.Conv2d( - in_shape[2], - out_shape3, - kernel_size=3, - stride=1, - padding=1, - bias=False, - groups=groups, - ) - scratch.layer4_rn = nn.Conv2d( - in_shape[3], - out_shape4, - kernel_size=3, - stride=1, - padding=1, - bias=False, - groups=groups, - ) - - return scratch - - -def _make_resnet_backbone(resnet): - pretrained = nn.Module() - pretrained.layer1 = nn.Sequential( - resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1 - ) - - pretrained.layer2 = resnet.layer2 - pretrained.layer3 = resnet.layer3 - pretrained.layer4 = resnet.layer4 - - return pretrained - - -def _make_pretrained_resnext101_wsl(use_pretrained): - resnet = torch.hub.load("facebookresearch/WSL-Images", "resnext101_32x8d_wsl") - return _make_resnet_backbone(resnet) - - -class Interpolate(nn.Module): - """Interpolation module.""" - - def __init__(self, scale_factor, mode, align_corners=False): - """Init. - - Args: - scale_factor (float): scaling - mode (str): interpolation mode - """ - super(Interpolate, self).__init__() - - self.interp = nn.functional.interpolate - self.scale_factor = scale_factor - self.mode = mode - self.align_corners = align_corners - - def forward(self, x): - """Forward pass. 
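        Resamples the input with ``F.interpolate`` using the ``scale_factor``,
        ``mode`` and ``align_corners`` values chosen at construction time.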
- - Args: - x (tensor): input - - Returns: - tensor: interpolated data - """ - - x = self.interp( - x, - scale_factor=self.scale_factor, - mode=self.mode, - align_corners=self.align_corners, - ) - - return x - - -class ResidualConvUnit(nn.Module): - """Residual convolution module.""" - - def __init__(self, features): - """Init. - - Args: - features (int): number of features - """ - super().__init__() - - self.conv1 = nn.Conv2d( - features, features, kernel_size=3, stride=1, padding=1, bias=True - ) - - self.conv2 = nn.Conv2d( - features, features, kernel_size=3, stride=1, padding=1, bias=True - ) - - self.relu = nn.ReLU(inplace=True) - - def forward(self, x): - """Forward pass. - - Args: - x (tensor): input - - Returns: - tensor: output - """ - out = self.relu(x) - out = self.conv1(out) - out = self.relu(out) - out = self.conv2(out) - - return out + x - - -class FeatureFusionBlock(nn.Module): - """Feature fusion block.""" - - def __init__(self, features): - """Init. - - Args: - features (int): number of features - """ - super(FeatureFusionBlock, self).__init__() - - self.resConfUnit1 = ResidualConvUnit(features) - self.resConfUnit2 = ResidualConvUnit(features) - - def forward(self, *xs): - """Forward pass. - - Returns: - tensor: output - """ - output = xs[0] - - if len(xs) == 2: - output += self.resConfUnit1(xs[1]) - - output = self.resConfUnit2(output) - - output = nn.functional.interpolate( - output, scale_factor=2, mode="bilinear", align_corners=True - ) - - return output - - -class ResidualConvUnit_custom(nn.Module): - """Residual convolution module.""" - - def __init__(self, features, activation, bn): - """Init. - - Args: - features (int): number of features - """ - super().__init__() - - self.bn = bn - - self.groups = 1 - - self.conv1 = nn.Conv2d( - features, - features, - kernel_size=3, - stride=1, - padding=1, - bias=not self.bn, - groups=self.groups, - ) - - self.conv2 = nn.Conv2d( - features, - features, - kernel_size=3, - stride=1, - padding=1, - bias=not self.bn, - groups=self.groups, - ) - - if self.bn == True: - self.bn1 = nn.BatchNorm2d(features) - self.bn2 = nn.BatchNorm2d(features) - - self.activation = activation - - self.skip_add = nn.quantized.FloatFunctional() - - def forward(self, x): - """Forward pass. - - Args: - x (tensor): input - - Returns: - tensor: output - """ - - out = self.activation(x) - out = self.conv1(out) - if self.bn == True: - out = self.bn1(out) - - out = self.activation(out) - out = self.conv2(out) - if self.bn == True: - out = self.bn2(out) - - if self.groups > 1: - out = self.conv_merge(out) - - return self.skip_add.add(out, x) - - # return out + x - - -class FeatureFusionBlock_custom(nn.Module): - """Feature fusion block.""" - - def __init__( - self, - features, - activation, - deconv=False, - bn=False, - expand=False, - align_corners=True, - ): - """Init. - - Args: - features (int): number of features - """ - super(FeatureFusionBlock_custom, self).__init__() - - self.deconv = deconv - self.align_corners = align_corners - - self.groups = 1 - - self.expand = expand - out_features = features - if self.expand == True: - out_features = features // 2 - - self.out_conv = nn.Conv2d( - features, - out_features, - kernel_size=1, - stride=1, - padding=0, - bias=True, - groups=1, - ) - - self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn) - self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn) - - self.skip_add = nn.quantized.FloatFunctional() - - def forward(self, *xs): - """Forward pass. 
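        When two inputs are given, the second is refined by ``resConfUnit1`` and
        added to the first; the sum is passed through ``resConfUnit2``, upsampled
        by a factor of 2 and projected by ``out_conv``.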
- - Returns: - tensor: output - """ - output = xs[0] - - if len(xs) == 2: - res = self.resConfUnit1(xs[1]) - output = self.skip_add.add(output, res) - # output += res - - output = self.resConfUnit2(output) - - output = nn.functional.interpolate( - output, scale_factor=2, mode="bilinear", align_corners=self.align_corners - ) - - output = self.out_conv(output) - - return output diff --git a/spaces/HarlanHong/DaGAN/README.md b/spaces/HarlanHong/DaGAN/README.md deleted file mode 100644 index 54ead99c470522b7ebf2101786f7038326b55955..0000000000000000000000000000000000000000 --- a/spaces/HarlanHong/DaGAN/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: DaGAN -emoji: 💩 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 2.9.4 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/clib/libbase/balanced_assignment.cpp b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/clib/libbase/balanced_assignment.cpp deleted file mode 100644 index 1a5a1061f3892be5a17e49192f744c39e0d395e8..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/clib/libbase/balanced_assignment.cpp +++ /dev/null @@ -1,109 +0,0 @@ -/** - * Copyright 2017-present, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under the license found in the - * LICENSE file in the root directory of this source tree. - */ - -/* -C++ code for solving the linear assignment problem. -Based on the Auction Algorithm from -https://dspace.mit.edu/bitstream/handle/1721.1/3265/P-2108-26912652.pdf and the -implementation from: https://github.com/bkj/auction-lap Adapted to be more -efficient when each worker is looking for k jobs instead of 1. 
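Illustrative Python-side usage (the import path below is an assumption made for
this sketch; in fairseq the extension is built from this file into a compiled
module, commonly importable as `fairseq.libbase`):

    import torch
    from fairseq import libbase

    scores = torch.randn(16, 4)   # [num_jobs, num_workers]; num_jobs must be divisible by num_workers
    assignment = libbase.balanced_assignment(scores)
    # `assignment` is a flat LongTensor of length num_jobs: for each worker in
    # order, the num_jobs / num_workers job indices it won in the auction.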
-*/ -#include -#include -using namespace torch::indexing; -torch::Tensor balanced_assignment(torch::Tensor job_and_worker_to_score) { - int max_iterations = 100; - torch::Tensor epsilon = - (job_and_worker_to_score.max() - job_and_worker_to_score.min()) / 50; - epsilon.clamp_min_(1e-04); - torch::Tensor worker_and_job_to_score = - job_and_worker_to_score.detach().transpose(0, 1).contiguous(); - int num_workers = worker_and_job_to_score.size(0); - int num_jobs = worker_and_job_to_score.size(1); - auto device = worker_and_job_to_score.device(); - int jobs_per_worker = num_jobs / num_workers; - torch::Tensor value = worker_and_job_to_score.clone(); - int counter = 0; - torch::Tensor max_value = worker_and_job_to_score.max(); - - torch::Tensor bid_indices; - torch::Tensor cost = worker_and_job_to_score.new_zeros({1, num_jobs}); - torch::Tensor bids = - worker_and_job_to_score.new_empty({num_workers, num_jobs}); - torch::Tensor bid_increments = - worker_and_job_to_score.new_empty({num_workers, jobs_per_worker}); - torch::Tensor top_values = - worker_and_job_to_score.new_empty({num_workers, jobs_per_worker + 1}); - torch::Tensor high_bids = worker_and_job_to_score.new_empty({num_jobs}); - - torch::Tensor top_index = top_values.to(torch::kLong); - torch::Tensor high_bidders = top_index.new_empty({num_jobs}); - torch::Tensor have_bids = high_bidders.to(torch::kBool); - torch::Tensor jobs_indices = - torch::arange({num_jobs}, torch::dtype(torch::kLong).device(device)); - torch::Tensor true_tensor = - torch::ones({1}, torch::dtype(torch::kBool).device(device)); - - while (true) { - bids.zero_(); - torch::topk_out(top_values, top_index, value, jobs_per_worker + 1, 1); - - // Each worker bids the difference in value between that job and the k+1th - // job - torch::sub_out( - bid_increments, - top_values.index({Slice(None, None), Slice(0, jobs_per_worker)}), - top_values.index({Slice(None, None), jobs_per_worker}).unsqueeze(1)); - - bid_increments.add_(epsilon); - bids.scatter_( - 1, - top_index.index({Slice(None, None), Slice(0, jobs_per_worker)}), - bid_increments); - - if (counter < max_iterations && counter > 0) { - // Put in a minimal bid to retain items from the last round if no-one else - // bids for them this round - bids.view(-1).index_put_({bid_indices}, epsilon); - } - - // Find the highest bidding worker per job - torch::max_out(high_bids, high_bidders, bids, 0); - torch::gt_out(have_bids, high_bids, 0); - - if (have_bids.all().item()) { - // All jobs were bid for - break; - } - - // Make popular items more expensive - cost.add_(high_bids); - torch::sub_out(value, worker_and_job_to_score, cost); - - bid_indices = ((high_bidders * num_jobs) + jobs_indices).index({have_bids}); - - if (counter < max_iterations) { - // Make sure that this item will be in the winning worker's top-k next - // time. 
- value.view(-1).index_put_({bid_indices}, max_value); - } else { - // Suboptimal approximation that converges quickly from current solution - value.view(-1).index_put_( - {bid_indices}, worker_and_job_to_score.view(-1).index({bid_indices})); - } - - counter += 1; - } - - return top_index.index({Slice(None, None), Slice(0, jobs_per_worker)}) - .reshape(-1); -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { - m.def("balanced_assignment", &balanced_assignment, "Balanced Assignment"); -} diff --git a/spaces/HarshulNanda/VV/app.py b/spaces/HarshulNanda/VV/app.py deleted file mode 100644 index 73ee47924c90a68d99652047bbcd402a8f77f04f..0000000000000000000000000000000000000000 --- a/spaces/HarshulNanda/VV/app.py +++ /dev/null @@ -1,90 +0,0 @@ -import base64 -import os -import time -import streamlit as st -from PIL import Image -from transformers import pipeline - -def zero_shot_classification(text, progress): - classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli") - labels = ["Criminal Activity", "Safe and Ethical"] - - for i in range(0, 101, 10): - progress.progress(i) - time.sleep(0.1) - - result = classifier(text, labels) - return result["labels"][0] - -st.set_page_config( - page_title="Verbal Vanguard", - # page_icon=Image.open("./assets/my_photo.png") -) - -def custom_css(css: str): - st.markdown(f'', unsafe_allow_html=True) - -css = f""" -body, h1, h2, h3, h4, h5, h6, p, div, a, span, label, input, button, textarea {{ - color: white !important; -}} - -::placeholder {{ - color: white !important; - opacity: 1 !important; -}} - -div.stNavBar {{ - background-color: #222 !important; -}} - -div.stButton {{ - background-color: #1B0099 !important; - color: #fff !important; - border-color: #fff !important; -}} - -div.stButton button:hover {{ - background-color: #1573F8 !important; - color: #fff !important; - border-color: #fff !important; -}} - -div.stButton > button:first-child {{ border: 1px solid #FFF; border-radius:20px 20px 20px 20px; background: none;}} - -div.stButton > button:first-child:hover {{ - background: #1B0099; - color: white; -}} - -h1, h3, span {{ - color: #fff; -}} - -footer, header {{ - visibility: hidden; -}} -""" - -custom_css(css) - -# with st.sidebar: -# st.markdown("

    Verbal Vanguard

    Crime & Safety Identifier

    ", unsafe_allow_html=True) - -def main(): - st.title("Verbal Vanguard") - st.markdown("

    Crime & Safety Identifier


    This prototype uses a zero-shot BART-MNLI classifier to evaluate conversations and categorize them as either 'Criminal Activity' or 'Safe and Ethical'. The model is still in its training phase.
    ", unsafe_allow_html=True) - - user_text = st.text_area("Enter a conversation:- ", height=300) - - if st.button("Classify"): - if not user_text.strip(): - st.warning("Please input a conversation to classify.") - else: - progress = st.progress(0) - prediction = zero_shot_classification(user_text, progress) - progress.empty() - st.success(f"Predicted Category: {prediction}") - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/spaces/Harveenchadha/Hindi_TTS/vakyansh_tts/src/hifi_gan/env.py b/spaces/Harveenchadha/Hindi_TTS/vakyansh_tts/src/hifi_gan/env.py deleted file mode 100644 index 2bdbc95d4f7a8bad8fd4f5eef657e2b51d946056..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/Hindi_TTS/vakyansh_tts/src/hifi_gan/env.py +++ /dev/null @@ -1,15 +0,0 @@ -import os -import shutil - - -class AttrDict(dict): - def __init__(self, *args, **kwargs): - super(AttrDict, self).__init__(*args, **kwargs) - self.__dict__ = self - - -def build_env(config, config_name, path): - t_path = os.path.join(path, config_name) - if config != t_path: - os.makedirs(path, exist_ok=True) - shutil.copyfile(config, os.path.join(path, config_name)) diff --git a/spaces/Harveenchadha/Vakyansh-Tamil-TTS/ttsv/utils/inference/run_gradio.py b/spaces/Harveenchadha/Vakyansh-Tamil-TTS/ttsv/utils/inference/run_gradio.py deleted file mode 100644 index 5a31ddcf345c9a25dc638499fe375e44bc3cf785..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/Vakyansh-Tamil-TTS/ttsv/utils/inference/run_gradio.py +++ /dev/null @@ -1,60 +0,0 @@ -import gradio as gr -import argparse -import numpy as np -from argparse import Namespace -from .advanced_tts import load_all_models, run_tts_paragraph - - -def hit_tts(textbox, gender, slider_noise_scale, slider_length_sclae, choice_transliteration, choice_number_conversion, choice_split_sentences): - inputs_to_gradio = {'text' : textbox, - 'gender' : gender, - 'noise_scale': slider_noise_scale, - 'length_scale': slider_length_sclae, - 'transliteration' : 1 if choice_transliteration else 0, - 'number_conversion' : 1 if choice_number_conversion else 0, - 'split_sentences' : 1 if choice_split_sentences else 0 - } - - args = Namespace(**inputs_to_gradio) - args.wav = None - args.lang = lang - args.gender = gender - - if args.text: - sr, audio = run_tts_paragraph(args) - return (sr, audio) - -def build_gradio(args): - global lang - lang = args.lang - load_all_models(args) - textbox = gr.inputs.Textbox(placeholder="Enter Text to run", default="", label="Enter Input Text") - gender = gr.inputs.Radio(choices = ['Female', 'Male'], default='Female', label='Gender') - slider_noise_scale = gr.inputs.Slider(minimum=0, maximum=1.0, step=0.001, default=0.667, label='Noise Scale') - slider_length_sclae = gr.inputs.Slider(minimum=0, maximum=2.0, step=0.1, default=1.0, label='Length Scale') - - choice_transliteration = gr.inputs.Checkbox(default=True, label="Transliteration") - choice_number_conversion = gr.inputs.Checkbox(default=True, label="Number Conversion") - choice_split_sentences = gr.inputs.Checkbox(default=True, label="Split Sentences") - - examples = [['இந்தியா எனது நாடு, நான் இந்தியனாக இருப்பதில் பெருமை கொள்கிறேன்.', 'Male', 0.667, 1, 0, 1, 1]] - - op = gr.outputs.Audio(type="numpy", label=None) - - inputs_to_gradio = [textbox, gender, slider_noise_scale, slider_length_sclae, choice_transliteration, choice_number_conversion, choice_split_sentences] - iface = gr.Interface(fn=hit_tts, examples = examples, inputs=inputs_to_gradio, outputs=op, 
theme='huggingface', title='Vakyansh Tamil TTS', article = 'Note: Transliteration models may not work well in some scenarios which can hamper the TTS quality, to evaluate the model in better sense it is advisable to provide input in the required language and switch off transliteration. Contact @harveenchadha on twitter for any issues.') - iface.launch(enable_queue=True) - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("-a", "--acoustic", required=True, type=str) - parser.add_argument("-v", "--vocoder", required=True, type=str) - parser.add_argument("-d", "--device", type=str, default="cpu") - parser.add_argument("-L", "--lang", type=str, required=True) - - global lang - - args = parser.parse_args() - lang = args.lang - - build_gradio(args) \ No newline at end of file diff --git a/spaces/Hazem/Pub_face/app.py b/spaces/Hazem/Pub_face/app.py deleted file mode 100644 index 67fcac0171bbb77d2b1d3b23b7293635b6297e28..0000000000000000000000000000000000000000 --- a/spaces/Hazem/Pub_face/app.py +++ /dev/null @@ -1,142 +0,0 @@ -import os - -import cv2 -import gradio as gr -import torch -from basicsr.archs.srvgg_arch import SRVGGNetCompact -from gfpgan.utils import GFPGANer -from realesrgan.utils import RealESRGANer - -os.system("pip freeze") -# download weights -if not os.path.exists('realesr-general-x4v3.pth'): - os.system("wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth -P .") -if not os.path.exists('GFPGANv1.2.pth'): - os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.2.pth -P .") -if not os.path.exists('GFPGANv1.3.pth'): - os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth -P .") -if not os.path.exists('GFPGANv1.4.pth'): - os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth -P .") -if not os.path.exists('RestoreFormer.pth'): - os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/RestoreFormer.pth -P .") -if not os.path.exists('CodeFormer.pth'): - os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/CodeFormer.pth -P .") - -torch.hub.download_url_to_file( - 'https://thumbs.dreamstime.com/b/tower-bridge-traditional-red-bus-black-white-colors-view-to-tower-bridge-london-black-white-colors-108478942.jpg', - 'a1.jpg') -torch.hub.download_url_to_file( - 'https://media.istockphoto.com/id/523514029/photo/london-skyline-b-w.jpg?s=612x612&w=0&k=20&c=kJS1BAtfqYeUDaORupj0sBPc1hpzJhBUUqEFfRnHzZ0=', - 'a2.jpg') -torch.hub.download_url_to_file( - 'https://i.guim.co.uk/img/media/06f614065ed82ca0e917b149a32493c791619854/0_0_3648_2789/master/3648.jpg?width=700&quality=85&auto=format&fit=max&s=05764b507c18a38590090d987c8b6202', - 'a3.jpg') -torch.hub.download_url_to_file( - 'https://i.pinimg.com/736x/46/96/9e/46969eb94aec2437323464804d27706d--victorian-london-victorian-era.jpg', - 'a4.jpg') - -# background enhancer with RealESRGAN -model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu') -model_path = 'realesr-general-x4v3.pth' -half = True if torch.cuda.is_available() else False -upsampler = RealESRGANer(scale=4, model_path=model_path, model=model, tile=0, tile_pad=10, pre_pad=0, half=half) - -os.makedirs('output', exist_ok=True) - - -# def inference(img, version, scale, weight): -def inference(img, version, scale): - # weight /= 100 - print(img, version, scale) - try: - extension = 
os.path.splitext(os.path.basename(str(img)))[1] - img = cv2.imread(img, cv2.IMREAD_UNCHANGED) - if len(img.shape) == 3 and img.shape[2] == 4: - img_mode = 'RGBA' - elif len(img.shape) == 2: # for gray inputs - img_mode = None - img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) - else: - img_mode = None - - h, w = img.shape[0:2] - if h < 300: - img = cv2.resize(img, (w * 2, h * 2), interpolation=cv2.INTER_LANCZOS4) - - if version == 'v1.2': - face_enhancer = GFPGANer( - model_path='GFPGANv1.2.pth', upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=upsampler) - elif version == 'v1.3': - face_enhancer = GFPGANer( - model_path='GFPGANv1.3.pth', upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=upsampler) - elif version == 'v1.4': - face_enhancer = GFPGANer( - model_path='GFPGANv1.4.pth', upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=upsampler) - elif version == 'RestoreFormer': - face_enhancer = GFPGANer( - model_path='RestoreFormer.pth', upscale=2, arch='RestoreFormer', channel_multiplier=2, bg_upsampler=upsampler) - elif version == 'CodeFormer': - face_enhancer = GFPGANer( - model_path='CodeFormer.pth', upscale=2, arch='CodeFormer', channel_multiplier=2, bg_upsampler=upsampler) - elif version == 'RealESR-General-x4v3': - face_enhancer = GFPGANer( - model_path='realesr-general-x4v3.pth', upscale=2, arch='realesr-general', channel_multiplier=2, bg_upsampler=upsampler) - - try: - # _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True, weight=weight) - _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True) - except RuntimeError as error: - print('Error', error) - - try: - if scale != 2: - interpolation = cv2.INTER_AREA if scale < 2 else cv2.INTER_LANCZOS4 - h, w = img.shape[0:2] - output = cv2.resize(output, (int(w * scale / 2), int(h * scale / 2)), interpolation=interpolation) - except Exception as error: - print('wrong scale input.', error) - if img_mode == 'RGBA': # RGBA images should be saved in png format - extension = 'png' - else: - extension = 'jpg' - save_path = f'output/out.{extension}' - cv2.imwrite(save_path, output) - - output = cv2.cvtColor(output, cv2.COLOR_BGR2RGB) - return output, save_path - except Exception as error: - print('global exception', error) - return None, None - - -title = "Image Upscaling & Restoration(esp. Face) using GFPGAN Algorithm" -description = r"""Gradio demo for GFPGAN: Towards Real-World Blind Face Restoration and Upscalling of the image with a Generative Facial Prior.
-In practice, the algorithm is used to restore your **old photos** or improve **AI-generated faces**.
-To use it, simply upload the image you want to restore.
    -""" -article = r""" -[![download](https://img.shields.io/github/downloads/TencentARC/GFPGAN/total.svg)](https://github.com/TencentARC/GFPGAN/releases) -[![GitHub Stars](https://img.shields.io/github/stars/TencentARC/GFPGAN?style=social)](https://github.com/TencentARC/GFPGAN) -[![arXiv](https://img.shields.io/badge/arXiv-Paper-.svg)](https://arxiv.org/abs/2101.04061) -
    visitor badge
    -""" -demo = gr.Interface( - inference, [ - gr.inputs.Image(type="filepath", label="Input"), - # gr.inputs.Radio(['v1.2', 'v1.3', 'v1.4', 'RestoreFormer', 'CodeFormer'], type="value", default='v1.4', label='version'), - gr.inputs.Radio(['v1.2', 'v1.3', 'v1.4', 'RestoreFormer','CodeFormer','RealESR-General-x4v3'], type="value", default='v1.4', label='version'), - gr.inputs.Number(label="Rescaling factor", default=2), - # gr.Slider(0, 100, label='Weight, only for CodeFormer. 0 for better quality, 100 for better identity', default=50) - ], [ - gr.outputs.Image(type="numpy", label="Output (The whole image)"), - gr.outputs.File(label="Download the output image") - ], - title=title, - description=description, - article=article, - # examples=[['AI-generate.jpg', 'v1.4', 2, 50], ['lincoln.jpg', 'v1.4', 2, 50], ['Blake_Lively.jpg', 'v1.4', 2, 50], - # ['10045.png', 'v1.4', 2, 50]]).launch() - examples=[['a1.jpg', 'v1.4', 2], ['a2.jpg', 'v1.4', 2], ['a3.jpg', 'v1.4', 2],['a4.jpg', 'v1.4', 2]]) - -demo.queue(concurrency_count=4) -demo.launch() \ No newline at end of file diff --git a/spaces/Hexamind/GDOC/src/view/view.py b/spaces/Hexamind/GDOC/src/view/view.py deleted file mode 100644 index c148d23a0538f8228e2b7b13bb6e131154db5d82..0000000000000000000000000000000000000000 --- a/spaces/Hexamind/GDOC/src/view/view.py +++ /dev/null @@ -1,407 +0,0 @@ -from random import randint -import gradio as gr -from typing import Dict -import asyncio -import os -from src.control.controller import Controller -import tkinter as ttk -from Levenshtein import distance -from src.tools.list_tool import keep_last_occurrences - - -def run(config: Dict, controller: Controller): - - """ - ===================================================== - Global variables - ================ - """ - controller.clear_docs() - #can u make me a title for the app and add a gradio component for it using the css above - title = "

    Automatic Document Generation

    " - with gr.Blocks() as formatdoc: - gr.Markdown(title) - gr.Markdown("

    _________________________________________

    ") - with gr.Row(): - with gr.Column(): - pass - with gr.Column(scale=10): - """ - ===================================================== - Input and style components - ========================== - """ - - input_files_comp = gr.File(file_count="multiple", file_types=[".docx"]) - - with gr.Accordion("Modifier automatiquement les styles", open=False) as style_acc: - templates_radio = gr.Radio( - label="Templates", - choices=config['templates'], - value=config['templates'][config['default_template_index']], - ) - options_btn = gr.CheckboxGroup(choices=config['options'], - label="Options", - interactive=True,) - - with gr.Accordion("Mapper les styles qui n'existent pas dans le template", open=False) \ - as newstyles_acc: - with gr.Column(scale=2): - newstyle_comps = [gr.Dropdown(visible=False, interactive=True) - for _ in range(config['max_styles'])] - - log_comp = gr.Textbox(label="Journal des modifications", visible=False) - - output_styles_files_comp = gr.File(file_count="multiple", file_types=[".docx"], visible=False) - - with gr.Row(): - run_style_btn = gr.Button("Appliquer le template et les modifications de style", visible=False) - clear_style_btn = gr.Button("Annuler les modifications de style", visible=False) - - """ - =============================================== - Generation components - ====================== - """ - with gr.Accordion("Générer automatiquement une première version du document", open=False) as gen_acc: - - generate_option_btn = gr.Radio( - label="Automatically generate a draft based on your own database", - choices=["Auto generation", "No generation"], - value="No generation", - interactive=True, - visible=False, - ) - - db_list_comp = gr.CheckboxGroup( - label="Base de connaissance", - info="Ces documents constituent la source de référence. 
Désélectionner pour qu'ils ne soient " - "pas pris en compte lors de la génération automatiqueF", - visible=True, - interactive=True, - ) - db_reset_btn = gr.Button("Effacer la base de connaissance", visible=False) \ - .style(full_width=False, size="sm") - with gr.Accordion("Ajouter des documents dans la base de connaissance", open=False) as add_acc: - with gr.Column(visible=True, variant="panel") as add_col: - with gr.Tab("Depuis Wikipedia"): - wiki_fetch_btn = gr.Button("Rechercher les pages Wikipedia", visible=True) - wiki_fetch_btn.style(full_width=False, size="sm") - wiki_list_comp = gr.CheckboxGroup( - label="Sélectionner les pages à ajouter dans la base de connaissance", - visible=False, - interactive=True, - ) - - with gr.Column(): - wiki_add_to_db_btn = \ - gr.Button("Ajouter les documents sélectionnés à la base de connaissance", - visible=False) - wiki_add_to_db_btn.style(full_width=False, size="sm") - - wiki_clear_btn = gr.Button("Effacer les choix de documents", visible=False) \ - .style(full_width=False, size="sm") - - with gr.Tab("Depuis le disque local"): - my_files_list_comp = gr.Files( - label="Charger ses documents", - info="Les documents fournissent le contexte utilisé pour la génération de texte", - visible=True, - ) - my_files_add_to_db_btn = gr.Button("Add files to sources", visible=False) - my_files_add_to_db_btn.style(full_width=False, size="sm") - - add_close_btn = gr.Button("Close", visible=False).style(size='sm', full_width=False) - with gr.Row(): - db_add_doc_btn = gr.Button("Ajouter de nouveaux documents", visible=False)\ - .style(full_width=False, size="sm") - - output_files_comp = gr.Files(file_count="multiple", visible=False) - - generate_btn = gr.Button("Générer", interactive=True) - - clear_btn = gr.Button('Nettoyer', visible=False) - rerun_btn = gr.Button('Relancer', visible=False) - - with gr.Column(): - pass - - """ - =================================================== - state variables - =============== - """ - wiki_source_var: [str] = gr.State([]) # list of wikipage titles of interest for the input text tasks - wiki_db_var: [str] = gr.State([]) # list of wiki document titles in the db (as seen from the UI) - my_files_db_var: [str] = gr.State([]) # list of titles of the files uploaded in the db (as seen from the UI) - db_collection_var: str = gr.State("-1") # name of the collection of documents sources in the db # list of styles to modify - - """ - =================================================== - Input and styles functions and listeners - ======================================== - """ - - def input_files_upload_fn(input_files_): - for files in input_files_: - if(not files.name.endswith('.docx')): - raise gr.Error(f'File {files.name} is not a docx file, please upload only docx files') - else: - continue - controller.copy_docs(input_files_) - update_ = { - newstyles_acc: gr.update(open=True), - style_acc: gr.update(open=False,visible=True), - run_style_btn: gr.update(visible=True), - clear_style_btn: gr.update(visible=True), - } - newstyles_update = newstyles_fn() - update_.update(newstyles_update) - return update_ - - input_files_comp.upload(input_files_upload_fn, - inputs=[input_files_comp], - outputs=[style_acc, newstyles_acc, run_style_btn, clear_style_btn] + newstyle_comps - ) - - def input_file_clear_fn(): - controller.clear_docs() - update_ = { - options_btn: gr.update(value=[]), - log_comp: gr.update(value="", visible=False), - output_styles_files_comp: gr.update(value=[], visible=False), - newstyles_acc: gr.update(open=False), - style_acc: 
gr.update(open=False), - gen_acc: gr.update(open=False), - output_files_comp: gr.update(visible=False), - run_style_btn: gr.update(visible=False), - clear_style_btn: gr.update(visible=False), - } - newstyles_update_ = newstyles_reset() - update_.update(newstyles_update_) - return update_ - - input_files_comp.clear( - input_file_clear_fn, - inputs=[], - outputs=[options_btn, output_styles_files_comp, output_files_comp, log_comp, newstyles_acc, - gen_acc, style_acc, run_style_btn, clear_style_btn] + newstyle_comps - ) - - - - def newstyles_fn(): - different_styles, template_styles = controller.get_difference_with_template() - update_ = {} - get_label = lambda i: f"document: {different_styles[i]['doc'].name} style: {different_styles[i]['style']}" - newstyles_update_ = { - newstyle_comps[i]: gr.update(visible=i < len(different_styles), - #sort the styles using levenstein distance function - choices=sorted(template_styles, key=lambda x: distance(x, different_styles[i]['style'])), - value=None, - label=get_label(i)) if i < len(different_styles) else '' - for i in range(config['max_styles']) - } - update_.update(newstyles_update_) - return update_ - - - def newstyles_reset(): - update_ = { - newstyle_comps[i]: gr.update(visible=False, - choices=[], - value=None, - label='') - for i in range(config['max_styles']) - } - return update_ - - def templates_fn(templates_): - controller.set_template(templates_) - update_ = newstyles_fn() - return update_ - - templates_radio.change(templates_fn, - inputs=[templates_radio], - outputs=newstyle_comps) - - def newstyle_fns(src_index: int): - def newstyle_fn(newstyle_): - controller.update_style(src_index, newstyle_) - return newstyle_fn - - for src_index, newstyle_comp in enumerate(newstyle_comps): - newstyle_comp.input(newstyle_fns(src_index), inputs=[newstyle_comp], outputs=[]) - - - def clear_style_fn(input_files_): - controller.clear_docs() - if input_files_: - controller.copy_docs(input_files_) - controller.set_template() - update_ = { - options_btn: gr.update(value=[]), - log_comp: gr.update(value="", visible=False), - output_styles_files_comp: gr.update(value=[], visible=False), - newstyles_acc: gr.update(open=False), - run_style_btn: gr.update(visible=True), - templates_radio: gr.update(value=config['templates'][config['default_template_index']]), - } - newstyles_update_ = newstyles_fn() - update_.update(newstyles_update_) - return update_ - - clear_style_btn.click(clear_style_fn, - inputs=[input_files_comp], - outputs=[options_btn, output_styles_files_comp, log_comp, newstyles_acc, run_style_btn, templates_radio] - + newstyle_comps - ) - - def run_style_fn(options_btn_): - print(f"options activated : {options_btn_}") - controller.apply_template(options_btn_) - log = controller.get_log() - new_docs_path = controller.generated_docs_path - output_paths = [f"{new_docs_path}/{f}" for f in os.listdir(new_docs_path)] - print(f"output_paths: {output_paths}") - update_ = { - log_comp: gr.update(value=log, visible=True), - output_styles_files_comp: gr.update(value=output_paths, visible=True), - run_style_btn: gr.update(visible=False), - } - return update_ - - - run_style_btn.click(run_style_fn, - inputs=[options_btn], - outputs=[log_comp, output_styles_files_comp, run_style_btn] + newstyle_comps) - - """ - ===================================================== - Generation functions - ==================== - """ - - def generate_option_fn(db_collection_): - id_ = controller.get_or_create_collection(db_collection_) - update_ = { - db_collection_var: id_, - } - 
return update_ - - def wiki_fetch1_fn(): - """ - fetch the wikifiles interesting for solving the tasks as defined in the input doc - """ - update_ = { - wiki_list_comp: gr.update(visible=True), - } - return update_ - - async def wiki_fetch2_fn(): - """ - fetch the wikifiles interesting for solving the tasks as defined in the input doc - """ - wiki_interesting_files = await controller.wiki_fetch() - wiki_files = wiki_interesting_files # [w for w in wiki_interesting_files if w not in wiki_db_files_] - update_ = { - wiki_list_comp: gr.update(visible=True, value=[], choices=wiki_files), - wiki_source_var: wiki_interesting_files, - wiki_add_to_db_btn: gr.update(visible=True), - # wiki_clear_btn: gr.update(visible=True), #Button to clear the choices that are by default all ticked - } - return update_ - - async def wiki_add_to_db_fn(wiki_list_, wiki_source_, wiki_db_, db_list_, db_collection_): - """ - adds the wikipages to the db source - """ - wiki_to_add = [wiki for wiki in wiki_list_ if wiki not in wiki_db_] - db_list_ += wiki_to_add - wiki_db_ += wiki_to_add - wiki_source_remaining = [wiki for wiki in wiki_source_ if wiki not in wiki_db_] - async_upload_and_store_tasks = [asyncio.create_task(controller.wiki_upload_and_store(wiki, db_collection_)) for wiki in wiki_to_add] # A DEPLACER DANS LE CONTROLLER - await asyncio.gather(*async_upload_and_store_tasks) - db_not_empty = 0 < len(db_list_) - wiki_to_add_not_empty = 0 < len(wiki_source_remaining) - update_ = { - wiki_db_var: wiki_db_, - wiki_list_comp: gr.update(value=[], choices=wiki_source_remaining), - wiki_add_to_db_btn: gr.update(visible=wiki_to_add_not_empty), - db_list_comp: gr.update( - visible=True, - value=db_list_, - choices=db_list_, - label="Database content"), - db_reset_btn: gr.update(visible=db_not_empty), - generate_btn: gr.update(visible=True, interactive=db_not_empty), - add_acc: gr.update(open=False), - } - return update_ - - def generate_fn1(): - update_ = { - output_files_comp: gr.update(visible=True) - } - return update_ - - async def generate_fn2(db_collection_, db_list_): - output_files = await controller.generate_doc_from_db(collection_name=db_collection_, - from_files=db_list_) - update_ = { - output_files_comp: gr.update(value=output_files, visible=True), - } - return update_ - - - """ - ===================================================== - Generation listeners - ==================== - """ - - wiki_fetch_btn \ - .click(wiki_fetch1_fn, inputs=[], outputs=[wiki_list_comp]) \ - .then(wiki_fetch2_fn, - inputs=[], - outputs=[wiki_list_comp, wiki_source_var, wiki_add_to_db_btn, wiki_clear_btn]) - - wiki_add_to_db_btn\ - .click(generate_option_fn, - inputs=[db_collection_var], - outputs=[db_collection_var])\ - .then(wiki_add_to_db_fn, - inputs=[wiki_list_comp, wiki_source_var, wiki_db_var, db_list_comp, db_collection_var], - outputs=[db_list_comp, wiki_list_comp, wiki_db_var, - generate_btn, wiki_add_to_db_btn, db_reset_btn, add_acc]) - - generate_btn\ - .click(generate_fn1, - inputs=[], - outputs=[output_files_comp])\ - .then(generate_fn2, - inputs=[db_collection_var, db_list_comp], - outputs=[output_files_comp]) - - - """ - ===================================================== - Clear and rerun functions and listeners - ======================================= - """ - - def clear_fn(): - update_ = { - input_files_comp: gr.update(value=None), - output_files_comp: gr.update(value=None, visible=False), - clear_btn: gr.update(visible=False), - rerun_btn: gr.update(visible=False), - } - return update_ - - 
clear_btn.click(clear_fn, - inputs=[], - outputs=[input_files_comp, output_files_comp, clear_btn, rerun_btn]) - - # wiki_clear_btn.click(clear_choices_fn, inputs=[], outputs=[wiki_list_comp]) #listener for the clear button of the wiki choices - return formatdoc diff --git a/spaces/ICML2022/OFA/fairseq/examples/latent_depth/latent_depth_src/models/latent_transformer.py b/spaces/ICML2022/OFA/fairseq/examples/latent_depth/latent_depth_src/models/latent_transformer.py deleted file mode 100644 index 6a825301a452bd935deafdaf78fa2427ca9a469e..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/latent_depth/latent_depth_src/models/latent_transformer.py +++ /dev/null @@ -1,156 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from typing import Any, Dict, Optional - -import torch.nn as nn -from fairseq.models.fairseq_encoder import EncoderOut -from fairseq.models.transformer import TransformerDecoder, TransformerEncoder -from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer -from torch import Tensor - -from ..modules.latent_layers import LayerSelect - - -class LatentTransformerEncoder(TransformerEncoder): - """Latent depth (https://arxiv.org/abs/2009.13102) implemented in - TransformerEncoder. - """ - - def __init__(self, args, dictionary, embed_tokens, num_logits=1): - self.num_logits = num_logits - self.num_layers = args.encoder_layers - super().__init__(args, dictionary, embed_tokens) - self.layer_select = LayerSelect( - num_layers=self.num_layers, - num_logits=self.num_logits, - soft_select=getattr(args, "soft_select", False), - sampling_tau=getattr(args, "sampling_tau", 5.), - ) - self.lang_idx = None - self.layers = nn.ModuleList( - [self._build_encoder_layer(args, idx) for idx in range(args.encoder_layers)] - ) - - def set_lang_idx(self, lang_idx): - self.lang_idx = lang_idx - - def _build_encoder_layer(self, args, idx=None): - return LatentTransformerEncoderLayer(args, idx, layer_select=self.layer_select) - - def forward(self, src_tokens, src_lengths, return_all_hiddens: bool = False): - self.layer_select.sample(self.lang_idx) - return super().forward(src_tokens, src_lengths, return_all_hiddens) - - -class LatentTransformerEncoderLayer(TransformerEncoderLayer): - """Encoder layer with each (non_residual) block weighted by samples of Bernouli - or Gumbel Signmoid samples. - - Args: - args (argparse.Namespace): parsed command-line arguments from standard - TransformerEncoderLayer. - idx (int): layer index (used to retrieve samples). - layer_select (LayerSelect, optional): instance of LayerSelect module with logits - parameters and sampling method. - """ - - def __init__(self, args, idx, layer_select=None): - super().__init__(args) - self.idx = idx - self.layer_select = layer_select - - def residual_connection(self, x, residual): - return residual + x * self.layer_select(self.idx) - - -class LatentTransformerDecoder(TransformerDecoder): - """Latent depth (https://arxiv.org/abs/2009.13102) implemented in - TransformerDecoder. 
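- Each decoder layer's residual branch is scaled by a per-layer sample drawn
- from `LayerSelect` for the language index set via `set_lang_idx`, so layers
- can be softly weighted or effectively skipped on a per-language basis.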
- """ - - def __init__( - self, args, dictionary, embed_tokens, no_encoder_attn=False, num_logits=1 - ): - self.num_logits = num_logits - self.num_layers = args.decoder_layers - super().__init__( - args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn - ) - self.layer_select = LayerSelect( - num_layers=self.num_layers, - num_logits=self.num_logits, - soft_select=getattr(args, "soft_select", False), - sampling_tau=getattr(args, "sampling_tau", 5.), - ) - self.lang_idx = None - self.layers = nn.ModuleList( - [ - self._build_decoder_layer(args, no_encoder_attn, idx) - for idx in range(args.decoder_layers) - ] - ) - - def set_lang_idx(self, lang_idx): - self.lang_idx = lang_idx - - def _build_decoder_layer(self, args, no_encoder_attn=False, idx=None): - return LatentTransformerDecoderLayer( - args, idx, layer_select=self.layer_select, no_encoder_attn=no_encoder_attn - ) - - def forward( - self, - prev_output_tokens, - encoder_out: Optional[EncoderOut] = None, - incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, - features_only: bool = False, - alignment_layer: Optional[int] = None, - alignment_heads: Optional[int] = None, - src_lengths: Optional[Any] = None, - return_all_hiddens: bool = False, - ): - self.layer_select.sample(self.lang_idx) - return super().forward( - prev_output_tokens=prev_output_tokens, - encoder_out=encoder_out, - incremental_state=incremental_state, - features_only=features_only, - alignment_layer=alignment_layer, - src_lengths=src_lengths, - return_all_hiddens=return_all_hiddens, - ) - - -class LatentTransformerDecoderLayer(TransformerDecoderLayer): - """Decoder layer with each (non_residual) block weighted by samples of Bernouli - or Gumbel Signmoid samples. - - Args: - args (argparse.Namespace): parsed command-line arguments from standard - TransformerDecoderLayer. - idx (int): layer index (used to retrieve samples). - layer_select (LayerSelect, optional): instance of LayerSelect module with logits - parameters and sampling method. - no_encoder_attn (bool, optional): whether to attend to encoder outputs - (default: False). - - """ - - def __init__( - self, - args, - idx, - layer_select=None, - no_encoder_attn=False, - add_bias_kv=False, - add_zero_attn=False, - ): - super().__init__(args, no_encoder_attn, add_bias_kv, add_zero_attn) - self.idx = idx - self.layer_select = layer_select - - def residual_connection(self, x, residual): - return residual + x * self.layer_select(self.idx) diff --git a/spaces/ICML2022/OFA/fairseq/examples/xlmr/README.md b/spaces/ICML2022/OFA/fairseq/examples/xlmr/README.md deleted file mode 100644 index b95bfe15d3fe6d03951453679135c2e9187d73c7..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/xlmr/README.md +++ /dev/null @@ -1,144 +0,0 @@ -# Unsupervised Cross-lingual Representation Learning at Scale (XLM-RoBERTa) -https://arxiv.org/pdf/1911.02116.pdf - -# Larger-Scale Transformers for Multilingual Masked Language Modeling -https://arxiv.org/pdf/2105.00572.pdf - - -## What's New: -- June 2021: `XLMR-XL` AND `XLMR-XXL` models released. - -## Introduction - -`XLM-R` (`XLM-RoBERTa`) is a generic cross lingual sentence encoder that obtains state-of-the-art results on many cross-lingual understanding (XLU) benchmarks. It is trained on `2.5T` of filtered CommonCrawl data in 100 languages (list below). 
- - Language | Language|Language |Language | Language ----|---|---|---|--- -Afrikaans | Albanian | Amharic | Arabic | Armenian -Assamese | Azerbaijani | Basque | Belarusian | Bengali -Bengali Romanize | Bosnian | Breton | Bulgarian | Burmese -Burmese zawgyi font | Catalan | Chinese (Simplified) | Chinese (Traditional) | Croatian -Czech | Danish | Dutch | English | Esperanto -Estonian | Filipino | Finnish | French | Galician -Georgian | German | Greek | Gujarati | Hausa -Hebrew | Hindi | Hindi Romanize | Hungarian | Icelandic -Indonesian | Irish | Italian | Japanese | Javanese -Kannada | Kazakh | Khmer | Korean | Kurdish (Kurmanji) -Kyrgyz | Lao | Latin | Latvian | Lithuanian -Macedonian | Malagasy | Malay | Malayalam | Marathi -Mongolian | Nepali | Norwegian | Oriya | Oromo -Pashto | Persian | Polish | Portuguese | Punjabi -Romanian | Russian | Sanskrit | Scottish Gaelic | Serbian -Sindhi | Sinhala | Slovak | Slovenian | Somali -Spanish | Sundanese | Swahili | Swedish | Tamil -Tamil Romanize | Telugu | Telugu Romanize | Thai | Turkish -Ukrainian | Urdu | Urdu Romanize | Uyghur | Uzbek -Vietnamese | Welsh | Western Frisian | Xhosa | Yiddish - -## Pre-trained models - -Model | Description | #params | vocab size | Download ----|---|---|---|--- -`xlmr.base` | XLM-R using the BERT-base architecture | 250M | 250k | [xlm.base.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/xlmr.base.tar.gz) -`xlmr.large` | XLM-R using the BERT-large architecture | 560M | 250k | [xlm.large.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/xlmr.large.tar.gz) -`xlmr.xl` | XLM-R (`layers=36, model_dim=2560`) | 3.5B | 250k | [xlm.xl.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/xlmr/xlmr.xl.tar.gz) -`xlmr.xxl` | XLM-R (`layers=48, model_dim=4096`) | 10.7B | 250k | [xlm.xxl.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/xlmr/xlmr.xxl.tar.gz) - -## Results - -**[XNLI (Conneau et al., 2018)](https://arxiv.org/abs/1809.05053)** - -Model | average | en | fr | es | de | el | bg | ru | tr | ar | vi | th | zh | hi | sw | ur ----|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|--- -`roberta.large.mnli` _(TRANSLATE-TEST)_ | 77.8 | 91.3 | 82.9 | 84.3 | 81.2 | 81.7 | 83.1 | 78.3 | 76.8 | 76.6 | 74.2 | 74.1 | 77.5 | 70.9 | 66.7 | 66.8 -`xlmr.large` _(TRANSLATE-TRAIN-ALL)_ | 83.6 | 89.1 | 85.1 | 86.6 | 85.7 | 85.3 | 85.9 | 83.5 | 83.2 | 83.1 | 83.7 | 81.5 | 83.7 | 81.6 | 78.0 | 78.1 -`xlmr.xl` _(TRANSLATE-TRAIN-ALL)_ | 85.4 | 91.1 | 87.2 | 88.1 | 87.0 | 87.4 | 87.8 | 85.3 | 85.2 | 85.3 | 86.2 | 83.8 | 85.3 | 83.1 | 79.8 | 78.2 | 85.4 -`xlmr.xxl` _(TRANSLATE-TRAIN-ALL)_ | 86.0 | 91.5 | 87.6 | 88.7 | 87.8 | 87.4 | 88.2 | 85.6 | 85.1 | 85.8 | 86.3 | 83.9 | 85.6 | 84.6 | 81.7 | 80.6 - -**[MLQA (Lewis et al., 2018)](https://arxiv.org/abs/1910.07475)** - -Model | average | en | es | de | ar | hi | vi | zh ----|---|---|---|---|---|---|---|--- -`BERT-large` | - | 80.2/67.4 | - | - | - | - | - | - -`mBERT` | 57.7 / 41.6 | 77.7 / 65.2 | 64.3 / 46.6 | 57.9 / 44.3 | 45.7 / 29.8| 43.8 / 29.7 | 57.1 / 38.6 | 57.5 / 37.3 -`xlmr.large` | 70.7 / 52.7 | 80.6 / 67.8 | 74.1 / 56.0 | 68.5 / 53.6 | 63.1 / 43.5 | 69.2 / 51.6 | 71.3 / 50.9 | 68.0 / 45.4 -`xlmr.xl` | 73.4 / 55.3 | 85.1 / 72.6 | 66.7 / 46.2 | 70.5 / 55.5 | 74.3 / 56.9 | 72.2 / 54.7 | 74.4 / 52.9 | 70.9 / 48.5 -`xlmr.xxl` | 74.8 / 56.6 | 85.5 / 72.4 | 68.6 / 48.4 | 72.7 / 57.8 | 75.4 / 57.6 | 73.7 / 55.8 | 76.0 / 55.0 | 71.7 / 48.9 - - -## Example usage - -##### Load XLM-R from torch.hub (PyTorch >= 1.1): -```python -import torch -xlmr = 
torch.hub.load('pytorch/fairseq', 'xlmr.large') -xlmr.eval() # disable dropout (or leave in train mode to finetune) -``` - -##### Load XLM-R (for PyTorch 1.0 or custom models): -```python -# Download xlmr.large model -wget https://dl.fbaipublicfiles.com/fairseq/models/xlmr.large.tar.gz -tar -xzvf xlmr.large.tar.gz - -# Load the model in fairseq -from fairseq.models.roberta import XLMRModel -xlmr = XLMRModel.from_pretrained('/path/to/xlmr.large', checkpoint_file='model.pt') -xlmr.eval() # disable dropout (or leave in train mode to finetune) -``` - -##### Apply sentence-piece-model (SPM) encoding to input text: -```python -en_tokens = xlmr.encode('Hello world!') -assert en_tokens.tolist() == [0, 35378, 8999, 38, 2] -xlmr.decode(en_tokens) # 'Hello world!' - -zh_tokens = xlmr.encode('你好,世界') -assert zh_tokens.tolist() == [0, 6, 124084, 4, 3221, 2] -xlmr.decode(zh_tokens) # '你好,世界' - -hi_tokens = xlmr.encode('नमस्ते दुनिया') -assert hi_tokens.tolist() == [0, 68700, 97883, 29405, 2] -xlmr.decode(hi_tokens) # 'नमस्ते दुनिया' - -ar_tokens = xlmr.encode('مرحبا بالعالم') -assert ar_tokens.tolist() == [0, 665, 193478, 258, 1705, 77796, 2] -xlmr.decode(ar_tokens) # 'مرحبا بالعالم' - -fr_tokens = xlmr.encode('Bonjour le monde') -assert fr_tokens.tolist() == [0, 84602, 95, 11146, 2] -xlmr.decode(fr_tokens) # 'Bonjour le monde' -``` - -##### Extract features from XLM-R: -```python -# Extract the last layer's features -last_layer_features = xlmr.extract_features(zh_tokens) -assert last_layer_features.size() == torch.Size([1, 6, 1024]) - -# Extract all layer's features (layer 0 is the embedding layer) -all_layers = xlmr.extract_features(zh_tokens, return_all_hiddens=True) -assert len(all_layers) == 25 -assert torch.all(all_layers[-1] == last_layer_features) -``` - -## Citation - -```bibtex -@article{conneau2019unsupervised, - title={Unsupervised Cross-lingual Representation Learning at Scale}, - author={Conneau, Alexis and Khandelwal, Kartikay and Goyal, Naman and Chaudhary, Vishrav and Wenzek, Guillaume and Guzm{\'a}n, Francisco and Grave, Edouard and Ott, Myle and Zettlemoyer, Luke and Stoyanov, Veselin}, - journal={arXiv preprint arXiv:1911.02116}, - year={2019} -} -``` - - -```bibtex -@article{goyal2021larger, - title={Larger-Scale Transformers for Multilingual Masked Language Modeling}, - author={Goyal, Naman and Du, Jingfei and Ott, Myle and Anantharaman, Giri and Conneau, Alexis}, - journal={arXiv preprint arXiv:2105.00572}, - year={2021} -} -``` diff --git a/spaces/Ibtehaj10/cheating-detection-FYP/yolovs5/classify/val.py b/spaces/Ibtehaj10/cheating-detection-FYP/yolovs5/classify/val.py deleted file mode 100644 index 8657036fb2a23d7388240c31d36b67b95877ec12..0000000000000000000000000000000000000000 --- a/spaces/Ibtehaj10/cheating-detection-FYP/yolovs5/classify/val.py +++ /dev/null @@ -1,170 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Validate a trained YOLOv5 classification model on a classification dataset - -Usage: - $ bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images) - $ python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate ImageNet - -Usage - formats: - $ python classify/val.py --weights yolov5s-cls.pt # PyTorch - yolov5s-cls.torchscript # TorchScript - yolov5s-cls.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s-cls_openvino_model # OpenVINO - yolov5s-cls.engine # TensorRT - yolov5s-cls.mlmodel # CoreML (macOS-only) - yolov5s-cls_saved_model # TensorFlow SavedModel - yolov5s-cls.pb # 
TensorFlow GraphDef - yolov5s-cls.tflite # TensorFlow Lite - yolov5s-cls_edgetpu.tflite # TensorFlow Edge TPU - yolov5s-cls_paddle_model # PaddlePaddle -""" - -import argparse -import os -import sys -from pathlib import Path - -import torch -from tqdm import tqdm - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[1] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative - -from models.common import DetectMultiBackend -from utils.dataloaders import create_classification_dataloader -from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_img_size, check_requirements, colorstr, - increment_path, print_args) -from utils.torch_utils import select_device, smart_inference_mode - - -@smart_inference_mode() -def run( - data=ROOT / '../datasets/mnist', # dataset dir - weights=ROOT / 'yolov5s-cls.pt', # model.pt path(s) - batch_size=128, # batch size - imgsz=224, # inference size (pixels) - device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu - workers=8, # max dataloader workers (per RANK in DDP mode) - verbose=False, # verbose output - project=ROOT / 'runs/val-cls', # save to project/name - name='exp', # save to project/name - exist_ok=False, # existing project/name ok, do not increment - half=False, # use FP16 half-precision inference - dnn=False, # use OpenCV DNN for ONNX inference - model=None, - dataloader=None, - criterion=None, - pbar=None, -): - # Initialize/load model and set device - training = model is not None - if training: # called by train.py - device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model - half &= device.type != 'cpu' # half precision only supported on CUDA - model.half() if half else model.float() - else: # called directly - device = select_device(device, batch_size=batch_size) - - # Directories - save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run - save_dir.mkdir(parents=True, exist_ok=True) # make dir - - # Load model - model = DetectMultiBackend(weights, device=device, dnn=dnn, fp16=half) - stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine - imgsz = check_img_size(imgsz, s=stride) # check image size - half = model.fp16 # FP16 supported on limited backends with CUDA - if engine: - batch_size = model.batch_size - else: - device = model.device - if not (pt or jit): - batch_size = 1 # export.py models default to batch-size 1 - LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') - - # Dataloader - data = Path(data) - test_dir = data / 'test' if (data / 'test').exists() else data / 'val' # data/test or data/val - dataloader = create_classification_dataloader(path=test_dir, - imgsz=imgsz, - batch_size=batch_size, - augment=False, - rank=-1, - workers=workers) - - model.eval() - pred, targets, loss, dt = [], [], 0, (Profile(), Profile(), Profile()) - n = len(dataloader) # number of batches - action = 'validating' if dataloader.dataset.root.stem == 'val' else 'testing' - desc = f"{pbar.desc[:-36]}{action:>36}" if pbar else f"{action}" - bar = tqdm(dataloader, desc, n, not training, bar_format=TQDM_BAR_FORMAT, position=0) - with torch.cuda.amp.autocast(enabled=device.type != 'cpu'): - for images, labels in bar: - with dt[0]: - images, labels = images.to(device, non_blocking=True), labels.to(device) - - with dt[1]: - y = model(images) - - with dt[2]: - pred.append(y.argsort(1, descending=True)[:, :5]) - 
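- # argsort(1, descending=True)[:, :5] keeps the five highest-scoring class
- # indices per image; top-1 and top-5 accuracy are computed from these and
- # the collected targets once the loop finishes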
targets.append(labels) - if criterion: - loss += criterion(y, labels) - - loss /= n - pred, targets = torch.cat(pred), torch.cat(targets) - correct = (targets[:, None] == pred).float() - acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1) # (top1, top5) accuracy - top1, top5 = acc.mean(0).tolist() - - if pbar: - pbar.desc = f"{pbar.desc[:-36]}{loss:>12.3g}{top1:>12.3g}{top5:>12.3g}" - if verbose: # all classes - LOGGER.info(f"{'Class':>24}{'Images':>12}{'top1_acc':>12}{'top5_acc':>12}") - LOGGER.info(f"{'all':>24}{targets.shape[0]:>12}{top1:>12.3g}{top5:>12.3g}") - for i, c in model.names.items(): - aci = acc[targets == i] - top1i, top5i = aci.mean(0).tolist() - LOGGER.info(f"{c:>24}{aci.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}") - - # Print results - t = tuple(x.t / len(dataloader.dataset.samples) * 1E3 for x in dt) # speeds per image - shape = (1, 3, imgsz, imgsz) - LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}' % t) - LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") - - return top1, top5, loss - - -def parse_opt(): - parser = argparse.ArgumentParser() - parser.add_argument('--data', type=str, default=ROOT / '../datasets/mnist', help='dataset path') - parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-cls.pt', help='model.pt path(s)') - parser.add_argument('--batch-size', type=int, default=128, help='batch size') - parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=224, help='inference size (pixels)') - parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') - parser.add_argument('--verbose', nargs='?', const=True, default=True, help='verbose output') - parser.add_argument('--project', default=ROOT / 'runs/val-cls', help='save to project/name') - parser.add_argument('--name', default='exp', help='save to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') - parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') - opt = parser.parse_args() - print_args(vars(opt)) - return opt - - -def main(opt): - check_requirements(exclude=('tensorboard', 'thop')) - run(**vars(opt)) - - -if __name__ == "__main__": - opt = parse_opt() - main(opt) diff --git a/spaces/Iceclear/StableSR/StableSR/ldm/models/diffusion/__init__.py b/spaces/Iceclear/StableSR/StableSR/ldm/models/diffusion/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Illumotion/Koboldcpp/include/CL/cl_gl.h b/spaces/Illumotion/Koboldcpp/include/CL/cl_gl.h deleted file mode 100644 index 327746508fa89ecbe46ad61705b997b00361cba0..0000000000000000000000000000000000000000 --- a/spaces/Illumotion/Koboldcpp/include/CL/cl_gl.h +++ /dev/null @@ -1,194 +0,0 @@ -/******************************************************************************* - * Copyright (c) 2008-2021 The Khronos Group Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - ******************************************************************************/ - -#ifndef __OPENCL_CL_GL_H -#define __OPENCL_CL_GL_H - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -typedef cl_uint cl_gl_object_type; -typedef cl_uint cl_gl_texture_info; -typedef cl_uint cl_gl_platform_info; -typedef struct __GLsync *cl_GLsync; - -/* cl_gl_object_type = 0x2000 - 0x200F enum values are currently taken */ -#define CL_GL_OBJECT_BUFFER 0x2000 -#define CL_GL_OBJECT_TEXTURE2D 0x2001 -#define CL_GL_OBJECT_TEXTURE3D 0x2002 -#define CL_GL_OBJECT_RENDERBUFFER 0x2003 -#ifdef CL_VERSION_1_2 -#define CL_GL_OBJECT_TEXTURE2D_ARRAY 0x200E -#define CL_GL_OBJECT_TEXTURE1D 0x200F -#define CL_GL_OBJECT_TEXTURE1D_ARRAY 0x2010 -#define CL_GL_OBJECT_TEXTURE_BUFFER 0x2011 -#endif - -/* cl_gl_texture_info */ -#define CL_GL_TEXTURE_TARGET 0x2004 -#define CL_GL_MIPMAP_LEVEL 0x2005 -#ifdef CL_VERSION_1_2 -#define CL_GL_NUM_SAMPLES 0x2012 -#endif - - -extern CL_API_ENTRY cl_mem CL_API_CALL -clCreateFromGLBuffer(cl_context context, - cl_mem_flags flags, - cl_GLuint bufobj, - cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_0; - -#ifdef CL_VERSION_1_2 - -extern CL_API_ENTRY cl_mem CL_API_CALL -clCreateFromGLTexture(cl_context context, - cl_mem_flags flags, - cl_GLenum target, - cl_GLint miplevel, - cl_GLuint texture, - cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_2; - -#endif - -extern CL_API_ENTRY cl_mem CL_API_CALL -clCreateFromGLRenderbuffer(cl_context context, - cl_mem_flags flags, - cl_GLuint renderbuffer, - cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_0; - -extern CL_API_ENTRY cl_int CL_API_CALL -clGetGLObjectInfo(cl_mem memobj, - cl_gl_object_type * gl_object_type, - cl_GLuint * gl_object_name) CL_API_SUFFIX__VERSION_1_0; - -extern CL_API_ENTRY cl_int CL_API_CALL -clGetGLTextureInfo(cl_mem memobj, - cl_gl_texture_info param_name, - size_t param_value_size, - void * param_value, - size_t * param_value_size_ret) CL_API_SUFFIX__VERSION_1_0; - -extern CL_API_ENTRY cl_int CL_API_CALL -clEnqueueAcquireGLObjects(cl_command_queue command_queue, - cl_uint num_objects, - const cl_mem * mem_objects, - cl_uint num_events_in_wait_list, - const cl_event * event_wait_list, - cl_event * event) CL_API_SUFFIX__VERSION_1_0; - -extern CL_API_ENTRY cl_int CL_API_CALL -clEnqueueReleaseGLObjects(cl_command_queue command_queue, - cl_uint num_objects, - const cl_mem * mem_objects, - cl_uint num_events_in_wait_list, - const cl_event * event_wait_list, - cl_event * event) CL_API_SUFFIX__VERSION_1_0; - - -/* Deprecated OpenCL 1.1 APIs */ -extern CL_API_ENTRY CL_API_PREFIX__VERSION_1_1_DEPRECATED cl_mem CL_API_CALL -clCreateFromGLTexture2D(cl_context context, - cl_mem_flags flags, - cl_GLenum target, - cl_GLint miplevel, - cl_GLuint texture, - cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_1_DEPRECATED; - -extern CL_API_ENTRY CL_API_PREFIX__VERSION_1_1_DEPRECATED cl_mem CL_API_CALL -clCreateFromGLTexture3D(cl_context context, - cl_mem_flags flags, - cl_GLenum target, - cl_GLint miplevel, - cl_GLuint texture, - cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_1_DEPRECATED; - -/* cl_khr_gl_sharing extension */ - 
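-/* The extension adds context-creation properties (CL_GL_CONTEXT_KHR,
- * CL_EGL_DISPLAY_KHR, CL_GLX_DISPLAY_KHR, CL_WGL_HDC_KHR, CL_CGL_SHAREGROUP_KHR)
- * and clGetGLContextInfoKHR, so an OpenCL context can be created that shares
- * memory objects with a current OpenGL context. */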
-#define cl_khr_gl_sharing 1 - -typedef cl_uint cl_gl_context_info; - -/* Additional Error Codes */ -#define CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR -1000 - -/* cl_gl_context_info */ -#define CL_CURRENT_DEVICE_FOR_GL_CONTEXT_KHR 0x2006 -#define CL_DEVICES_FOR_GL_CONTEXT_KHR 0x2007 - -/* Additional cl_context_properties */ -#define CL_GL_CONTEXT_KHR 0x2008 -#define CL_EGL_DISPLAY_KHR 0x2009 -#define CL_GLX_DISPLAY_KHR 0x200A -#define CL_WGL_HDC_KHR 0x200B -#define CL_CGL_SHAREGROUP_KHR 0x200C - -extern CL_API_ENTRY cl_int CL_API_CALL -clGetGLContextInfoKHR(const cl_context_properties * properties, - cl_gl_context_info param_name, - size_t param_value_size, - void * param_value, - size_t * param_value_size_ret) CL_API_SUFFIX__VERSION_1_0; - -typedef cl_int (CL_API_CALL *clGetGLContextInfoKHR_fn)( - const cl_context_properties * properties, - cl_gl_context_info param_name, - size_t param_value_size, - void * param_value, - size_t * param_value_size_ret); - -/* - * cl_khr_gl_event extension - */ -#define CL_COMMAND_GL_FENCE_SYNC_OBJECT_KHR 0x200D - -extern CL_API_ENTRY cl_event CL_API_CALL -clCreateEventFromGLsyncKHR(cl_context context, - cl_GLsync sync, - cl_int * errcode_ret) CL_API_SUFFIX__VERSION_1_1; - -/*************************************************************** -* cl_intel_sharing_format_query_gl -***************************************************************/ -#define cl_intel_sharing_format_query_gl 1 - -/* when cl_khr_gl_sharing is supported */ - -extern CL_API_ENTRY cl_int CL_API_CALL -clGetSupportedGLTextureFormatsINTEL( - cl_context context, - cl_mem_flags flags, - cl_mem_object_type image_type, - cl_uint num_entries, - cl_GLenum* gl_formats, - cl_uint* num_texture_formats) ; - -typedef cl_int (CL_API_CALL * -clGetSupportedGLTextureFormatsINTEL_fn)( - cl_context context, - cl_mem_flags flags, - cl_mem_object_type image_type, - cl_uint num_entries, - cl_GLenum* gl_formats, - cl_uint* num_texture_formats) ; - -#ifdef __cplusplus -} -#endif - -#endif /* __OPENCL_CL_GL_H */ diff --git a/spaces/Jackflack09/diffuse-custom/diffusers/schedulers/scheduling_sde_ve.py b/spaces/Jackflack09/diffuse-custom/diffusers/schedulers/scheduling_sde_ve.py deleted file mode 100644 index 89d3d4a5858785731c0d60bdc5118a092d26f335..0000000000000000000000000000000000000000 --- a/spaces/Jackflack09/diffuse-custom/diffusers/schedulers/scheduling_sde_ve.py +++ /dev/null @@ -1,266 +0,0 @@ -# Copyright 2022 Google Brain and The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch - -import math -from dataclasses import dataclass -from typing import Optional, Tuple, Union - -import torch - -from ..configuration_utils import ConfigMixin, register_to_config -from ..utils import BaseOutput -from .scheduling_utils import SchedulerMixin, SchedulerOutput - - -@dataclass -class SdeVeOutput(BaseOutput): - """ - Output class for the ScoreSdeVeScheduler's step function output. 
- - Args: - prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): - Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the - denoising loop. - prev_sample_mean (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): - Mean averaged `prev_sample`. Same as `prev_sample`, only mean-averaged over previous timesteps. - """ - - prev_sample: torch.FloatTensor - prev_sample_mean: torch.FloatTensor - - -class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin): - """ - The variance exploding stochastic differential equation (SDE) scheduler. - - For more information, see the original paper: https://arxiv.org/abs/2011.13456 - - [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` - function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. - [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and - [`~SchedulerMixin.from_pretrained`] functions. - - Args: - num_train_timesteps (`int`): number of diffusion steps used to train the model. - snr (`float`): - coefficient weighting the step from the model_output sample (from the network) to the random noise. - sigma_min (`float`): - initial noise scale for sigma sequence in sampling procedure. The minimum sigma should mirror the - distribution of the data. - sigma_max (`float`): maximum value used for the range of continuous timesteps passed into the model. - sampling_eps (`float`): the end value of sampling, where timesteps decrease progressively from 1 to - epsilon. - correct_steps (`int`): number of correction steps performed on a produced sample. - """ - - order = 1 - - @register_to_config - def __init__( - self, - num_train_timesteps: int = 2000, - snr: float = 0.15, - sigma_min: float = 0.01, - sigma_max: float = 1348.0, - sampling_eps: float = 1e-5, - correct_steps: int = 1, - ): - # standard deviation of the initial noise distribution - self.init_noise_sigma = sigma_max - - # setable values - self.timesteps = None - - self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps) - - def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: - """ - Ensures interchangeability with schedulers that need to scale the denoising model input depending on the - current timestep. - - Args: - sample (`torch.FloatTensor`): input sample - timestep (`int`, optional): current timestep - - Returns: - `torch.FloatTensor`: scaled input sample - """ - return sample - - def set_timesteps( - self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None - ): - """ - Sets the continuous timesteps used for the diffusion chain. Supporting function to be run before inference. - - Args: - num_inference_steps (`int`): - the number of diffusion steps used when generating samples with a pre-trained model. - sampling_eps (`float`, optional): final timestep value (overrides value given at Scheduler instantiation). - - """ - sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps - - self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device) - - def set_sigmas( - self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None - ): - """ - Sets the noise scales used for the diffusion chain. 
Supporting function to be run before inference. - - The sigmas control the weight of the `drift` and `diffusion` components of sample update. - - Args: - num_inference_steps (`int`): - the number of diffusion steps used when generating samples with a pre-trained model. - sigma_min (`float`, optional): - initial noise scale value (overrides value given at Scheduler instantiation). - sigma_max (`float`, optional): final noise scale value (overrides value given at Scheduler instantiation). - sampling_eps (`float`, optional): final timestep value (overrides value given at Scheduler instantiation). - - """ - sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min - sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max - sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps - if self.timesteps is None: - self.set_timesteps(num_inference_steps, sampling_eps) - - self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps) - self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps)) - self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps]) - - def get_adjacent_sigma(self, timesteps, t): - return torch.where( - timesteps == 0, - torch.zeros_like(t.to(timesteps.device)), - self.discrete_sigmas[timesteps - 1].to(timesteps.device), - ) - - def step_pred( - self, - model_output: torch.FloatTensor, - timestep: int, - sample: torch.FloatTensor, - generator: Optional[torch.Generator] = None, - return_dict: bool = True, - ) -> Union[SdeVeOutput, Tuple]: - """ - Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion - process from the learned model outputs (most often the predicted noise). - - Args: - model_output (`torch.FloatTensor`): direct output from learned diffusion model. - timestep (`int`): current discrete timestep in the diffusion chain. - sample (`torch.FloatTensor`): - current instance of sample being created by diffusion process. - generator: random number generator. - return_dict (`bool`): option for returning tuple rather than SchedulerOutput class - - Returns: - [`~schedulers.scheduling_sde_ve.SdeVeOutput`] or `tuple`: [`~schedulers.scheduling_sde_ve.SdeVeOutput`] if - `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. 
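- The discretized update is `prev_sample_mean = sample + (sigma_t**2 - sigma_{t-1}**2) * model_output`;
- Gaussian noise scaled by `sqrt(sigma_t**2 - sigma_{t-1}**2)` is then added to give `prev_sample`.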
- - """ - if self.timesteps is None: - raise ValueError( - "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" - ) - - timestep = timestep * torch.ones( - sample.shape[0], device=sample.device - ) # torch.repeat_interleave(timestep, sample.shape[0]) - timesteps = (timestep * (len(self.timesteps) - 1)).long() - - # mps requires indices to be in the same device, so we use cpu as is the default with cuda - timesteps = timesteps.to(self.discrete_sigmas.device) - - sigma = self.discrete_sigmas[timesteps].to(sample.device) - adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device) - drift = torch.zeros_like(sample) - diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5 - - # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x) - # also equation 47 shows the analog from SDE models to ancestral sampling methods - diffusion = diffusion.flatten() - while len(diffusion.shape) < len(sample.shape): - diffusion = diffusion.unsqueeze(-1) - drift = drift - diffusion**2 * model_output - - # equation 6: sample noise for the diffusion term of - noise = torch.randn(sample.shape, layout=sample.layout, generator=generator).to(sample.device) - prev_sample_mean = sample - drift # subtract because `dt` is a small negative timestep - # TODO is the variable diffusion the correct scaling term for the noise? - prev_sample = prev_sample_mean + diffusion * noise # add impact of diffusion field g - - if not return_dict: - return (prev_sample, prev_sample_mean) - - return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean) - - def step_correct( - self, - model_output: torch.FloatTensor, - sample: torch.FloatTensor, - generator: Optional[torch.Generator] = None, - return_dict: bool = True, - ) -> Union[SchedulerOutput, Tuple]: - """ - Correct the predicted sample based on the output model_output of the network. This is often run repeatedly - after making the prediction for the previous timestep. - - Args: - model_output (`torch.FloatTensor`): direct output from learned diffusion model. - sample (`torch.FloatTensor`): - current instance of sample being created by diffusion process. - generator: random number generator. - return_dict (`bool`): option for returning tuple rather than SchedulerOutput class - - Returns: - [`~schedulers.scheduling_sde_ve.SdeVeOutput`] or `tuple`: [`~schedulers.scheduling_sde_ve.SdeVeOutput`] if - `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. - - """ - if self.timesteps is None: - raise ValueError( - "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" - ) - - # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. 
of z" - # sample noise for correction - noise = torch.randn(sample.shape, layout=sample.layout, generator=generator).to(sample.device) - - # compute step size from the model_output, the noise, and the snr - grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean() - noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean() - step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2 - step_size = step_size * torch.ones(sample.shape[0]).to(sample.device) - # self.repeat_scalar(step_size, sample.shape[0]) - - # compute corrected sample: model_output term and noise term - step_size = step_size.flatten() - while len(step_size.shape) < len(sample.shape): - step_size = step_size.unsqueeze(-1) - prev_sample_mean = sample + step_size * model_output - prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise - - if not return_dict: - return (prev_sample,) - - return SchedulerOutput(prev_sample=prev_sample) - - def __len__(self): - return self.config.num_train_timesteps diff --git a/spaces/JohnC26/Gradio-Maps-Latitude-Longitude/README.md b/spaces/JohnC26/Gradio-Maps-Latitude-Longitude/README.md deleted file mode 100644 index beeb32b44ac7d15ae3b0b078adc37b40e285bd8b..0000000000000000000000000000000000000000 --- a/spaces/JohnC26/Gradio-Maps-Latitude-Longitude/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Gradio Maps Latitude Longitude -emoji: 🌖City -colorFrom: red -colorTo: green -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: false -license: mit -duplicated_from: awacke1/Gradio-Maps-Latitude-Longitude ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/JosephusCheung/ACertainsStrategyTalk/9.html b/spaces/JosephusCheung/ACertainsStrategyTalk/9.html deleted file mode 100644 index 15dbd9de8144c862bba476da2b302ad857a7ac80..0000000000000000000000000000000000000000 --- a/spaces/JosephusCheung/ACertainsStrategyTalk/9.html +++ /dev/null @@ -1,114 +0,0 @@ - - - - - - - - - -
    - - - - - - - - - - - - - - - -
    -
    - - - - -
Problems -and -Proposed -Solutions -Fine-tuning a diffusion model on a small set of subject images -causes it to lose the ability to generate generic images of the same -class and to forget the class-specific prior. -1. Language Drift -Solution 1: DreamBooth uses the model's own generated samples, -adding the prior-preservation loss with a relative weight. -However, that prior-preservation ratio is not easy to determine. -Solution 2: This method requires a lot of GPU time - during regular -training, we add images auto-generated by the current model from a -single-word prompt, with words drawn at random from a pre-estimated word-frequency -list according to a certain ratio (we chose our word list from Danbooru -Tags). To avoid overfitting, each auto-generated image is used only once.
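[Editor's illustration, not part of the deleted file.] The two mitigation strategies described in the slide text above can be made concrete with a short sketch. It assumes a diffusers-style UNet and DDPM noise scheduler operating on pre-encoded latents and text embeddings; the names `prior_weight`, `word_freqs`, and `pipe` are placeholders introduced here for illustration and are not APIs defined anywhere in this repository. Solution 1 mixes the subject (instance) loss with a relatively weighted prior-preservation loss; Solution 2 draws a single word by frequency and renders a one-shot regularization image with the current model.

import random
import torch
import torch.nn.functional as F

def dreambooth_step(unet, noise_scheduler, instance_batch, prior_batch, prior_weight=1.0):
    # Solution 1 (sketch): instance loss plus a relative weight on the prior-preservation loss.
    # Each batch is (latents, text_embeddings), both already encoded; unet/noise_scheduler are
    # assumed to follow the diffusers-style interfaces (add_noise, .sample output).
    loss = torch.zeros((), device=instance_batch[0].device)
    for (latents, text_emb), weight in ((instance_batch, 1.0), (prior_batch, prior_weight)):
        noise = torch.randn_like(latents)
        t = torch.randint(0, noise_scheduler.config.num_train_timesteps,
                          (latents.shape[0],), device=latents.device)
        noisy = noise_scheduler.add_noise(latents, noise, t)
        pred = unet(noisy, t, encoder_hidden_states=text_emb).sample
        loss = loss + weight * F.mse_loss(pred.float(), noise.float())
    return loss

def sample_regularization_image(pipe, word_freqs):
    # Solution 2 (sketch): pick one word according to a pre-estimated frequency list and
    # generate a throwaway regularization image with the *current* model; use it only once.
    words, freqs = zip(*word_freqs.items())
    word = random.choices(words, weights=freqs, k=1)[0]
    image = pipe(word).images[0]  # assumes a StableDiffusionPipeline-like callable
    return word, image

Discarding each generated image after a single use, as the slide notes, is what keeps the second scheme from collapsing into repeated training on the model's own artifacts.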
    - - - -
    - - diff --git a/spaces/JoshMe1/UAS_MCL_FAREL/README.md b/spaces/JoshMe1/UAS_MCL_FAREL/README.md deleted file mode 100644 index f6e1747075cbe32889b6207c1243d100b8e23ac8..0000000000000000000000000000000000000000 --- a/spaces/JoshMe1/UAS_MCL_FAREL/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: UAS MCL FAREL -emoji: ⚡ -colorFrom: yellow -colorTo: gray -sdk: streamlit -sdk_version: 1.21.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/JosueElias/borrs/app.py b/spaces/JosueElias/borrs/app.py deleted file mode 100644 index 4cfdda6b56b1736117a4f24adbb51acaf1346075..0000000000000000000000000000000000000000 --- a/spaces/JosueElias/borrs/app.py +++ /dev/null @@ -1,67 +0,0 @@ -import streamlit as st - -try: - - from RAG import pipeline - - st.title("Ask your scientific question! 👨‍⚕") - expected_format = "What is color?\nA)Is a name.\nB)Is something horrible.\nC)I don't know.\nD)You should ask someone else.\nE)Ask in a pyshic book." - txt = st.text_area( - "follow this format while making your question:", - expected_format) - try: - lista = txt.split("E)") - e = lista[1] - lista = lista[0].split("D)") - d = lista[1] - lista = lista[0].split("C)") - c = lista[1] - lista = lista[0].split("B)") - b = lista[1] - lista = lista[0].split("A)") - a = lista[1] - q = lista[0] - - mi_dict= { - "prompt":q, - "A":a, - "B":b, - "C":c, - "D":d, - "E":e - } - - multi = f''' - --- - :green[**Question**] 🔎 - - {mi_dict["prompt"]} - - :green[**Options**] 📗 - - * :blue[**A**] {mi_dict["A"]} - * :blue[**B**] {mi_dict["B"]} - * :blue[**C**] {mi_dict["C"]} - * :blue[**D**] {mi_dict["D"]} - * :blue[**E**] {mi_dict["E"]} - --- - ''' - st.markdown(multi) - - try: - if st.button('Ask for answer'): - answer = pipeline.give_the_best_answer(mi_dict) - st.write(f"The correct answer is {answer}: {mi_dict[answer]}") - st.balloons() - - except Exception as e: - st.error("Something bad happend while trying to infer the answer or with the buttons.") - st.error(e) - - except Exception as e: - st.error("Your question doesn't have the required format. Please, correct it.") - st.error(e) - -except Exception as e: - st.error("Error most likely related to the import of the object 'pipeline'") - st.error(e) \ No newline at end of file diff --git a/spaces/Kevin676/AutoGPT/autogpt/commands/image_gen.py b/spaces/Kevin676/AutoGPT/autogpt/commands/image_gen.py deleted file mode 100644 index 0809fcdd3e38b52a2ce09ca1444f2574813d40f9..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/AutoGPT/autogpt/commands/image_gen.py +++ /dev/null @@ -1,163 +0,0 @@ -""" Image Generation Module for AutoGPT.""" -import io -import os.path -import uuid -from base64 import b64decode - -import openai -import requests -from PIL import Image - -from autogpt.config import Config -from autogpt.workspace import path_in_workspace - -CFG = Config() - - -def generate_image(prompt: str, size: int = 256) -> str: - """Generate an image from a prompt. - - Args: - prompt (str): The prompt to use - size (int, optional): The size of the image. Defaults to 256. 
(Not supported by HuggingFace) - - Returns: - str: The filename of the image - """ - filename = f"{str(uuid.uuid4())}.jpg" - - # DALL-E - if CFG.image_provider == "dalle": - return generate_image_with_dalle(prompt, filename, size) - # HuggingFace - elif CFG.image_provider == "huggingface": - return generate_image_with_hf(prompt, filename) - # SD WebUI - elif CFG.image_provider == "sdwebui": - return generate_image_with_sd_webui(prompt, filename, size) - return "No Image Provider Set" - - -def generate_image_with_hf(prompt: str, filename: str) -> str: - """Generate an image with HuggingFace's API. - - Args: - prompt (str): The prompt to use - filename (str): The filename to save the image to - - Returns: - str: The filename of the image - """ - API_URL = ( - f"https://api-inference.huggingface.co/models/{CFG.huggingface_image_model}" - ) - if CFG.huggingface_api_token is None: - raise ValueError( - "You need to set your Hugging Face API token in the config file." - ) - headers = { - "Authorization": f"Bearer {CFG.huggingface_api_token}", - "X-Use-Cache": "false", - } - - response = requests.post( - API_URL, - headers=headers, - json={ - "inputs": prompt, - }, - ) - - image = Image.open(io.BytesIO(response.content)) - print(f"Image Generated for prompt:{prompt}") - - image.save(path_in_workspace(filename)) - - return f"Saved to disk:{filename}" - - -def generate_image_with_dalle(prompt: str, filename: str) -> str: - """Generate an image with DALL-E. - - Args: - prompt (str): The prompt to use - filename (str): The filename to save the image to - - Returns: - str: The filename of the image - """ - openai.api_key = CFG.openai_api_key - - # Check for supported image sizes - if size not in [256, 512, 1024]: - closest = min([256, 512, 1024], key=lambda x: abs(x - size)) - print( - f"DALL-E only supports image sizes of 256x256, 512x512, or 1024x1024. Setting to {closest}, was {size}." - ) - size = closest - - response = openai.Image.create( - prompt=prompt, - n=1, - size=f"{size}x{size}", - response_format="b64_json", - ) - - print(f"Image Generated for prompt:{prompt}") - - image_data = b64decode(response["data"][0]["b64_json"]) - - with open(path_in_workspace(filename), mode="wb") as png: - png.write(image_data) - - return f"Saved to disk:{filename}" - - -def generate_image_with_sd_webui( - prompt: str, - filename: str, - size: int = 512, - negative_prompt: str = "", - extra: dict = {}, -) -> str: - """Generate an image with Stable Diffusion webui. - Args: - prompt (str): The prompt to use - filename (str): The filename to save the image to - size (int, optional): The size of the image. Defaults to 256. - negative_prompt (str, optional): The negative prompt to use. Defaults to "". - extra (dict, optional): Extra parameters to pass to the API. Defaults to {}. 
- Returns: - str: The filename of the image - """ - # Create a session and set the basic auth if needed - s = requests.Session() - if CFG.sd_webui_auth: - username, password = CFG.sd_webui_auth.split(":") - s.auth = (username, password or "") - - # Generate the images - response = requests.post( - f"{CFG.sd_webui_url}/sdapi/v1/txt2img", - json={ - "prompt": prompt, - "negative_prompt": negative_prompt, - "sampler_index": "DDIM", - "steps": 20, - "cfg_scale": 7.0, - "width": size, - "height": size, - "n_iter": 1, - **extra, - }, - ) - - print(f"Image Generated for prompt:{prompt}") - - # Save the image to disk - response = response.json() - b64 = b64decode(response["images"][0].split(",", 1)[0]) - image = Image.open(io.BytesIO(b64)) - image.save(path_in_workspace(filename)) - - return f"Saved to disk:{filename}" diff --git a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/vocoder/wavernn/audio.py b/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/vocoder/wavernn/audio.py deleted file mode 100644 index bec976840ca22ff39644d7dd51f70094910a8458..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/vocoder/wavernn/audio.py +++ /dev/null @@ -1,108 +0,0 @@ -import math -import numpy as np -import librosa -import vocoder.wavernn.hparams as hp -from scipy.signal import lfilter -import soundfile as sf - - -def label_2_float(x, bits) : - return 2 * x / (2**bits - 1.) - 1. - - -def float_2_label(x, bits) : - assert abs(x).max() <= 1.0 - x = (x + 1.) * (2**bits - 1) / 2 - return x.clip(0, 2**bits - 1) - - -def load_wav(path) : - return librosa.load(str(path), sr=hp.sample_rate)[0] - - -def save_wav(x, path) : - sf.write(path, x.astype(np.float32), hp.sample_rate) - - -def split_signal(x) : - unsigned = x + 2**15 - coarse = unsigned // 256 - fine = unsigned % 256 - return coarse, fine - - -def combine_signal(coarse, fine) : - return coarse * 256 + fine - 2**15 - - -def encode_16bits(x) : - return np.clip(x * 2**15, -2**15, 2**15 - 1).astype(np.int16) - - -mel_basis = None - - -def linear_to_mel(spectrogram): - global mel_basis - if mel_basis is None: - mel_basis = build_mel_basis() - return np.dot(mel_basis, spectrogram) - - -def build_mel_basis(): - return librosa.filters.mel(hp.sample_rate, hp.n_fft, n_mels=hp.num_mels, fmin=hp.fmin) - - -def normalize(S): - return np.clip((S - hp.min_level_db) / -hp.min_level_db, 0, 1) - - -def denormalize(S): - return (np.clip(S, 0, 1) * -hp.min_level_db) + hp.min_level_db - - -def amp_to_db(x): - return 20 * np.log10(np.maximum(1e-5, x)) - - -def db_to_amp(x): - return np.power(10.0, x * 0.05) - - -def spectrogram(y): - D = stft(y) - S = amp_to_db(np.abs(D)) - hp.ref_level_db - return normalize(S) - - -def melspectrogram(y): - D = stft(y) - S = amp_to_db(linear_to_mel(np.abs(D))) - return normalize(S) - - -def stft(y): - return librosa.stft(y=y, n_fft=hp.n_fft, hop_length=hp.hop_length, win_length=hp.win_length) - - -def pre_emphasis(x): - return lfilter([1, -hp.preemphasis], [1], x) - - -def de_emphasis(x): - return lfilter([1], [1, -hp.preemphasis], x) - - -def encode_mu_law(x, mu) : - mu = mu - 1 - fx = np.sign(x) * np.log(1 + mu * np.abs(x)) / np.log(1 + mu) - return np.floor((fx + 1) / 2 * mu + 0.5) - - -def decode_mu_law(y, mu, from_labels=True) : - if from_labels: - y = label_2_float(y, math.log2(mu)) - mu = mu - 1 - x = np.sign(y) / mu * ((1 + mu) ** np.abs(y) - 1) - return x - diff --git a/spaces/Kimata/multimodal_deepfake_detection/models/rawnet.py 
b/spaces/Kimata/multimodal_deepfake_detection/models/rawnet.py deleted file mode 100644 index 7f3c16800d40b9dd6ff029613b79d5d9784784d0..0000000000000000000000000000000000000000 --- a/spaces/Kimata/multimodal_deepfake_detection/models/rawnet.py +++ /dev/null @@ -1,360 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch import Tensor -import numpy as np -from torch.utils import data -from collections import OrderedDict -from torch.nn.parameter import Parameter - - -class SincConv(nn.Module): - @staticmethod - def to_mel(hz): - return 2595 * np.log10(1 + hz / 700) - - @staticmethod - def to_hz(mel): - return 700 * (10 ** (mel / 2595) - 1) - - - def __init__(self, device,out_channels, kernel_size,in_channels=1,sample_rate=16000, - stride=1, padding=0, dilation=1, bias=False, groups=1): - - super(SincConv,self).__init__() - - if in_channels != 1: - - msg = "SincConv only support one input channel (here, in_channels = {%i})" % (in_channels) - raise ValueError(msg) - - self.out_channels = out_channels - self.kernel_size = kernel_size - self.sample_rate=sample_rate - - # Forcing the filters to be odd (i.e, perfectly symmetrics) - if kernel_size%2==0: - self.kernel_size=self.kernel_size+1 - - self.device=device - self.stride = stride - self.padding = padding - self.dilation = dilation - - if bias: - raise ValueError('SincConv does not support bias.') - if groups > 1: - raise ValueError('SincConv does not support groups.') - - - # initialize filterbanks using Mel scale - NFFT = 512 - f=int(self.sample_rate/2)*np.linspace(0,1,int(NFFT/2)+1) - fmel=self.to_mel(f) # Hz to mel conversion - fmelmax=np.max(fmel) - fmelmin=np.min(fmel) - filbandwidthsmel=np.linspace(fmelmin,fmelmax,self.out_channels+1) - filbandwidthsf=self.to_hz(filbandwidthsmel) # Mel to Hz conversion - self.mel=filbandwidthsf - self.hsupp=torch.arange(-(self.kernel_size-1)/2, (self.kernel_size-1)/2+1) - self.band_pass=torch.zeros(self.out_channels,self.kernel_size) - - - - def forward(self,x): - for i in range(len(self.mel)-1): - fmin=self.mel[i] - fmax=self.mel[i+1] - hHigh=(2*fmax/self.sample_rate)*np.sinc(2*fmax*self.hsupp/self.sample_rate) - hLow=(2*fmin/self.sample_rate)*np.sinc(2*fmin*self.hsupp/self.sample_rate) - hideal=hHigh-hLow - - self.band_pass[i,:]=Tensor(np.hamming(self.kernel_size))*Tensor(hideal) - - band_pass_filter=self.band_pass.to(self.device) - - self.filters = (band_pass_filter).view(self.out_channels, 1, self.kernel_size) - - return F.conv1d(x, self.filters, stride=self.stride, - padding=self.padding, dilation=self.dilation, - bias=None, groups=1) - - - -class Residual_block(nn.Module): - def __init__(self, nb_filts, first = False): - super(Residual_block, self).__init__() - self.first = first - - if not self.first: - self.bn1 = nn.BatchNorm1d(num_features = nb_filts[0]) - - self.lrelu = nn.LeakyReLU(negative_slope=0.3) - - self.conv1 = nn.Conv1d(in_channels = nb_filts[0], - out_channels = nb_filts[1], - kernel_size = 3, - padding = 1, - stride = 1) - - self.bn2 = nn.BatchNorm1d(num_features = nb_filts[1]) - self.conv2 = nn.Conv1d(in_channels = nb_filts[1], - out_channels = nb_filts[1], - padding = 1, - kernel_size = 3, - stride = 1) - - if nb_filts[0] != nb_filts[1]: - self.downsample = True - self.conv_downsample = nn.Conv1d(in_channels = nb_filts[0], - out_channels = nb_filts[1], - padding = 0, - kernel_size = 1, - stride = 1) - - else: - self.downsample = False - self.mp = nn.MaxPool1d(3) - - def forward(self, x): - identity = x - if not self.first: - out = self.bn1(x) - 
out = self.lrelu(out) - else: - out = x - - out = self.conv1(x) - out = self.bn2(out) - out = self.lrelu(out) - out = self.conv2(out) - - if self.downsample: - identity = self.conv_downsample(identity) - - out += identity - out = self.mp(out) - return out - - - - - -class RawNet(nn.Module): - def __init__(self, d_args, device): - super(RawNet, self).__init__() - - - self.device=device - - self.Sinc_conv=SincConv(device=self.device, - out_channels = d_args['filts'][0], - kernel_size = d_args['first_conv'], - in_channels = d_args['in_channels'] - ) - - self.first_bn = nn.BatchNorm1d(num_features = d_args['filts'][0]) - self.selu = nn.SELU(inplace=True) - self.block0 = nn.Sequential(Residual_block(nb_filts = d_args['filts'][1], first = True)) - self.block1 = nn.Sequential(Residual_block(nb_filts = d_args['filts'][1])) - self.block2 = nn.Sequential(Residual_block(nb_filts = d_args['filts'][2])) - d_args['filts'][2][0] = d_args['filts'][2][1] - self.block3 = nn.Sequential(Residual_block(nb_filts = d_args['filts'][2])) - self.block4 = nn.Sequential(Residual_block(nb_filts = d_args['filts'][2])) - self.block5 = nn.Sequential(Residual_block(nb_filts = d_args['filts'][2])) - self.avgpool = nn.AdaptiveAvgPool1d(1) - - self.fc_attention0 = self._make_attention_fc(in_features = d_args['filts'][1][-1], - l_out_features = d_args['filts'][1][-1]) - self.fc_attention1 = self._make_attention_fc(in_features = d_args['filts'][1][-1], - l_out_features = d_args['filts'][1][-1]) - self.fc_attention2 = self._make_attention_fc(in_features = d_args['filts'][2][-1], - l_out_features = d_args['filts'][2][-1]) - self.fc_attention3 = self._make_attention_fc(in_features = d_args['filts'][2][-1], - l_out_features = d_args['filts'][2][-1]) - self.fc_attention4 = self._make_attention_fc(in_features = d_args['filts'][2][-1], - l_out_features = d_args['filts'][2][-1]) - self.fc_attention5 = self._make_attention_fc(in_features = d_args['filts'][2][-1], - l_out_features = d_args['filts'][2][-1]) - - self.bn_before_gru = nn.BatchNorm1d(num_features = d_args['filts'][2][-1]) - self.gru = nn.GRU(input_size = d_args['filts'][2][-1], - hidden_size = d_args['gru_node'], - num_layers = d_args['nb_gru_layer'], - batch_first = True) - - - self.fc1_gru = nn.Linear(in_features = d_args['gru_node'], - out_features = d_args['nb_fc_node']) - - self.fc2_gru = nn.Linear(in_features = d_args['nb_fc_node'], - out_features = d_args['nb_classes'],bias=True) - - - self.sig = nn.Sigmoid() - self.logsoftmax = nn.LogSoftmax(dim=1) - - def forward(self, x, y = None): - - - nb_samp = x.shape[0] - len_seq = x.shape[1] - x=x.view(nb_samp,1,len_seq) - - x = self.Sinc_conv(x) - x = F.max_pool1d(torch.abs(x), 3) - x = self.first_bn(x) - x = self.selu(x) - - x0 = self.block0(x) - y0 = self.avgpool(x0).view(x0.size(0), -1) # torch.Size([batch, filter]) - y0 = self.fc_attention0(y0) - y0 = self.sig(y0).view(y0.size(0), y0.size(1), -1) # torch.Size([batch, filter, 1]) - x = x0 * y0 + y0 # (batch, filter, time) x (batch, filter, 1) - - - x1 = self.block1(x) - y1 = self.avgpool(x1).view(x1.size(0), -1) # torch.Size([batch, filter]) - y1 = self.fc_attention1(y1) - y1 = self.sig(y1).view(y1.size(0), y1.size(1), -1) # torch.Size([batch, filter, 1]) - x = x1 * y1 + y1 # (batch, filter, time) x (batch, filter, 1) - - x2 = self.block2(x) - y2 = self.avgpool(x2).view(x2.size(0), -1) # torch.Size([batch, filter]) - y2 = self.fc_attention2(y2) - y2 = self.sig(y2).view(y2.size(0), y2.size(1), -1) # torch.Size([batch, filter, 1]) - x = x2 * y2 + y2 # (batch, filter, time) 
x (batch, filter, 1) - - x3 = self.block3(x) - y3 = self.avgpool(x3).view(x3.size(0), -1) # torch.Size([batch, filter]) - y3 = self.fc_attention3(y3) - y3 = self.sig(y3).view(y3.size(0), y3.size(1), -1) # torch.Size([batch, filter, 1]) - x = x3 * y3 + y3 # (batch, filter, time) x (batch, filter, 1) - - x4 = self.block4(x) - y4 = self.avgpool(x4).view(x4.size(0), -1) # torch.Size([batch, filter]) - y4 = self.fc_attention4(y4) - y4 = self.sig(y4).view(y4.size(0), y4.size(1), -1) # torch.Size([batch, filter, 1]) - x = x4 * y4 + y4 # (batch, filter, time) x (batch, filter, 1) - - x5 = self.block5(x) - y5 = self.avgpool(x5).view(x5.size(0), -1) # torch.Size([batch, filter]) - y5 = self.fc_attention5(y5) - y5 = self.sig(y5).view(y5.size(0), y5.size(1), -1) # torch.Size([batch, filter, 1]) - x = x5 * y5 + y5 # (batch, filter, time) x (batch, filter, 1) - - x = self.bn_before_gru(x) - x = self.selu(x) - x = x.permute(0, 2, 1) #(batch, filt, time) >> (batch, time, filt) - self.gru.flatten_parameters() - x, _ = self.gru(x) - x = x[:,-1,:] - x = self.fc1_gru(x) - x = self.fc2_gru(x) - output=self.logsoftmax(x) - print(f"Spec output shape: {output.shape}") - - return output - - - - def _make_attention_fc(self, in_features, l_out_features): - - l_fc = [] - - l_fc.append(nn.Linear(in_features = in_features, - out_features = l_out_features)) - - - - return nn.Sequential(*l_fc) - - - def _make_layer(self, nb_blocks, nb_filts, first = False): - layers = [] - #def __init__(self, nb_filts, first = False): - for i in range(nb_blocks): - first = first if i == 0 else False - layers.append(Residual_block(nb_filts = nb_filts, - first = first)) - if i == 0: nb_filts[0] = nb_filts[1] - - return nn.Sequential(*layers) - - def summary(self, input_size, batch_size=-1, device="cuda", print_fn = None): - if print_fn == None: printfn = print - model = self - - def register_hook(module): - def hook(module, input, output): - class_name = str(module.__class__).split(".")[-1].split("'")[0] - module_idx = len(summary) - - m_key = "%s-%i" % (class_name, module_idx + 1) - summary[m_key] = OrderedDict() - summary[m_key]["input_shape"] = list(input[0].size()) - summary[m_key]["input_shape"][0] = batch_size - if isinstance(output, (list, tuple)): - summary[m_key]["output_shape"] = [ - [-1] + list(o.size())[1:] for o in output - ] - else: - summary[m_key]["output_shape"] = list(output.size()) - if len(summary[m_key]["output_shape"]) != 0: - summary[m_key]["output_shape"][0] = batch_size - - params = 0 - if hasattr(module, "weight") and hasattr(module.weight, "size"): - params += torch.prod(torch.LongTensor(list(module.weight.size()))) - summary[m_key]["trainable"] = module.weight.requires_grad - if hasattr(module, "bias") and hasattr(module.bias, "size"): - params += torch.prod(torch.LongTensor(list(module.bias.size()))) - summary[m_key]["nb_params"] = params - - if ( - not isinstance(module, nn.Sequential) - and not isinstance(module, nn.ModuleList) - and not (module == model) - ): - hooks.append(module.register_forward_hook(hook)) - - device = device.lower() - assert device in [ - "cuda", - "cpu", - ], "Input device is not valid, please specify 'cuda' or 'cpu'" - - if device == "cuda" and torch.cuda.is_available(): - dtype = torch.cuda.FloatTensor - else: - dtype = torch.FloatTensor - if isinstance(input_size, tuple): - input_size = [input_size] - x = [torch.rand(2, *in_size).type(dtype) for in_size in input_size] - summary = OrderedDict() - hooks = [] - model.apply(register_hook) - model(*x) - for h in hooks: - h.remove() - - 
print_fn("----------------------------------------------------------------") - line_new = "{:>20} {:>25} {:>15}".format("Layer (type)", "Output Shape", "Param #") - print_fn(line_new) - print_fn("================================================================") - total_params = 0 - total_output = 0 - trainable_params = 0 - for layer in summary: - # input_shape, output_shape, trainable, nb_params - line_new = "{:>20} {:>25} {:>15}".format( - layer, - str(summary[layer]["output_shape"]), - "{0:,}".format(summary[layer]["nb_params"]), - ) - total_params += summary[layer]["nb_params"] - total_output += np.prod(summary[layer]["output_shape"]) - if "trainable" in summary[layer]: - if summary[layer]["trainable"] == True: - trainable_params += summary[layer]["nb_params"] - print_fn(line_new) diff --git a/spaces/Kok4444/meme_kok/README.md b/spaces/Kok4444/meme_kok/README.md deleted file mode 100644 index 16fb19b6833b08159fcaa79ec640f081f368b0f2..0000000000000000000000000000000000000000 --- a/spaces/Kok4444/meme_kok/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Meme Kok -emoji: 🌖 -colorFrom: pink -colorTo: blue -sdk: gradio -sdk_version: 3.3.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/KwabsHug/Language-Learn-Idea/README.md b/spaces/KwabsHug/Language-Learn-Idea/README.md deleted file mode 100644 index 82eb415f0f7d0231ad85f9f69cd10e498bb53d82..0000000000000000000000000000000000000000 --- a/spaces/KwabsHug/Language-Learn-Idea/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Language Learn Idea -emoji: 🌍 -colorFrom: green -colorTo: yellow -sdk: gradio -sdk_version: 3.47.1 -app_file: app.py -pinned: false -fullWidth: true ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/necks/ct_resnet_neck.py b/spaces/KyanChen/RSPrompter/mmdet/models/necks/ct_resnet_neck.py deleted file mode 100644 index 9109fe79290fafecd954f223d5365ef619c0c301..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/models/necks/ct_resnet_neck.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math -from typing import Sequence, Tuple - -import torch -import torch.nn as nn -from mmcv.cnn import ConvModule -from mmengine.model import BaseModule - -from mmdet.registry import MODELS -from mmdet.utils import OptMultiConfig - - -@MODELS.register_module() -class CTResNetNeck(BaseModule): - """The neck used in `CenterNet `_ for - object classification and box regression. - - Args: - in_channels (int): Number of input channels. - num_deconv_filters (tuple[int]): Number of filters per stage. - num_deconv_kernels (tuple[int]): Number of kernels per stage. - use_dcn (bool): If True, use DCNv2. Defaults to True. - init_cfg (:obj:`ConfigDict` or dict or list[dict] or - list[:obj:`ConfigDict`], optional): Initialization - config dict. 
- """ - - def __init__(self, - in_channels: int, - num_deconv_filters: Tuple[int, ...], - num_deconv_kernels: Tuple[int, ...], - use_dcn: bool = True, - init_cfg: OptMultiConfig = None) -> None: - super().__init__(init_cfg=init_cfg) - assert len(num_deconv_filters) == len(num_deconv_kernels) - self.fp16_enabled = False - self.use_dcn = use_dcn - self.in_channels = in_channels - self.deconv_layers = self._make_deconv_layer(num_deconv_filters, - num_deconv_kernels) - - def _make_deconv_layer( - self, num_deconv_filters: Tuple[int, ...], - num_deconv_kernels: Tuple[int, ...]) -> nn.Sequential: - """use deconv layers to upsample backbone's output.""" - layers = [] - for i in range(len(num_deconv_filters)): - feat_channels = num_deconv_filters[i] - conv_module = ConvModule( - self.in_channels, - feat_channels, - 3, - padding=1, - conv_cfg=dict(type='DCNv2') if self.use_dcn else None, - norm_cfg=dict(type='BN')) - layers.append(conv_module) - upsample_module = ConvModule( - feat_channels, - feat_channels, - num_deconv_kernels[i], - stride=2, - padding=1, - conv_cfg=dict(type='deconv'), - norm_cfg=dict(type='BN')) - layers.append(upsample_module) - self.in_channels = feat_channels - - return nn.Sequential(*layers) - - def init_weights(self) -> None: - """Initialize the parameters.""" - for m in self.modules(): - if isinstance(m, nn.ConvTranspose2d): - # In order to be consistent with the source code, - # reset the ConvTranspose2d initialization parameters - m.reset_parameters() - # Simulated bilinear upsampling kernel - w = m.weight.data - f = math.ceil(w.size(2) / 2) - c = (2 * f - 1 - f % 2) / (2. * f) - for i in range(w.size(2)): - for j in range(w.size(3)): - w[0, 0, i, j] = \ - (1 - math.fabs(i / f - c)) * ( - 1 - math.fabs(j / f - c)) - for c in range(1, w.size(0)): - w[c, 0, :, :] = w[0, 0, :, :] - elif isinstance(m, nn.BatchNorm2d): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - # self.use_dcn is False - elif not self.use_dcn and isinstance(m, nn.Conv2d): - # In order to be consistent with the source code, - # reset the Conv2d initialization parameters - m.reset_parameters() - - def forward(self, x: Sequence[torch.Tensor]) -> Tuple[torch.Tensor]: - """model forward.""" - assert isinstance(x, (list, tuple)) - outs = self.deconv_layers(x[-1]) - return outs, diff --git a/spaces/Laihiujin/OneFormer/oneformer/modeling/pixel_decoder/ops/functions/ms_deform_attn_func.py b/spaces/Laihiujin/OneFormer/oneformer/modeling/pixel_decoder/ops/functions/ms_deform_attn_func.py deleted file mode 100644 index ecfea82f3e6ac34cd8b1abc2d1de5b0f4eaed343..0000000000000000000000000000000000000000 --- a/spaces/Laihiujin/OneFormer/oneformer/modeling/pixel_decoder/ops/functions/ms_deform_attn_func.py +++ /dev/null @@ -1,77 +0,0 @@ -# ------------------------------------------------------------------------------------------------ -# Deformable DETR -# Copyright (c) 2020 SenseTime. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------------------------------ -# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 -# ------------------------------------------------------------------------------------------------ - -# Copyright (c) Facebook, Inc. and its affiliates. 
-# Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR - - -from __future__ import absolute_import -from __future__ import print_function -from __future__ import division - -import torch -import torch.nn.functional as F -from torch.autograd import Function -from torch.autograd.function import once_differentiable - -if torch.cuda.is_available(): - try: - import MultiScaleDeformableAttention as MSDA - except ModuleNotFoundError as e: - info_string = ( - "\n\nPlease compile MultiScaleDeformableAttention CUDA op with the following commands:\n" - "\t`cd oneformer/modeling/pixel_decoder/ops`\n" - "\t`sh make.sh`\n" - ) - raise ModuleNotFoundError(info_string) -else: - MultiScaleDeformableAttention = None - - - -class MSDeformAttnFunction(Function): - @staticmethod - def forward(ctx, value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, im2col_step): - ctx.im2col_step = im2col_step - output = MSDA.ms_deform_attn_forward( - value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, ctx.im2col_step) - ctx.save_for_backward(value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights) - return output - - @staticmethod - @once_differentiable - def backward(ctx, grad_output): - value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights = ctx.saved_tensors - grad_value, grad_sampling_loc, grad_attn_weight = \ - MSDA.ms_deform_attn_backward( - value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, grad_output, ctx.im2col_step) - - return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None - - -def ms_deform_attn_core_pytorch(value, value_spatial_shapes, sampling_locations, attention_weights): - # for debug and test only, - # need to use cuda version instead - N_, S_, M_, D_ = value.shape - _, Lq_, M_, L_, P_, _ = sampling_locations.shape - value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes], dim=1) - sampling_grids = 2 * sampling_locations - 1 - sampling_value_list = [] - for lid_, (H_, W_) in enumerate(value_spatial_shapes): - # N_, H_*W_, M_, D_ -> N_, H_*W_, M_*D_ -> N_, M_*D_, H_*W_ -> N_*M_, D_, H_, W_ - value_l_ = value_list[lid_].flatten(2).transpose(1, 2).reshape(N_*M_, D_, H_, W_) - # N_, Lq_, M_, P_, 2 -> N_, M_, Lq_, P_, 2 -> N_*M_, Lq_, P_, 2 - sampling_grid_l_ = sampling_grids[:, :, :, lid_].transpose(1, 2).flatten(0, 1) - # N_*M_, D_, Lq_, P_ - sampling_value_l_ = F.grid_sample(value_l_, sampling_grid_l_, - mode='bilinear', padding_mode='zeros', align_corners=False) - sampling_value_list.append(sampling_value_l_) - # (N_, Lq_, M_, L_, P_) -> (N_, M_, Lq_, L_, P_) -> (N_, M_, 1, Lq_, L_*P_) - attention_weights = attention_weights.transpose(1, 2).reshape(N_*M_, 1, Lq_, L_*P_) - output = (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights).sum(-1).view(N_, M_*D_, Lq_) - return output.transpose(1, 2).contiguous() diff --git a/spaces/Lee-Shang/sahi-yolox-duplicate/utils.py b/spaces/Lee-Shang/sahi-yolox-duplicate/utils.py deleted file mode 100644 index a72782b8c7511b37598d83b0c899af1e98505971..0000000000000000000000000000000000000000 --- a/spaces/Lee-Shang/sahi-yolox-duplicate/utils.py +++ /dev/null @@ -1,54 +0,0 @@ -import numpy -import sahi.predict -import sahi.utils -from PIL import Image - -TEMP_DIR = "temp" - - -def sahi_mmdet_inference( - image, - detection_model, - slice_height=512, - slice_width=512, - overlap_height_ratio=0.2, - 
overlap_width_ratio=0.2, - image_size=640, - postprocess_type="GREEDYNMM", - postprocess_match_metric="IOS", - postprocess_match_threshold=0.5, - postprocess_class_agnostic=False, -): - - # standard inference - detection_model.image_size = image_size - prediction_result_1 = sahi.predict.get_prediction( - image=image, detection_model=detection_model - ) - visual_result_1 = sahi.utils.cv.visualize_object_predictions( - image=numpy.array(image), - object_prediction_list=prediction_result_1.object_prediction_list, - ) - output_1 = Image.fromarray(visual_result_1["image"]) - - # sliced inference - prediction_result_2 = sahi.predict.get_sliced_prediction( - image=image, - detection_model=detection_model, - slice_height=slice_height, - slice_width=slice_width, - overlap_height_ratio=overlap_height_ratio, - overlap_width_ratio=overlap_width_ratio, - postprocess_type=postprocess_type, - postprocess_match_metric=postprocess_match_metric, - postprocess_match_threshold=postprocess_match_threshold, - postprocess_class_agnostic=postprocess_class_agnostic, - ) - visual_result_2 = sahi.utils.cv.visualize_object_predictions( - image=numpy.array(image), - object_prediction_list=prediction_result_2.object_prediction_list, - ) - - output_2 = Image.fromarray(visual_result_2["image"]) - - return output_1, output_2 diff --git a/spaces/ML701G7/taim-gan/src/visualization/visualize.py b/spaces/ML701G7/taim-gan/src/visualization/visualize.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/XMem/inference/interact/fbrs/inference/clicker.py b/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/XMem/inference/interact/fbrs/inference/clicker.py deleted file mode 100644 index e1ea9cf319f88639fa0af45088cdf79c8954f83a..0000000000000000000000000000000000000000 --- a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/XMem/inference/interact/fbrs/inference/clicker.py +++ /dev/null @@ -1,103 +0,0 @@ -from collections import namedtuple - -import numpy as np -from copy import deepcopy -from scipy.ndimage import distance_transform_edt - -Click = namedtuple('Click', ['is_positive', 'coords']) - - -class Clicker(object): - def __init__(self, gt_mask=None, init_clicks=None, ignore_label=-1): - if gt_mask is not None: - self.gt_mask = gt_mask == 1 - self.not_ignore_mask = gt_mask != ignore_label - else: - self.gt_mask = None - - self.reset_clicks() - - if init_clicks is not None: - for click in init_clicks: - self.add_click(click) - - def make_next_click(self, pred_mask): - assert self.gt_mask is not None - click = self._get_click(pred_mask) - self.add_click(click) - - def get_clicks(self, clicks_limit=None): - return self.clicks_list[:clicks_limit] - - def _get_click(self, pred_mask, padding=True): - fn_mask = np.logical_and(np.logical_and(self.gt_mask, np.logical_not(pred_mask)), self.not_ignore_mask) - fp_mask = np.logical_and(np.logical_and(np.logical_not(self.gt_mask), pred_mask), self.not_ignore_mask) - - if padding: - fn_mask = np.pad(fn_mask, ((1, 1), (1, 1)), 'constant') - fp_mask = np.pad(fp_mask, ((1, 1), (1, 1)), 'constant') - - fn_mask_dt = distance_transform_edt(fn_mask) - fp_mask_dt = distance_transform_edt(fp_mask) - - if padding: - fn_mask_dt = fn_mask_dt[1:-1, 1:-1] - fp_mask_dt = fp_mask_dt[1:-1, 1:-1] - - fn_mask_dt = fn_mask_dt * self.not_clicked_map - fp_mask_dt = fp_mask_dt * 
self.not_clicked_map - - fn_max_dist = np.max(fn_mask_dt) - fp_max_dist = np.max(fp_mask_dt) - - is_positive = fn_max_dist > fp_max_dist - if is_positive: - coords_y, coords_x = np.where(fn_mask_dt == fn_max_dist) # coords is [y, x] - else: - coords_y, coords_x = np.where(fp_mask_dt == fp_max_dist) # coords is [y, x] - - return Click(is_positive=is_positive, coords=(coords_y[0], coords_x[0])) - - def add_click(self, click): - coords = click.coords - - if click.is_positive: - self.num_pos_clicks += 1 - else: - self.num_neg_clicks += 1 - - self.clicks_list.append(click) - if self.gt_mask is not None: - self.not_clicked_map[coords[0], coords[1]] = False - - def _remove_last_click(self): - click = self.clicks_list.pop() - coords = click.coords - - if click.is_positive: - self.num_pos_clicks -= 1 - else: - self.num_neg_clicks -= 1 - - if self.gt_mask is not None: - self.not_clicked_map[coords[0], coords[1]] = True - - def reset_clicks(self): - if self.gt_mask is not None: - self.not_clicked_map = np.ones_like(self.gt_mask, dtype=np.bool) - - self.num_pos_clicks = 0 - self.num_neg_clicks = 0 - - self.clicks_list = [] - - def get_state(self): - return deepcopy(self.clicks_list) - - def set_state(self, state): - self.reset_clicks() - for click in state: - self.add_click(click) - - def __len__(self): - return len(self.clicks_list) diff --git a/spaces/March07/PromptBench/app.py b/spaces/March07/PromptBench/app.py deleted file mode 100644 index 4de2f2014fcde8bff84cf756ae8dc24fb019cd2b..0000000000000000000000000000000000000000 --- a/spaces/March07/PromptBench/app.py +++ /dev/null @@ -1,105 +0,0 @@ -import streamlit as st -from parse import retrieve -from transfer import retrieve_transfer - -def main(): - st.sidebar.title("Choose Function") - function_choice = st.sidebar.radio("", ["PromptBench", "Retrieve Transferability Information"]) - - if function_choice == "PromptBench": - promptbench() - - elif function_choice == "Retrieve Transferability Information": - retrieve_transferability_information() - -def promptbench(): - st.title("PromptBench") - - model_name = st.selectbox( - "Select Model", - options=["T5", "Vicuna", "UL2", "ChatGPT"], - index=0, - ) - - dataset_name = st.selectbox( - "Select Dataset", - options=[ - "SST-2", "CoLA", "QQP", "MRPC", "MNLI", "QNLI", - "RTE", "WNLI", "MMLU", "SQuAD V2", "IWSLT 2017", "UN Multi", "Math" - ], - index=0, - ) - - attack_name = st.selectbox( - "Select Attack", - options=[ - "BertAttack", "CheckList", "DeepWordBug", "StressTest", "TextFooler", "TextBugger", "Semantic" - ], - index=0, - ) - - prompt_type = st.selectbox( - "Select Prompt Type", - options=["zeroshot-task", "zeroshot-role", "fewshot-task", "fewshot-role"], - index=0, - ) - - st.write(f"Model: {model_name}") - st.write(f"Dataset: {dataset_name}") - st.write(f"Prompt Type: {prompt_type}") - - if st.button("Retrieve"): - results = retrieve(model_name, dataset_name, attack_name, prompt_type) - - for result in results: - st.write("Original prompt: {}".format(result["origin prompt"])) - st.write("Original acc: {}".format(result["origin acc"])) - st.write("Attack prompt: {}".format(result["attack prompt"])) - st.write("Attack acc: {}".format(result["attack acc"])) - - -def retrieve_transferability_information(): - st.title("Retrieve Transferability Information") - source_model_name = st.selectbox( - "Select Source Model", - options=["T5", "Vicuna", "UL2", "ChatGPT"], - index=0, - ) - - target_model_name = st.selectbox( - "Select Target Model", - options=["T5", "Vicuna", "UL2", "ChatGPT"], - index=0, - 
) - - if source_model_name == target_model_name: - st.write("Source model and target model cannot be the same.") - return - - attack_name = st.selectbox( - "Select Attack", - options=[ - "BertAttack", "CheckList", "DeepWordBug", "StressTest", "TextFooler", "TextBugger", "Semantic" - ], - index=0, - ) - - if attack_name == "Semantic": - attack_name = "translation" - - shot = st.selectbox( - "Select Shot", - options=[0, 3], - index=0, - ) - - data = retrieve_transfer(source_model_name, target_model_name, attack_name, shot) - for d in data: - with st.expander(f"Dataset: {d['dataset']} Prompt Type: {d['type']}-oriented"): - st.write(f"Origin prompt: {d['origin_prompt']}") - st.write(f"Attack prompt: {d['atk_prompt']}") - st.write(f"Source model: origin acc: {d['origin_acc']}, attack acc: {d['atk_acc']}") - st.write(f"Target model: origin acc: {d['transfer_ori_acc']}, attack acc: {d['transfer_atk_acc']}") - -if __name__ == "__main__": - main() diff --git a/spaces/Marshalls/testmtd/analysis/cut_bvh.py b/spaces/Marshalls/testmtd/analysis/cut_bvh.py deleted file mode 100644 index b84fae8119dfd33720a1f63b65cdf03e8f0f980d..0000000000000000000000000000000000000000 --- a/spaces/Marshalls/testmtd/analysis/cut_bvh.py +++ /dev/null @@ -1,23 +0,0 @@ -from analysis.pymo.parsers import BVHParser -from analysis.pymo.data import Joint, MocapData -from analysis.pymo.preprocessing import * -from analysis.pymo.viz_tools import * -from analysis.pymo.writers import * -from sklearn.pipeline import Pipeline -from pathlib import Path -import sys -from feature_extraction.utils import distribute_tasks - -p = BVHParser() -datas = [] -filename = sys.argv[1] -cut_frames1 = int(sys.argv[2]) -cut_frames2 = int(sys.argv[3]) -data = p.parse(filename) - -data.values = data.values.iloc[cut_frames1:-cut_frames2] - -writer = BVHWriter() - -with open(filename,'w') as f: - writer.write(data, f) diff --git a/spaces/Mecca/whisper-webui/src/vadParallel.py b/spaces/Mecca/whisper-webui/src/vadParallel.py deleted file mode 100644 index c2323c0b632c34014ac1fe7ac79141b5bd9c5731..0000000000000000000000000000000000000000 --- a/spaces/Mecca/whisper-webui/src/vadParallel.py +++ /dev/null @@ -1,298 +0,0 @@ -import multiprocessing -from queue import Empty -import threading -import time -from src.hooks.progressListener import ProgressListener -from src.vad import AbstractTranscription, TranscriptionConfig, get_audio_duration - -from multiprocessing import Pool, Queue - -from typing import Any, Dict, List, Union -import os - -from src.whisper.abstractWhisperContainer import AbstractWhisperCallback - -class _ProgressListenerToQueue(ProgressListener): - def __init__(self, progress_queue: Queue): - self.progress_queue = progress_queue - self.progress_total = 0 - self.prev_progress = 0 - - def on_progress(self, current: Union[int, float], total: Union[int, float]): - delta = current - self.prev_progress - self.prev_progress = current - self.progress_total = total - self.progress_queue.put(delta) - - def on_finished(self): - if self.progress_total > self.prev_progress: - delta = self.progress_total - self.prev_progress - self.progress_queue.put(delta) - self.prev_progress = self.progress_total - -class ParallelContext: - def __init__(self, num_processes: int = None, auto_cleanup_timeout_seconds: float = None): - self.num_processes = num_processes - self.auto_cleanup_timeout_seconds = auto_cleanup_timeout_seconds - self.lock = threading.Lock() - - self.ref_count = 0 - self.pool = None - self.cleanup_timer = None - - def get_pool(self): - # Initialize 
pool lazily - if (self.pool is None): - context = multiprocessing.get_context('spawn') - self.pool = context.Pool(self.num_processes) - - self.ref_count = self.ref_count + 1 - - if (self.auto_cleanup_timeout_seconds is not None): - self._stop_auto_cleanup() - - return self.pool - - def return_pool(self, pool): - if (self.pool == pool and self.ref_count > 0): - self.ref_count = self.ref_count - 1 - - if (self.ref_count == 0): - if (self.auto_cleanup_timeout_seconds is not None): - self._start_auto_cleanup() - - def _start_auto_cleanup(self): - if (self.cleanup_timer is not None): - self.cleanup_timer.cancel() - self.cleanup_timer = threading.Timer(self.auto_cleanup_timeout_seconds, self._execute_cleanup) - self.cleanup_timer.start() - - print("Started auto cleanup of pool in " + str(self.auto_cleanup_timeout_seconds) + " seconds") - - def _stop_auto_cleanup(self): - if (self.cleanup_timer is not None): - self.cleanup_timer.cancel() - self.cleanup_timer = None - - print("Stopped auto cleanup of pool") - - def _execute_cleanup(self): - print("Executing cleanup of pool") - - if (self.ref_count == 0): - self.close() - - def close(self): - self._stop_auto_cleanup() - - if (self.pool is not None): - print("Closing pool of " + str(self.num_processes) + " processes") - self.pool.close() - self.pool.join() - self.pool = None - -class ParallelTranscriptionConfig(TranscriptionConfig): - def __init__(self, device_id: str, override_timestamps, initial_segment_index, copy: TranscriptionConfig = None): - super().__init__(copy.non_speech_strategy, copy.segment_padding_left, copy.segment_padding_right, copy.max_silent_period, copy.max_merge_size, copy.max_prompt_window, initial_segment_index) - self.device_id = device_id - self.override_timestamps = override_timestamps - -class ParallelTranscription(AbstractTranscription): - # Silero VAD typically takes about 3 seconds per minute, so there's no need to split the chunks - # into smaller segments than 2 minute (min 6 seconds per CPU core) - MIN_CPU_CHUNK_SIZE_SECONDS = 2 * 60 - - def __init__(self, sampling_rate: int = 16000): - super().__init__(sampling_rate=sampling_rate) - - def transcribe_parallel(self, transcription: AbstractTranscription, audio: str, whisperCallable: AbstractWhisperCallback, config: TranscriptionConfig, - cpu_device_count: int, gpu_devices: List[str], cpu_parallel_context: ParallelContext = None, gpu_parallel_context: ParallelContext = None, - progress_listener: ProgressListener = None): - total_duration = get_audio_duration(audio) - - # First, get the timestamps for the original audio - if (cpu_device_count > 1 and not transcription.is_transcribe_timestamps_fast()): - merged = self._get_merged_timestamps_parallel(transcription, audio, config, total_duration, cpu_device_count, cpu_parallel_context) - else: - timestamp_segments = transcription.get_transcribe_timestamps(audio, config, 0, total_duration) - merged = transcription.get_merged_timestamps(timestamp_segments, config, total_duration) - - # We must make sure the whisper model is downloaded - if (len(gpu_devices) > 1): - whisperCallable.model_container.ensure_downloaded() - - # Split into a list for each device - # TODO: Split by time instead of by number of chunks - merged_split = list(self._split(merged, len(gpu_devices))) - - # Parameters that will be passed to the transcribe function - parameters = [] - segment_index = config.initial_segment_index - - processing_manager = multiprocessing.Manager() - progress_queue = processing_manager.Queue() - - for i in 
range(len(gpu_devices)): - # Note that device_segment_list can be empty. But we will still create a process for it, - # as otherwise we run the risk of assigning the same device to multiple processes. - device_segment_list = list(merged_split[i]) if i < len(merged_split) else [] - device_id = gpu_devices[i] - - print("Device " + str(device_id) + " (index " + str(i) + ") has " + str(len(device_segment_list)) + " segments") - - # Create a new config with the given device ID - device_config = ParallelTranscriptionConfig(device_id, device_segment_list, segment_index, config) - segment_index += len(device_segment_list) - - progress_listener_to_queue = _ProgressListenerToQueue(progress_queue) - parameters.append([audio, whisperCallable, device_config, progress_listener_to_queue]); - - merged = { - 'text': '', - 'segments': [], - 'language': None - } - - created_context = False - - perf_start_gpu = time.perf_counter() - - # Spawn a separate process for each device - try: - if (gpu_parallel_context is None): - gpu_parallel_context = ParallelContext(len(gpu_devices)) - created_context = True - - # Get a pool of processes - pool = gpu_parallel_context.get_pool() - - # Run the transcription in parallel - results_async = pool.starmap_async(self.transcribe, parameters) - total_progress = 0 - - while not results_async.ready(): - try: - delta = progress_queue.get(timeout=5) # Set a timeout of 5 seconds - except Empty: - continue - - total_progress += delta - if progress_listener is not None: - progress_listener.on_progress(total_progress, total_duration) - - results = results_async.get() - - # Call the finished callback - if progress_listener is not None: - progress_listener.on_finished() - - for result in results: - # Merge the results - if (result['text'] is not None): - merged['text'] += result['text'] - if (result['segments'] is not None): - merged['segments'].extend(result['segments']) - if (result['language'] is not None): - merged['language'] = result['language'] - - finally: - # Return the pool to the context - if (gpu_parallel_context is not None): - gpu_parallel_context.return_pool(pool) - # Always close the context if we created it - if (created_context): - gpu_parallel_context.close() - - perf_end_gpu = time.perf_counter() - print("Parallel transcription took " + str(perf_end_gpu - perf_start_gpu) + " seconds") - - return merged - - def _get_merged_timestamps_parallel(self, transcription: AbstractTranscription, audio: str, config: TranscriptionConfig, total_duration: float, - cpu_device_count: int, cpu_parallel_context: ParallelContext = None): - parameters = [] - - chunk_size = max(total_duration / cpu_device_count, self.MIN_CPU_CHUNK_SIZE_SECONDS) - chunk_start = 0 - cpu_device_id = 0 - - perf_start_time = time.perf_counter() - - # Create chunks that will be processed on the CPU - while (chunk_start < total_duration): - chunk_end = min(chunk_start + chunk_size, total_duration) - - if (chunk_end - chunk_start < 1): - # No need to process chunks that are less than 1 second - break - - print("Parallel VAD: Executing chunk from " + str(chunk_start) + " to " + - str(chunk_end) + " on CPU device " + str(cpu_device_id)) - parameters.append([audio, config, chunk_start, chunk_end]); - - cpu_device_id += 1 - chunk_start = chunk_end - - created_context = False - - # Spawn a separate process for each device - try: - if (cpu_parallel_context is None): - cpu_parallel_context = ParallelContext(cpu_device_count) - created_context = True - - # Get a pool of processes - pool = cpu_parallel_context.get_pool() 
- - # Run the transcription in parallel. Note that transcription must be picklable. - results = pool.starmap(transcription.get_transcribe_timestamps, parameters) - - timestamps = [] - - # Flatten the results - for result in results: - timestamps.extend(result) - - merged = transcription.get_merged_timestamps(timestamps, config, total_duration) - - perf_end_time = time.perf_counter() - print("Parallel VAD processing took {} seconds".format(perf_end_time - perf_start_time)) - return merged - - finally: - # Return the pool to the context - if (cpu_parallel_context is not None): - cpu_parallel_context.return_pool(pool) - # Always close the context if we created it - if (created_context): - cpu_parallel_context.close() - - def get_transcribe_timestamps(self, audio: str, config: ParallelTranscriptionConfig, start_time: float, duration: float): - return [] - - def get_merged_timestamps(self, timestamps: List[Dict[str, Any]], config: ParallelTranscriptionConfig, total_duration: float): - # Override timestamps that will be processed - if (config.override_timestamps is not None): - print("(get_merged_timestamps) Using override timestamps of size " + str(len(config.override_timestamps))) - return config.override_timestamps - return super().get_merged_timestamps(timestamps, config, total_duration) - - def transcribe(self, audio: str, whisperCallable: AbstractWhisperCallback, config: ParallelTranscriptionConfig, - progressListener: ProgressListener = None): - # Override device ID the first time - if (os.environ.get("INITIALIZED", None) is None): - os.environ["INITIALIZED"] = "1" - - # Note that this may be None if the user didn't specify a device. In that case, Whisper will - # just use the default GPU device. - if (config.device_id is not None): - print("Using device " + config.device_id) - os.environ["CUDA_VISIBLE_DEVICES"] = config.device_id - - return super().transcribe(audio, whisperCallable, config, progressListener) - - def _split(self, a, n): - """Split a list into n approximately equal parts.""" - k, m = divmod(len(a), n) - return (a[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(n)) - diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/plugins/common.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/plugins/common.py deleted file mode 100644 index 7a6e8c6de712978c571224b9e20ea881d1116211..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/plugins/common.py +++ /dev/null @@ -1,194 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Tuple, Union - -import torch -import torch.nn as nn - -from mmocr.registry import MODELS - - -@MODELS.register_module() -class Maxpool2d(nn.Module): - """A wrapper around nn.Maxpool2d(). - - Args: - kernel_size (int or tuple(int)): Kernel size for max pooling layer - stride (int or tuple(int)): Stride for max pooling layer - padding (int or tuple(int)): Padding for pooling layer - """ - - def __init__(self, - kernel_size: Union[int, Tuple[int]], - stride: Union[int, Tuple[int]], - padding: Union[int, Tuple[int]] = 0, - **kwargs) -> None: - super().__init__() - self.model = nn.MaxPool2d(kernel_size, stride, padding) - - def forward(self, x) -> torch.Tensor: - """Forward function. - Args: - x (Tensor): Input feature map. - - Returns: - Tensor: Output tensor after Maxpooling layer. - """ - return self.model(x) - - -@MODELS.register_module() -class GCAModule(nn.Module): - """GCAModule in MASTER. - - Args: - in_channels (int): Channels of input tensor. 
- ratio (float): Scale ratio of in_channels. - n_head (int): Numbers of attention head. - pooling_type (str): Spatial pooling type. Options are [``avg``, - ``att``]. - scale_attn (bool): Whether to scale the attention map. Defaults to - False. - fusion_type (str): Fusion type of input and context. Options are - [``channel_add``, ``channel_mul``, ``channel_concat``]. - """ - - def __init__(self, - in_channels: int, - ratio: float, - n_head: int, - pooling_type: str = 'att', - scale_attn: bool = False, - fusion_type: str = 'channel_add', - **kwargs) -> None: - super().__init__() - - assert pooling_type in ['avg', 'att'] - assert fusion_type in ['channel_add', 'channel_mul', 'channel_concat'] - - # in_channels must be divided by headers evenly - assert in_channels % n_head == 0 and in_channels >= 8 - - self.n_head = n_head - self.in_channels = in_channels - self.ratio = ratio - self.planes = int(in_channels * ratio) - self.pooling_type = pooling_type - self.fusion_type = fusion_type - self.scale_attn = scale_attn - self.single_header_inplanes = int(in_channels / n_head) - - if pooling_type == 'att': - self.conv_mask = nn.Conv2d( - self.single_header_inplanes, 1, kernel_size=1) - self.softmax = nn.Softmax(dim=2) - else: - self.avg_pool = nn.AdaptiveAvgPool2d(1) - - if fusion_type == 'channel_add': - self.channel_add_conv = nn.Sequential( - nn.Conv2d(self.in_channels, self.planes, kernel_size=1), - nn.LayerNorm([self.planes, 1, 1]), nn.ReLU(inplace=True), - nn.Conv2d(self.planes, self.in_channels, kernel_size=1)) - elif fusion_type == 'channel_concat': - self.channel_concat_conv = nn.Sequential( - nn.Conv2d(self.in_channels, self.planes, kernel_size=1), - nn.LayerNorm([self.planes, 1, 1]), nn.ReLU(inplace=True), - nn.Conv2d(self.planes, self.in_channels, kernel_size=1)) - # for concat - self.cat_conv = nn.Conv2d( - 2 * self.in_channels, self.in_channels, kernel_size=1) - elif fusion_type == 'channel_mul': - self.channel_mul_conv = nn.Sequential( - nn.Conv2d(self.in_channels, self.planes, kernel_size=1), - nn.LayerNorm([self.planes, 1, 1]), nn.ReLU(inplace=True), - nn.Conv2d(self.planes, self.in_channels, kernel_size=1)) - - def spatial_pool(self, x: torch.Tensor) -> torch.Tensor: - """Spatial pooling function. - - Args: - x (Tensor): Input feature map. - - Returns: - Tensor: Output tensor after spatial pooling. 
- """ - batch, channel, height, width = x.size() - if self.pooling_type == 'att': - # [N*headers, C', H , W] C = headers * C' - x = x.view(batch * self.n_head, self.single_header_inplanes, - height, width) - input_x = x - - # [N*headers, C', H * W] C = headers * C' - input_x = input_x.view(batch * self.n_head, - self.single_header_inplanes, height * width) - - # [N*headers, 1, C', H * W] - input_x = input_x.unsqueeze(1) - # [N*headers, 1, H, W] - context_mask = self.conv_mask(x) - # [N*headers, 1, H * W] - context_mask = context_mask.view(batch * self.n_head, 1, - height * width) - - # scale variance - if self.scale_attn and self.n_head > 1: - context_mask = context_mask / \ - torch.sqrt(self.single_header_inplanes) - - # [N*headers, 1, H * W] - context_mask = self.softmax(context_mask) - - # [N*headers, 1, H * W, 1] - context_mask = context_mask.unsqueeze(-1) - # [N*headers, 1, C', 1] = - # [N*headers, 1, C', H * W] * [N*headers, 1, H * W, 1] - context = torch.matmul(input_x, context_mask) - - # [N, headers * C', 1, 1] - context = context.view(batch, - self.n_head * self.single_header_inplanes, - 1, 1) - else: - # [N, C, 1, 1] - context = self.avg_pool(x) - - return context - - def forward(self, x: torch.Tensor) -> torch.Tensor: - """Forward function. - - Args: - x (Tensor): Input feature map. - - Returns: - Tensor: Output tensor after GCAModule. - """ - # [N, C, 1, 1] - context = self.spatial_pool(x) - out = x - - if self.fusion_type == 'channel_mul': - # [N, C, 1, 1] - channel_mul_term = torch.sigmoid(self.channel_mul_conv(context)) - out = out * channel_mul_term - elif self.fusion_type == 'channel_add': - # [N, C, 1, 1] - channel_add_term = self.channel_add_conv(context) - out = out + channel_add_term - else: - # [N, C, 1, 1] - channel_concat_term = self.channel_concat_conv(context) - - # use concat - _, C1, _, _ = channel_concat_term.shape - N, C2, H, W = out.shape - - out = torch.cat([out, - channel_concat_term.expand(-1, -1, H, W)], - dim=1) - out = self.cat_conv(out) - out = nn.functional.layer_norm(out, [self.in_channels, H, W]) - out = nn.functional.relu(out) - - return out diff --git a/spaces/Mysterykey/Orange/README.md b/spaces/Mysterykey/Orange/README.md deleted file mode 100644 index c2138aa4b429312f8c0176e38a6533b10e705317..0000000000000000000000000000000000000000 --- a/spaces/Mysterykey/Orange/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Orange -emoji: 🍊 -colorFrom: yellow -colorTo: yellow -sdk: docker -pinned: false -duplicated_from: null ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/NCTCMumbai/NCTC/models/research/cognitive_mapping_and_planning/scripts/__init__.py b/spaces/NCTCMumbai/NCTC/models/research/cognitive_mapping_and_planning/scripts/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/NMEX/rvc-hoyogame-v2/lib/infer_pack/modules.py b/spaces/NMEX/rvc-hoyogame-v2/lib/infer_pack/modules.py deleted file mode 100644 index c83289df7c79a4810dacd15c050148544ba0b6a9..0000000000000000000000000000000000000000 --- a/spaces/NMEX/rvc-hoyogame-v2/lib/infer_pack/modules.py +++ /dev/null @@ -1,522 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -from 
lib.infer_pack import commons -from lib.infer_pack.commons import init_weights, get_padding -from lib.infer_pack.transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__( - self, - in_channels, - hidden_channels, - out_channels, - kernel_size, - n_layers, - p_dropout, - ): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." - - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append( - nn.Conv1d( - in_channels, hidden_channels, kernel_size, padding=kernel_size // 2 - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout)) - for _ in range(n_layers - 1): - self.conv_layers.append( - nn.Conv1d( - hidden_channels, - hidden_channels, - kernel_size, - padding=kernel_size // 2, - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size**i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append( - nn.Conv1d( - channels, - channels, - kernel_size, - groups=channels, - dilation=dilation, - padding=padding, - ) - ) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__( - self, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - p_dropout=0, - ): - super(WN, self).__init__() - assert kernel_size % 2 == 1 - self.hidden_channels = hidden_channels - self.kernel_size = (kernel_size,) - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = 
torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d( - gin_channels, 2 * hidden_channels * n_layers, 1 - ) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight") - - for i in range(n_layers): - dilation = dilation_rate**i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d( - hidden_channels, - 2 * hidden_channels, - kernel_size, - dilation=dilation, - padding=padding, - ) - in_layer = torch.nn.utils.weight_norm(in_layer, name="weight") - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:, : self.hidden_channels, :] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:, self.hidden_channels :, :] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]), - ) - ), - ] - ) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - ] - ) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in 
self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - ] - ) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels, 1)) - self.logs = nn.Parameter(torch.zeros(channels, 1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1, 2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False, - ): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=p_dropout, - gin_channels=gin_channels, - ) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels] * 2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1, 2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class ConvFlow(nn.Module): - def __init__( - self, - in_channels, - filter_channels, - kernel_size, - n_layers, - num_bins=10, - tail_bound=5.0, - ): - 
super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0) - self.proj = nn.Conv1d( - filter_channels, self.half_channels * (num_bins * 3 - 1), 1 - ) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] - - unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt( - self.filter_channels - ) - unnormalized_derivatives = h[..., 2 * self.num_bins :] - - x1, logabsdet = piecewise_rational_quadratic_transform( - x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails="linear", - tail_bound=self.tail_bound, - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1, 2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/NNDM/img-to-music/constants.py b/spaces/NNDM/img-to-music/constants.py deleted file mode 100644 index 86863d1b778d4c66f0d8e1e0b699f1bb937c1d50..0000000000000000000000000000000000000000 --- a/spaces/NNDM/img-to-music/constants.py +++ /dev/null @@ -1,9 +0,0 @@ -import numpy as np -import os - -MUBERT_LICENSE = os.environ.get('MUBERT_LICENSE') -MUBERT_TOKEN = os.environ.get('MUBERT_TOKEN') - -MUBERT_MODE = "loop" -MUBERT_TAGS_STRING = 'tribal,action,kids,neo-classic,run 130,pumped,jazz / funk,ethnic,dubtechno,reggae,acid jazz,liquidfunk,funk,witch house,tech house,underground,artists,mystical,disco,sensorium,r&b,agender,psychedelic trance / psytrance,peaceful,run 140,piano,run 160,setting,meditation,christmas,ambient,horror,cinematic,electro house,idm,bass,minimal,underscore,drums,glitchy,beautiful,technology,tribal house,country pop,jazz & funk,documentary,space,classical,valentines,chillstep,experimental,trap,new jack swing,drama,post-rock,tense,corporate,neutral,happy,analog,funky,spiritual,sberzvuk special,chill hop,dramatic,catchy,holidays,fitness 90,optimistic,orchestra,acid techno,energizing,romantic,minimal house,breaks,hyper pop,warm up,dreamy,dark,urban,microfunk,dub,nu disco,vogue,keys,hardcore,aggressive,indie,electro funk,beauty,relaxing,trance,pop,hiphop,soft,acoustic,chillrave / ethno-house,deep techno,angry,dance,fun,dubstep,tropical,latin pop,heroic,world music,inspirational,uplifting,atmosphere,art,epic,advertising,chillout,scary,spooky,slow ballad,saxophone,summer,erotic,jazzy,energy 100,kara mar,xmas,atmospheric,indie pop,hip-hop,yoga,reggaeton,lounge,travel,running,folk,chillrave & ethno-house,detective,darkambient,chill,fantasy,minimal techno,special,night,tropical house,downtempo,lullaby,meditative,upbeat,glitch hop,fitness,neurofunk,sexual,indie rock,future pop,jazz,cyberpunk,melancholic,happy hardcore,family / kids,synths,electric guitar,comedy,psychedelic trance & psytrance,edm,psychedelic rock,calm,zen,bells,podcast,melodic house,ethnic percussion,nature,heavy,bassline,indie dance,techno,drumnbass,synth 
pop,vaporwave,sad,8-bit,chillgressive,deep,orchestral,futuristic,hardtechno,nostalgic,big room,sci-fi,tutorial,joyful,pads,minimal 170,drill,ethnic 108,amusing,sleepy ambient,psychill,italo disco,lofi,house,acoustic guitar,bassline house,rock,k-pop,synthwave,deep house,electronica,gabber,nightlife,sport & fitness,road trip,celebration,electro,disco house,electronic' -MUBERT_TAGS = np.array(MUBERT_TAGS_STRING.split(',')) \ No newline at end of file diff --git a/spaces/Narrativa/semantic_news_search/README.md b/spaces/Narrativa/semantic_news_search/README.md deleted file mode 100644 index ad012ad17762bbe9e85446b622107ddc61d2354a..0000000000000000000000000000000000000000 --- a/spaces/Narrativa/semantic_news_search/README.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Semantic News Search -emoji: 📈 -colorFrom: gray -colorTo: blue -sdk: streamlit -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`models`: _List[string]_ -HF model IDs (like "gpt2" or "deepset/roberta-base-squad2") used in the Space. -Will be parsed automatically from your code if not specified here. - -`datasets`: _List[string]_ -HF dataset IDs (like "common_voice" or "oscar-corpus/OSCAR-2109") used in the Space. -Will be parsed automatically from your code if not specified here. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
diff --git a/spaces/Nee001/bing0/src/components/ui/select.tsx b/spaces/Nee001/bing0/src/components/ui/select.tsx deleted file mode 100644 index 77f12c2996f541b97663de4c9e20ab34d4ec2fac..0000000000000000000000000000000000000000 --- a/spaces/Nee001/bing0/src/components/ui/select.tsx +++ /dev/null @@ -1,123 +0,0 @@ -'use client' - -import * as React from 'react' -import * as SelectPrimitive from '@radix-ui/react-select' - -import { cn } from '@/lib/utils' -import { - IconArrowDown, - IconCheck, - IconChevronUpDown -} from '@/components/ui/icons' - -const Select = SelectPrimitive.Root - -const SelectGroup = SelectPrimitive.Group - -const SelectValue = SelectPrimitive.Value - -const SelectTrigger = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - {children} - - - - -)) -SelectTrigger.displayName = SelectPrimitive.Trigger.displayName - -const SelectContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, position = 'popper', ...props }, ref) => ( - - - - {children} - - - -)) -SelectContent.displayName = SelectPrimitive.Content.displayName - -const SelectLabel = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -SelectLabel.displayName = SelectPrimitive.Label.displayName - -const SelectItem = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - - - - - - {children} - -)) -SelectItem.displayName = SelectPrimitive.Item.displayName - -const SelectSeparator = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -SelectSeparator.displayName = SelectPrimitive.Separator.displayName - -export { - Select, - SelectGroup, - SelectValue, - SelectTrigger, - SelectContent, - SelectLabel, - SelectItem, - SelectSeparator -} diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/model_parallel/modules/transformer_layer.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/model_parallel/modules/transformer_layer.py deleted file mode 100644 index 7ab53c6e5f12f15562717effb86ab8cb8d6b4fa3..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/model_parallel/modules/transformer_layer.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from fairseq.model_parallel.modules import ModelParallelMultiheadAttention -from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer - - -try: - from fairseq.model_parallel.megatron.mpu import ( - ColumnParallelLinear, - RowParallelLinear, - ) - - has_megatron_submodule = True -except (ImportError, ModuleNotFoundError): - has_megatron_submodule = False - - -class ModelParallelTransformerEncoderLayer(TransformerEncoderLayer): - """Encoder layer block over multiple gpus. - - See "Megatron-LM: https://arxiv.org/pdf/1909.08053.pdf" for more details. 
- """ - - def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size): - if q_noise > 0: - raise NotImplementedError - return ColumnParallelLinear(input_dim, output_dim, gather_output=False) - - def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size): - if q_noise > 0: - raise NotImplementedError - return RowParallelLinear(input_dim, output_dim, input_is_parallel=True) - - def build_self_attention(self, embed_dim, args, **unused_kwargs): - return ModelParallelMultiheadAttention( - embed_dim, - args.encoder_attention_heads, - dropout=args.attention_dropout, - self_attention=True, - ) - - -class ModelParallelTransformerDecoderLayer(TransformerDecoderLayer): - """Decoder layer block. - - See "Megatron-LM: https://arxiv.org/pdf/1909.08053.pdf" for more details. - """ - - def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size): - if q_noise > 0: - raise NotImplementedError - return ColumnParallelLinear(input_dim, output_dim, gather_output=False) - - def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size): - if q_noise > 0: - raise NotImplementedError - return RowParallelLinear(input_dim, output_dim, input_is_parallel=True) - - def build_self_attention(self, embed_dim, args, **unused_kwargs): - return ModelParallelMultiheadAttention( - embed_dim=embed_dim, - num_heads=args.decoder_attention_heads, - dropout=args.attention_dropout, - self_attention=not getattr(args, "cross_self_attention", False), - ) - - def build_encoder_attention(self, embed_dim, args, **unused_kwargs): - return ModelParallelMultiheadAttention( - embed_dim=embed_dim, - num_heads=args.decoder_attention_heads, - kdim=getattr(args, "encoder_embed_dim", None), - vdim=getattr(args, "encoder_embed_dim", None), - dropout=args.attention_dropout, - encoder_decoder_attention=True, - ) diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/tests/test_character_token_embedder.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/tests/test_character_token_embedder.py deleted file mode 100644 index 24940ebd21a0e4465ca6052409353a3179e9cf6d..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/tests/test_character_token_embedder.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import unittest - -import torch -from fairseq.data import Dictionary -from fairseq.modules import CharacterTokenEmbedder - - -class TestCharacterTokenEmbedder(unittest.TestCase): - def test_character_token_embedder(self): - vocab = Dictionary() - vocab.add_symbol("hello") - vocab.add_symbol("there") - - embedder = CharacterTokenEmbedder( - vocab, [(2, 16), (4, 32), (8, 64), (16, 2)], 64, 5, 2 - ) - - test_sents = [["hello", "unk", "there"], ["there"], ["hello", "there"]] - max_len = max(len(s) for s in test_sents) - input = torch.LongTensor(len(test_sents), max_len + 2).fill_(vocab.pad()) - for i in range(len(test_sents)): - input[i][0] = vocab.eos() - for j in range(len(test_sents[i])): - input[i][j + 1] = vocab.index(test_sents[i][j]) - input[i][j + 2] = vocab.eos() - embs = embedder(input) - - assert embs.size() == (len(test_sents), max_len + 2, 5) - self.assertAlmostEqual(embs[0][0], embs[1][0]) - self.assertAlmostEqual(embs[0][0], embs[0][-1]) - self.assertAlmostEqual(embs[0][1], embs[2][1]) - self.assertAlmostEqual(embs[0][3], embs[1][1]) - - embs.sum().backward() - assert embedder.char_embeddings.weight.grad is not None - - def assertAlmostEqual(self, t1, t2): - self.assertEqual(t1.size(), t2.size(), "size mismatch") - self.assertLess((t1 - t2).abs().max(), 1e-6) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/criss/sentence_retrieval/sentence_retrieval_tatoeba.sh b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/criss/sentence_retrieval/sentence_retrieval_tatoeba.sh deleted file mode 100644 index 0428d8bef9d426ac3e664cd281ce0b688f5f580f..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/criss/sentence_retrieval/sentence_retrieval_tatoeba.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/bin/bash -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
-# -source_lang=kk_KZ -target_lang=en_XX -MODEL=criss_checkpoints/criss.3rd.pt -SPM=criss_checkpoints/sentence.bpe.model -SPLIT=test -LANG_DICT=criss_checkpoints/lang_dict.txt -ENCODER_ANALYSIS=sentence_retrieval/encoder_analysis.py -SAVE_ENCODER=save_encoder.py -ENCODER_SAVE_ROOT=sentence_embeddings/$MODEL - - - -DATA_DIR=data_tmp -INPUT_DIR=$DATA_DIR/${source_lang}-${target_lang}-tatoeba -ENCODER_SAVE_DIR=${ENCODER_SAVE_ROOT}/${source_lang}-${target_lang} -mkdir -p $ENCODER_SAVE_DIR/${target_lang} -mkdir -p $ENCODER_SAVE_DIR/${source_lang} - -# Save encoder outputs for source sentences -python $SAVE_ENCODER \ - ${INPUT_DIR} \ - --path ${MODEL} \ - --task translation_multi_simple_epoch \ - --lang-dict ${LANG_DICT} \ - --gen-subset ${SPLIT} \ - --bpe 'sentencepiece' \ - --lang-pairs ${source_lang}-${target_lang} \ - -s ${source_lang} -t ${target_lang} \ - --sentencepiece-model ${SPM} \ - --remove-bpe 'sentencepiece' \ - --beam 1 \ - --lang-tok-style mbart \ - --encoder-save-dir ${ENCODER_SAVE_DIR}/${source_lang} - -# Save encoder outputs for target sentences -python $SAVE_ENCODER \ - ${INPUT_DIR} \ - --path ${MODEL} \ - --lang-dict ${LANG_DICT} \ - --task translation_multi_simple_epoch \ - --gen-subset ${SPLIT} \ - --bpe 'sentencepiece' \ - --lang-pairs ${target_lang}-${source_lang} \ - -t ${source_lang} -s ${target_lang} \ - --sentencepiece-model ${SPM} \ - --remove-bpe 'sentencepiece' \ - --beam 1 \ - --lang-tok-style mbart \ - --encoder-save-dir ${ENCODER_SAVE_DIR}/${target_lang} - -# Analyze sentence retrieval accuracy -python $ENCODER_ANALYSIS --langs "${source_lang},${target_lang}" ${ENCODER_SAVE_DIR} diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/continuation_eval.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/continuation_eval.py deleted file mode 100644 index 72b92a341dcd1b82035af72b8a6b4edc65783ecc..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/continuation_eval.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- - -from collections import defaultdict -import numpy as np -from misc.bleu_utils import sentence_bleu -import json -import warnings - - -def get_args(): - import argparse - - parser = argparse.ArgumentParser("Tool to calculate Continuation-BLEU2") - parser.add_argument('--asr-transcript', type=str, - help='Path to the transcript file.') - parser.add_argument('--prompts-description', type=str, - help='Path to the ground-truth continuation') - parser.add_argument('--manifest', type=str, required=True) - parser.add_argument('--take-shortest', type=int, default=1000) - - args = parser.parse_args() - - return args - - -def main(): - # NLTK produces warnings - warnings.filterwarnings("ignore") - - args = get_args() - - with open(args.prompts_description, 'r') as fin: - original_continuations = json.loads(fin.read()) - - sequence2length = [(k, v[0]) for k, v in original_continuations.items()] - assert all(float(v) >= 6.0 for (_, v) in sequence2length) # 6 seconds - - sequence2length.sort(key=lambda x: x[1]) - to_take = set(v[0] for v in sequence2length[:args.take_shortest]) - - with open(args.manifest, 'r') as fin: - fin.readline() - - linenum2file = dict([ - (i, l.split("__")[0]) for (i, l) in enumerate(fin) - ]) - - max_files = max(linenum2file.keys()) - continuations = defaultdict(list) - - mean_length_after = 0 - n_examples = 0 - - with open(args.asr_transcript, 'r') as fin: - for line in fin: - n_examples += 1 - line = line.split() - sequence_id = int(line[-1].split('-')[1][:-1]) - - assert sequence_id <= max_files - - sequence_name = linenum2file[sequence_id] - - continuations[sequence_name].append(line[:-1]) - mean_length_after += len(line) - - mean_length_after /= n_examples - print(f'Mean length of continuations, in words: {mean_length_after}') - metric_values = [] - - mean_ground_truth_words = 0 - n_examples = 0 - n_candidates = 0 - - for k, candidates in continuations.items(): - if k not in to_take: - continue - - n_examples += 1 - - ground_truth = original_continuations[k][1].split() - n_candidates += len(candidates) - bleu = sentence_bleu(candidates, ground_truth, weights=( - 0.5, 0.5), no_length_penalty=True, averaging_mode="geometric") - mean_ground_truth_words += len(ground_truth) - - metric_values.append(bleu) - - n = len(metric_values) - print( - f'Median BLEU over {n} examples: {np.median(metric_values)} +- {np.std(metric_values) / np.sqrt(n)}') - - -if __name__ == '__main__': - main() diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/data/encoders/moses_tokenizer.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/data/encoders/moses_tokenizer.py deleted file mode 100644 index e236dad167a037a8ed95f7fc8292b27b10d580b0..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/data/encoders/moses_tokenizer.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -from dataclasses import dataclass, field - -from fairseq.data.encoders import register_tokenizer -from fairseq.dataclass import FairseqDataclass - - -@dataclass -class MosesTokenizerConfig(FairseqDataclass): - source_lang: str = field(default="en", metadata={"help": "source language"}) - target_lang: str = field(default="en", metadata={"help": "target language"}) - moses_no_dash_splits: bool = field( - default=False, metadata={"help": "don't apply dash split rules"} - ) - moses_no_escape: bool = field( - default=False, - metadata={"help": "don't perform HTML escaping on apostrophe, quotes, etc."}, - ) - - -@register_tokenizer("moses", dataclass=MosesTokenizerConfig) -class MosesTokenizer(object): - def __init__(self, cfg: MosesTokenizerConfig): - self.cfg = cfg - - try: - from sacremoses import MosesTokenizer, MosesDetokenizer - - self.tok = MosesTokenizer(cfg.source_lang) - self.detok = MosesDetokenizer(cfg.target_lang) - except ImportError: - raise ImportError( - "Please install Moses tokenizer with: pip install sacremoses" - ) - - def encode(self, x: str) -> str: - return self.tok.tokenize( - x, - aggressive_dash_splits=(not self.cfg.moses_no_dash_splits), - return_str=True, - escape=(not self.cfg.moses_no_escape), - ) - - def decode(self, x: str) -> str: - return self.detok.detokenize(x.split()) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/ema/ema.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/ema/ema.py deleted file mode 100644 index 010b60ba2fd766340d2c5b8ba96f9e57c6fe25b5..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/ema/ema.py +++ /dev/null @@ -1,200 +0,0 @@ -#!/usr/bin/env python3 - -""" -This module has the EMA class used to store a copy of the exponentially decayed -model params. - -Typical usage of EMA class involves initializing an object using an existing -model (random or from a seed model) and setting the config like ema_decay, -ema_start_update which determine how the EMA model is updated. After every -update of the model i.e. at the end of the train_step, the EMA should be updated -by passing the new model to the EMA.step function. The EMA model state dict -can be stored in the extra state under the key of "ema" and dumped -into a checkpoint and loaded. The EMA object can be passed to tasks -by setting task.uses_ema property. -EMA is a smoothed/ensemble model which might have better performance -when used for inference or further fine-tuning. EMA class has a -reverse function to load the EMA params into a model and use it -like a regular model. -""" - -import copy -import logging - -import torch -from fairseq import checkpoint_utils - - -class EMA(object): - """Exponential Moving Average of Fairseq Models - EMA keeps a copy of the exponentially decayed model params. - The set of params should include both gradient-descent and - non-gradient descent params, such as batch mean/var and buffers. - This is a modified implementation of - the open source code in https://github.com/zhawe01/fairseq-gec.git, - and internal source code in - fbcode/mobile-vision/projects/classification_pytorch/lib/utils/model_ema.py. - - Similar to TF EMA. - https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage. - EMA provides a averaged and smoothed set of model weights, and has been shown to - improve vision models. EMA class does all necessary functions to update, reload, - or init EMA methods. - - EMA object is initialized from an arbitrary model. 
By default, it is stored in - the same device (unless device specified at initialization) and with the - same precision as the model (unless ema_fp32 is True). ema_fp32 is recommended. - This stores the EMA parameters in fp32 only for the EMA update step, and - is used at the default precision otherwise. - EMA is usually enabled using EMAConfig with store_ema=True. Some important - parameters to configure EMA are - 1) ema_decay - The decay of EMA - 2) ema_update_freq - EMA is updated every this many model updates. - 3) ema_start_update - Start EMA update after this many model updates [default 0] - - Key methods: - 1) step - One update of EMA using new model - 2) restore - Update EMA from a state dict - 3) reverse - Load EMA into a model - 4) get_decay, _set_decay - Used to get or set the decay. Note _set_decay is - called from step. - 5) build_fp32_params - Used to initialize or update the fp32 copy of EMA params. - Note this is enabled only when ema_fp32=True - """ - - def __init__(self, model, config, device=None): - """ - @param model model to initialize the EMA with - @param config EMAConfig object with configuration like - ema_decay, ema_update_freq, ema_fp32 - @param device If provided, copy EMA to this device (e.g. gpu). - Otherwise EMA is in the same device as the model. - """ - - self.decay = config.ema_decay - self.model = copy.deepcopy(model) - self.model.requires_grad_(False) - self.config = config - self.fp32_params = {} - - if self.config.ema_seed_model is not None: - state = checkpoint_utils.load_ema_from_checkpoint(self.config.ema_seed_model) - self.model.load_state_dict(state["model"], strict=True) - - if device is not None: - logging.info(f"Copying EMA model to device {device}") - self.model = self.model.to(device=device) - - if self.config.ema_fp32: - self.build_fp32_params() - - self.update_freq_counter = 0 - - def get_model(self): - return self.model - - def build_fp32_params(self, state_dict=None): - """ - Store a copy of the EMA params in fp32. - If state dict is passed, the EMA params is copied from - the provided state dict. Otherwise, it is copied from the - current EMA model parameters. - """ - if not self.config.ema_fp32: - raise RuntimeError( - "build_fp32_params should not be called if ema_fp32=False. " - "Use ema_fp32=True if this is really intended." 
- ) - - if state_dict is None: - state_dict = self.model.state_dict() - - def _to_float(t): - return t.float() if torch.is_floating_point(t) else t - - # for non-float params (like registered symbols), they are copied into this dict and covered in each update - for param_key in state_dict: - if param_key in self.fp32_params: - self.fp32_params[param_key].copy_(state_dict[param_key]) - else: - self.fp32_params[param_key] = _to_float(state_dict[param_key]) - - def restore(self, state_dict, build_fp32_params=False): - """ Load data from a model spec into EMA model """ - self.model.load_state_dict(state_dict, strict=False) - if build_fp32_params: - self.build_fp32_params(state_dict) - - def _set_decay(self, decay): - self.decay = decay - - def get_decay(self): - return self.decay - - def _step_internal(self, new_model, updates=None): - """ One update of the EMA model based on new model weights """ - decay = self.decay - - ema_state_dict = {} - ema_params = self.fp32_params if self.config.ema_fp32 else self.model.state_dict() - for key, param in new_model.state_dict().items(): - try: - ema_param = ema_params[key] - except KeyError: - ema_param = param.float().clone() if param.ndim == 1 else copy.deepcopy(param) - - if param.shape != ema_param.shape: - raise ValueError( - "incompatible tensor shapes between model param and ema param" - + "{} vs. {}".format(param.shape, ema_param.shape) - ) - if "version" in key: - # Do not decay a model.version pytorch param - continue - - # for non-float params (like registered symbols), they are covered in each update - if not torch.is_floating_point(ema_param): - if ema_param.dtype != param.dtype: - raise ValueError( - "incompatible tensor dtypes between model param and ema param" - + "{} vs. {}".format(param.dtype, ema_param.dtype) - ) - ema_param.copy_(param) - else: - ema_param.mul_(decay) - ema_param.add_(param.to(dtype=ema_param.dtype), alpha=1-decay) - ema_state_dict[key] = ema_param - self.restore(ema_state_dict, build_fp32_params=False) - - def step(self, new_model, updates=None): - """ - One update of EMA which is done every self.config.ema_update_freq - updates of the model. - - @param updates The current number of model updates done. - Decay is set of 0 if model updates < ema_start_update, which means - the model will be simply copied over to the EMA. - When model updates >= ema_start_updates, then EMA is updated with - a decay of self.config.ema_decay. - """ - self._set_decay( - 0 - if updates is not None - and updates < self.config.ema_start_update - else self.config.ema_decay - ) - if updates is not None and self.config.ema_update_freq > 1: - self.update_freq_counter += 1 - if self.update_freq_counter >= self.config.ema_update_freq: - self._step_internal(new_model, updates) - self.update_freq_counter = 0 - else: - self._step_internal(new_model, updates) - - def reverse(self, model): - """ - Load the model parameters from EMA model. - Useful for inference or fine-tuning from the EMA model. - """ - model.load_state_dict(self.model.state_dict(), strict=False) - return model diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/nat/insertion_transformer.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/nat/insertion_transformer.py deleted file mode 100644 index bc28000f59a3b9e8098f9fe710cc8335d39eea3e..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/nat/insertion_transformer.py +++ /dev/null @@ -1,280 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import numpy as np -import torch -import torch.nn.functional as F -from fairseq.models import register_model, register_model_architecture -from fairseq.models.nat import ( - FairseqNATModel, - LevenshteinTransformerDecoder, - LevenshteinTransformerModel, - ensemble_decoder, -) -from fairseq.models.transformer import Linear -from fairseq.modules.transformer_sentence_encoder import init_bert_params -from fairseq.utils import new_arange - - -class NegativeDistanceScore(object): - def __init__(self): - - # pre-compute some values - self.scores = {} - - self.scores[0.5] = self.compute_score_full(50, 0.5) - self.scores[1.0] = self.compute_score_full(50, 1.0) - self.scores[2.0] = self.compute_score_full(50, 2.0) - - def __call__(self, i, L, tau): - if (tau is None) or (tau > 1000): - return 1 / L - - if tau in self.scores: - if L < self.scores[tau].shape[0]: - return self.scores[tau][L - 1, i] - return self.compute_score(L, tau)[i] - - def compute_score(self, L, tau): - s = np.array([-abs(L / 2 - i) / tau for i in range(L)]) - s = np.exp(s - s.max()) - return s / s.sum() - - def compute_score_full(self, L, tau): - s = -abs(np.arange(0, L - 1)[:, None] / 2 - np.arange(L)[None, :]) / tau - s = np.tril(s, 0) + np.triu(s - float("inf"), 1) - s = np.exp(s - s.max(1, keepdims=True)) - return s / s.sum(1, keepdims=True) - - -neg_scorer = NegativeDistanceScore() - - -def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx, vocab_size, tau=None): - try: - from fairseq import libnat - except ImportError as e: - import sys - - sys.stderr.write("ERROR: missing libnat. run `pip install --editable .`\n") - raise e - - B = in_tokens.size(0) - T = in_tokens.size(1) - V = vocab_size - - with torch.cuda.device_of(in_tokens): - in_tokens_list = [ - [t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist()) - ] - out_tokens_list = [ - [t for t in s if t != padding_idx] - for i, s in enumerate(out_tokens.tolist()) - ] - - full_labels = libnat.suggested_ed2_path( - in_tokens_list, out_tokens_list, padding_idx - ) - insert_labels = [a[:-1] for a in full_labels] - - # numericalize1 - insert_label_tensors = in_tokens.new_zeros(B * (T - 1) * V).float() - insert_index, insert_labels = zip( - *[ - (w + (j + i * (T - 1)) * V, neg_scorer(k, len(label), tau)) - for i, labels in enumerate(insert_labels) - for j, label in enumerate(labels[1:-1]) - for k, w in enumerate(label) - ] - ) # HACK 1:-1 - insert_index, insert_labels = [ - torch.tensor(list(a), device=in_tokens.device) - for a in [insert_index, insert_labels] - ] - insert_label_tensors.scatter_(0, insert_index.long(), insert_labels) - insert_label_tensors = insert_label_tensors.view(B, T - 1, V) - - return insert_label_tensors - - -def _apply_ins_words(in_tokens, in_scores, word_ins_pred, word_ins_scores, padding_idx): - - padding_masks = in_tokens[:, 1:].eq(padding_idx) - word_ins_scores.masked_fill_(padding_masks, 0.0) - word_ins_pred.masked_fill_(padding_masks, padding_idx) - - in_coords = new_arange(in_tokens).type_as(in_scores) - - # shift all padding predictions to infinite - out_coords = (in_coords[:, 1:] - 0.5).masked_fill( - word_ins_pred.eq(padding_idx), float("inf") - ) - out_coords = torch.cat([in_coords, out_coords], 1).sort(-1)[1] - out_tokens = torch.cat([in_tokens, word_ins_pred], 1).gather(1, out_coords) - out_scores = torch.cat([in_scores, word_ins_scores], 1).gather(1, out_coords) - return 
out_tokens, out_scores - - -@register_model("insertion_transformer") -class InsertionTransformerModel(LevenshteinTransformerModel): - def __init__(self, args, encoder, decoder): - super().__init__(args, encoder, decoder) - - @staticmethod - def add_args(parser): - FairseqNATModel.add_args(parser) - parser.add_argument("--label-tau", default=None, type=float) - - @classmethod - def build_decoder(cls, args, tgt_dict, embed_tokens): - decoder = InsertionTransformerDecoder(args, tgt_dict, embed_tokens) - if getattr(args, "apply_bert_init", False): - decoder.apply(init_bert_params) - return decoder - - def forward( - self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs - ): - - assert tgt_tokens is not None, "forward function only supports training." - - # encoding - encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) - - # generate training labels for insertion - word_ins_out = self.decoder.forward_word_ins( - normalize=False, - prev_output_tokens=prev_output_tokens, - encoder_out=encoder_out, - ) - - word_ins_tgt = _get_ins_targets( - prev_output_tokens, - tgt_tokens, - self.pad, - self.unk, - len(self.tgt_dict), - tau=self.decoder.label_tau, - ).type_as(word_ins_out) - word_ins_masks = prev_output_tokens[:, 1:].ne(self.pad) - - return { - "word_ins": { - "out": word_ins_out, - "tgt": word_ins_tgt, - "mask": word_ins_masks, - "ls": self.args.label_smoothing, - "nll_loss": True, - } - } - - def forward_decoder( - self, decoder_out, encoder_out, eos_penalty=0.0, max_ratio=None, **kwargs - ): - - output_tokens = decoder_out.output_tokens - output_scores = decoder_out.output_scores - history = decoder_out.history - - # TODO: decoding for InsertionTransformer - word_ins_score = self.decoder.forward_word_ins( - normalize=True, prev_output_tokens=output_tokens, encoder_out=encoder_out - ) - - if eos_penalty > 0.0: - word_ins_score[:, :, self.pad] -= eos_penalty - word_ins_score, word_ins_pred = word_ins_score.max(-1) - output_tokens, output_scores = _apply_ins_words( - output_tokens, output_scores, word_ins_pred, word_ins_score, self.pad - ) - - # delete some unnecessary paddings - cut_off = output_tokens.ne(self.pad).sum(1).max() - output_tokens = output_tokens[:, :cut_off] - output_scores = output_scores[:, :cut_off] - - if history is not None: - history.append(output_tokens.clone()) - - return decoder_out._replace( - output_tokens=output_tokens, - output_scores=output_scores, - attn=None, - history=history, - ) - - -class InsertionTransformerDecoder(LevenshteinTransformerDecoder): - def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): - # use the TransformerDecoder's __init__ - super(LevenshteinTransformerDecoder, self).__init__( - args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn - ) - - self.dictionary = dictionary - self.bos = dictionary.bos() - self.unk = dictionary.unk() - self.eos = dictionary.eos() - self.pool_out = Linear(self.output_embed_dim * 2, self.output_embed_dim) - - self.label_tau = getattr(args, "label_tau", None) - - @ensemble_decoder - def forward_word_ins(self, normalize, encoder_out, prev_output_tokens): - features = self.extract_features(prev_output_tokens, encoder_out=encoder_out)[0] - features = self.pool_out( - torch.cat([features[:, :-1, :], features[:, 1:, :]], 2) - ) - decoder_out = self.output_layer(features) - return F.log_softmax(decoder_out, -1) if normalize else decoder_out - - def forward_mask_ins(self, *args, **kwargs): - raise NotImplementedError - - def forward_word_del(self, *args, 
**kwargs): - raise NotImplementedError - - -@register_model_architecture("insertion_transformer", "insertion_transformer") -def insertion_base_architecture(args): - args.encoder_embed_path = getattr(args, "encoder_embed_path", None) - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) - args.encoder_layers = getattr(args, "encoder_layers", 6) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) - args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) - args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) - args.decoder_embed_path = getattr(args, "decoder_embed_path", None) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) - args.decoder_ffn_embed_dim = getattr( - args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim - ) - args.decoder_layers = getattr(args, "decoder_layers", 6) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) - args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) - args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) - args.attention_dropout = getattr(args, "attention_dropout", 0.0) - args.activation_dropout = getattr(args, "activation_dropout", 0.0) - args.activation_fn = getattr(args, "activation_fn", "relu") - args.dropout = getattr(args, "dropout", 0.1) - args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) - args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) - args.share_decoder_input_output_embed = getattr( - args, "share_decoder_input_output_embed", False - ) - args.share_all_embeddings = getattr(args, "share_all_embeddings", False) - args.no_token_positional_embeddings = getattr( - args, "no_token_positional_embeddings", False - ) - args.adaptive_input = getattr(args, "adaptive_input", False) - args.apply_bert_init = getattr(args, "apply_bert_init", False) - - args.decoder_output_dim = getattr( - args, "decoder_output_dim", args.decoder_embed_dim - ) - args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) - - # special for insertion transformer - args.label_tau = getattr(args, "label_tau", None) diff --git a/spaces/PKUWilliamYang/VToonify/vtoonify/model/raft/core/corr.py b/spaces/PKUWilliamYang/VToonify/vtoonify/model/raft/core/corr.py deleted file mode 100644 index 40214aa5e6f0392a732eacab9d9cb0cbfb4669f3..0000000000000000000000000000000000000000 --- a/spaces/PKUWilliamYang/VToonify/vtoonify/model/raft/core/corr.py +++ /dev/null @@ -1,91 +0,0 @@ -import torch -import torch.nn.functional as F -from model.raft.core.utils.utils import bilinear_sampler, coords_grid - -try: - import alt_cuda_corr -except: - # alt_cuda_corr is not compiled - pass - - -class CorrBlock: - def __init__(self, fmap1, fmap2, num_levels=4, radius=4): - self.num_levels = num_levels - self.radius = radius - self.corr_pyramid = [] - - # all pairs correlation - corr = CorrBlock.corr(fmap1, fmap2) - - batch, h1, w1, dim, h2, w2 = corr.shape - corr = corr.reshape(batch*h1*w1, dim, h2, w2) - - self.corr_pyramid.append(corr) - for i in range(self.num_levels-1): - corr = F.avg_pool2d(corr, 2, stride=2) - self.corr_pyramid.append(corr) - - def __call__(self, coords): - r = self.radius - coords = coords.permute(0, 2, 3, 1) - batch, h1, w1, _ = coords.shape - - out_pyramid = [] - for i in range(self.num_levels): - corr = self.corr_pyramid[i] - dx = 
torch.linspace(-r, r, 2*r+1, device=coords.device) - dy = torch.linspace(-r, r, 2*r+1, device=coords.device) - delta = torch.stack(torch.meshgrid(dy, dx), axis=-1) - - centroid_lvl = coords.reshape(batch*h1*w1, 1, 1, 2) / 2**i - delta_lvl = delta.view(1, 2*r+1, 2*r+1, 2) - coords_lvl = centroid_lvl + delta_lvl - - corr = bilinear_sampler(corr, coords_lvl) - corr = corr.view(batch, h1, w1, -1) - out_pyramid.append(corr) - - out = torch.cat(out_pyramid, dim=-1) - return out.permute(0, 3, 1, 2).contiguous().float() - - @staticmethod - def corr(fmap1, fmap2): - batch, dim, ht, wd = fmap1.shape - fmap1 = fmap1.view(batch, dim, ht*wd) - fmap2 = fmap2.view(batch, dim, ht*wd) - - corr = torch.matmul(fmap1.transpose(1,2), fmap2) - corr = corr.view(batch, ht, wd, 1, ht, wd) - return corr / torch.sqrt(torch.tensor(dim).float()) - - -class AlternateCorrBlock: - def __init__(self, fmap1, fmap2, num_levels=4, radius=4): - self.num_levels = num_levels - self.radius = radius - - self.pyramid = [(fmap1, fmap2)] - for i in range(self.num_levels): - fmap1 = F.avg_pool2d(fmap1, 2, stride=2) - fmap2 = F.avg_pool2d(fmap2, 2, stride=2) - self.pyramid.append((fmap1, fmap2)) - - def __call__(self, coords): - coords = coords.permute(0, 2, 3, 1) - B, H, W, _ = coords.shape - dim = self.pyramid[0][0].shape[1] - - corr_list = [] - for i in range(self.num_levels): - r = self.radius - fmap1_i = self.pyramid[0][0].permute(0, 2, 3, 1).contiguous() - fmap2_i = self.pyramid[i][1].permute(0, 2, 3, 1).contiguous() - - coords_i = (coords / 2**i).reshape(B, 1, H, W, 2).contiguous() - corr, = alt_cuda_corr.forward(fmap1_i, fmap2_i, coords_i, r) - corr_list.append(corr.squeeze(1)) - - corr = torch.stack(corr_list, dim=1) - corr = corr.reshape(B, -1, H, W) - return corr / torch.sqrt(torch.tensor(dim).float()) diff --git a/spaces/PSLD/PSLD/diffusion-posterior-sampling/bkse/options/options.py b/spaces/PSLD/PSLD/diffusion-posterior-sampling/bkse/options/options.py deleted file mode 100644 index 1eeefa691e32857cd565e252e2049a11dde5e68b..0000000000000000000000000000000000000000 --- a/spaces/PSLD/PSLD/diffusion-posterior-sampling/bkse/options/options.py +++ /dev/null @@ -1,122 +0,0 @@ -import logging -import os -import os.path as osp - -import yaml -from utils.util import OrderedYaml - - -Loader, Dumper = OrderedYaml() - - -def parse(opt_path, is_train=True): - with open(opt_path, mode="r") as f: - opt = yaml.load(f, Loader=Loader) - # export CUDA_VISIBLE_DEVICES - gpu_list = ",".join(str(x) for x in opt["gpu_ids"]) - os.environ["CUDA_VISIBLE_DEVICES"] = gpu_list - print("export CUDA_VISIBLE_DEVICES=" + gpu_list) - - opt["is_train"] = is_train - if opt["distortion"] == "sr": - scale = opt["scale"] - - # datasets - for phase, dataset in opt["datasets"].items(): - phase = phase.split("_")[0] - dataset["phase"] = phase - if opt["distortion"] == "sr": - dataset["scale"] = scale - is_lmdb = False - if dataset.get("dataroot_GT", None) is not None: - dataset["dataroot_GT"] = osp.expanduser(dataset["dataroot_GT"]) - if dataset["dataroot_GT"].endswith("lmdb"): - is_lmdb = True - if dataset.get("dataroot_LQ", None) is not None: - dataset["dataroot_LQ"] = osp.expanduser(dataset["dataroot_LQ"]) - if dataset["dataroot_LQ"].endswith("lmdb"): - is_lmdb = True - dataset["data_type"] = "lmdb" if is_lmdb else "img" - if dataset["mode"].endswith("mc"): # for memcached - dataset["data_type"] = "mc" - dataset["mode"] = dataset["mode"].replace("_mc", "") - - # path - for key, path in opt["path"].items(): - if path and key in opt["path"] and key != 
"strict_load": - opt["path"][key] = osp.expanduser(path) - opt["path"]["root"] = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir)) - if is_train: - experiments_root = osp.join(opt["path"]["root"], "experiments", opt["name"]) - opt["path"]["experiments_root"] = experiments_root - opt["path"]["models"] = osp.join(experiments_root, "models") - opt["path"]["training_state"] = osp.join(experiments_root, "training_state") - opt["path"]["log"] = experiments_root - opt["path"]["val_images"] = osp.join(experiments_root, "val_images") - - # change some options for debug mode - if "debug" in opt["name"]: - opt["train"]["val_freq"] = 8 - opt["logger"]["print_freq"] = 1 - opt["logger"]["save_checkpoint_freq"] = 8 - else: # test - results_root = osp.join(opt["path"]["root"], "results", opt["name"]) - opt["path"]["results_root"] = results_root - opt["path"]["log"] = results_root - - # network - if opt["distortion"] == "sr": - opt["network_G"]["scale"] = scale - - return opt - - -def dict2str(opt, indent_l=1): - """dict to string for logger""" - msg = "" - for k, v in opt.items(): - if isinstance(v, dict): - msg += " " * (indent_l * 2) + k + ":[\n" - msg += dict2str(v, indent_l + 1) - msg += " " * (indent_l * 2) + "]\n" - else: - msg += " " * (indent_l * 2) + k + ": " + str(v) + "\n" - return msg - - -class NoneDict(dict): - def __missing__(self, key): - return None - - -# convert to NoneDict, which return None for missing key. -def dict_to_nonedict(opt): - if isinstance(opt, dict): - new_opt = dict() - for key, sub_opt in opt.items(): - new_opt[key] = dict_to_nonedict(sub_opt) - return NoneDict(**new_opt) - elif isinstance(opt, list): - return [dict_to_nonedict(sub_opt) for sub_opt in opt] - else: - return opt - - -def check_resume(opt, resume_iter): - """Check resume states and pretrain_model paths""" - logger = logging.getLogger("base") - if opt["path"]["resume_state"]: - if ( - opt["path"].get("pretrain_model_G", None) is not None - or opt["path"].get("pretrain_model_D", None) is not None - ): - logger.warning( - "pretrain_model path will be ignored \ - when resuming training." 
- ) - - opt["path"]["pretrain_model_G"] = osp.join(opt["path"]["models"], "{}_G.pth".format(resume_iter)) - logger.info("Set [pretrain_model_G] to " + opt["path"]["pretrain_model_G"]) - if "gan" in opt["model"]: - opt["path"]["pretrain_model_D"] = osp.join(opt["path"]["models"], "{}_D.pth".format(resume_iter)) - logger.info("Set [pretrain_model_D] to " + opt["path"]["pretrain_model_D"]) diff --git a/spaces/PSLD/PSLD/diffusion-posterior-sampling/bkse/scripts/download_dataset.py b/spaces/PSLD/PSLD/diffusion-posterior-sampling/bkse/scripts/download_dataset.py deleted file mode 100644 index 5d7086463abb2eeb777b1b4769604e2a79a67d88..0000000000000000000000000000000000000000 --- a/spaces/PSLD/PSLD/diffusion-posterior-sampling/bkse/scripts/download_dataset.py +++ /dev/null @@ -1,72 +0,0 @@ -import argparse -import os -import os.path as osp - -import requests - - -def download_file_from_google_drive(file_id, destination): - os.makedirs(osp.dirname(destination), exist_ok=True) - URL = "https://docs.google.com/uc?export=download" - - session = requests.Session() - - response = session.get(URL, params={"id": file_id}, stream=True) - token = get_confirm_token(response) - - if token: - params = {"id": file_id, "confirm": token} - response = session.get(URL, params=params, stream=True) - - save_response_content(response, destination) - - -def get_confirm_token(response): - for key, value in response.cookies.items(): - if key.startswith("download_warning"): - return value - - return None - - -def save_response_content(response, destination): - CHUNK_SIZE = 32768 - - with open(destination, "wb") as f: - for chunk in response.iter_content(CHUNK_SIZE): - if chunk: # filter out keep-alive new chunks - f.write(chunk) - - -if __name__ == "__main__": - dataset_ids = { - "GOPRO_Large": "1H0PIXvJH4c40pk7ou6nAwoxuR4Qh_Sa2", - "train_sharp": "1YLksKtMhd2mWyVSkvhDaDLWSc1qYNCz-", - "train_blur": "1Be2cgzuuXibcqAuJekDgvHq4MLYkCgR8", - "val_sharp": "1MGeObVQ1-Z29f-myDP7-8c3u0_xECKXq", - "val_blur": "1N8z2yD0GDWmh6U4d4EADERtcUgDzGrHx", - "test_blur": "1dr0--ZBKqr4P1M8lek6JKD1Vd6bhhrZT", - } - - parser = argparse.ArgumentParser( - description="Download REDS dataset from google drive to current folder", allow_abbrev=False - ) - - parser.add_argument("--REDS_train_sharp", action="store_true", help="download REDS train_sharp.zip") - parser.add_argument("--REDS_train_blur", action="store_true", help="download REDS train_blur.zip") - parser.add_argument("--REDS_val_sharp", action="store_true", help="download REDS val_sharp.zip") - parser.add_argument("--REDS_val_blur", action="store_true", help="download REDS val_blur.zip") - parser.add_argument("--GOPRO", action="store_true", help="download GOPRO_Large.zip") - - args = parser.parse_args() - - if args.REDS_train_sharp: - download_file_from_google_drive(dataset_ids["train_sharp"], "REDS/train_sharp.zip") - if args.REDS_train_blur: - download_file_from_google_drive(dataset_ids["train_blur"], "REDS/train_blur.zip") - if args.REDS_val_sharp: - download_file_from_google_drive(dataset_ids["val_sharp"], "REDS/val_sharp.zip") - if args.REDS_val_blur: - download_file_from_google_drive(dataset_ids["val_blur"], "REDS/val_blur.zip") - if args.GOPRO: - download_file_from_google_drive(dataset_ids["GOPRO_Large"], "GOPRO/GOPRO.zip") diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/scripts/punify.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/scripts/punify.go deleted file mode 100644 index 
eeb847548ced83cc4bfeb24c03c8685ee1f82762..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/scripts/punify.go and /dev/null differ diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/lilypond/2.24.2/ccache/lily/output-lib.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/lilypond/2.24.2/ccache/lily/output-lib.go deleted file mode 100644 index ac916ce068641e6dd71ebf0e13d85c89bd484b8b..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/lilypond/2.24.2/ccache/lily/output-lib.go and /dev/null differ diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/lilypond/2.24.2/ccache/lily/time-signature.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/lilypond/2.24.2/ccache/lily/time-signature.go deleted file mode 100644 index 8f6e5392b439d485dc8f57b431b7bc9e52e5087a..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/lilypond/2.24.2/ccache/lily/time-signature.go and /dev/null differ diff --git a/spaces/PaulHilders/CLIPGroundingExplainability/clip_grounding/utils/image.py b/spaces/PaulHilders/CLIPGroundingExplainability/clip_grounding/utils/image.py deleted file mode 100644 index 0406e52a1bc76cf1432205934eff84e0f8d43dd1..0000000000000000000000000000000000000000 --- a/spaces/PaulHilders/CLIPGroundingExplainability/clip_grounding/utils/image.py +++ /dev/null @@ -1,46 +0,0 @@ -"""Image operations.""" -from copy import deepcopy -from PIL import Image - - -def center_crop(im: Image): - width, height = im.size - new_width = width if width < height else height - new_height = height if height < width else width - - left = (width - new_width)/2 - top = (height - new_height)/2 - right = (width + new_width)/2 - bottom = (height + new_height)/2 - - # Crop the center of the image - im = im.crop((left, top, right, bottom)) - - return im - - -def pad_to_square(im: Image, color=(0, 0, 0)): - im = deepcopy(im) - width, height = im.size - - vert_pad = (max(width, height) - height) // 2 - hor_pad = (max(width, height) - width) // 2 - - if len(im.mode) == 3: - color = (0, 0, 0) - elif len(im.mode) == 1: - color = 0 - else: - raise ValueError(f"Image mode not supported. 
Image has {im.mode} channels.") - - return add_margin(im, vert_pad, hor_pad, vert_pad, hor_pad, color=color) - - -def add_margin(pil_img, top, right, bottom, left, color=(0, 0, 0)): - """Ref: https://note.nkmk.me/en/python-pillow-add-margin-expand-canvas/""" - width, height = pil_img.size - new_width = width + right + left - new_height = height + top + bottom - result = Image.new(pil_img.mode, (new_width, new_height), color) - result.paste(pil_img, (left, top)) - return result diff --git a/spaces/Paulraj916/paulraj916/scrapTs.py b/spaces/Paulraj916/paulraj916/scrapTs.py deleted file mode 100644 index 04b6c8b9d9b7a55a5a51c66f9ee596d3a608c3ae..0000000000000000000000000000000000000000 --- a/spaces/Paulraj916/paulraj916/scrapTs.py +++ /dev/null @@ -1,50 +0,0 @@ -import os -import requests -from urllib.parse import urljoin -from bs4 import BeautifulSoup - -class ScrapTs: - def __init__(self, url): - self.url = url - - def extract_and_save_typescript(self): - try: - # Send an HTTP GET request to the webpage and get the HTML content - response = requests.get(self.url) - response.raise_for_status() - html_content = response.text - - # Parse the HTML content using BeautifulSoup - soup = BeautifulSoup(html_content, 'html.parser') - - # Find all - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/spaces/merve/measuring-fairness/public/measuring-fairness/mini.js b/spaces/merve/measuring-fairness/public/measuring-fairness/mini.js deleted file mode 100644 index 51e81b909d66e7a0b45f54b318a0b88a95fdb217..0000000000000000000000000000000000000000 --- a/spaces/merve/measuring-fairness/public/measuring-fairness/mini.js +++ /dev/null @@ -1,205 +0,0 @@ -/* Copyright 2020 Google LLC. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - - - - - -window.makeMini = function(){ - - var s = 10 - var sScale = ([a, b]) => [s*a, s*b] - - var miniSel = d3.selectAll('.mini').html('').each(addMini).st({overflow: 'visible'}) - - var cColors = { - true: {true: colors.sick, false: lcolors.sick}, - false: {true: colors.well, false: lcolors.well} - } - var rColors = { - true: {true: lcolors.sick, false: llcolors.sick}, - false: {true: lcolors.well, false: llcolors.well} - } - - - function addMini(){ - var miniSel = d3.select(this) - - var type = miniSel.attr('type') - var sex = miniSel.attr('sex') - var isAll = sex == 'all' - - miniSel.st({marginBottom: sex == 'male' ? 30 : 0}) - - var data = students - .filter(d => isAll ? true : sex == 'male' ? 
d.isMale : !d.isMale) - - var topDatum = {} - var botDatum = {} - - if (type == 'fp'){ - topDatum.opacity = d => d.grade > d.threshold && d.isSick - botDatum.opacity = d => d.isSick - } else { - topDatum.opacity = d => d.grade > d.threshold && d.isSick - botDatum.opacity = d => d.grade > d.threshold - } - - - - var top = -s*nCols/2 + 10 - if (!isAll) top /= 2 - addGrid(miniSel.append('span'), topDatum) - miniSel.append('span.equation').text('÷').st({top, fontWeight: '', fontSize: 20}) - addGrid(miniSel.append('span'), botDatum) - miniSel.append('span.equation').text('=').st({top, fontWeight: '', fontSize: 20}) - - if (!isAll){ - var sexStr = sex == 'male' ? 'children' : 'adults' - - var coStr = `of ${sexStr}
    testing positive
    are sick` - var fpStr = `of ${sexStr}
    who are sick
    test positive` - miniSel.st({position: 'relative'}) - .append('div.axis') - .st({position: 'absolute', right: -9, textAlign: 'center', width: 95, lineHeight: 14, bottom: -15}) - .html(type == 'fp' ? fpStr : coStr) - - } - - var percentSel = miniSel.append('span.equation').st({top, marginLeft: 0}) - - function update(){ - topDatum.update() - botDatum.update() - - var percent = d3.sum(data, topDatum.opacity)/d3.sum(data, botDatum.opacity) - percentSel.text(d3.format('.0%')(percent)) - } - - miniSel.datum({update}) - - - function addGrid(gridSel, datum){ - var {opacity} = datum - - var width = s*nCols - var height = s*nCols*(isAll ? 1 : .5) - var svg = gridSel.append('svg').at({width, height}) - - var callSickSel = svg.append('rect') - .at({width, height, fill: lcolors.sick}) - - var callWellPath = svg.append('path') - .at({width, height, fill: lcolors.well}) - - - var personSel = svg.appendMany('g', data) - .translate(d => sScale(d.pos[isAll ? 'allIJ' : 'sexGroupIJ'])) - - var pad = 0 - // var rectSel = personSel.append('rect') - // .at({ - // height: s - pad, - // width: s - pad, - // // stroke: '#666', - // // strokeWidth: .1, - // }) - - - var circleSel = personSel.append('circle') - .at({r: s/4, cx: s/2 - pad/2, cy: s/2 - pad/2, fill: d => d.isSick ? colors.sick : '#777'}) - - if (!isAll){ - svg.append('path') - .translate([-1, -5]) - .at({stroke: colors.sick, d: 'M 0 0 H ' + (sex == 'male' ? 8 : 4)*s}) - } - - var geodata = {type: 'FeatureCollection'} - geodata.features = data.map(d => { - var [x, y] = sScale(d.pos[isAll ? 'allIJ' : 'sexGroupIJ']) - return { - type: 'Feature', - geometry: { - type: 'Polygon', - coordinates: [ - [[x, y], [x, y + s], [x + s, y + s], [x + s, y], [x, y]] - ] - }, - properties: {d}, - } - }) - - var topology = topojson.topology({boxes: geodata}) - var geowrap = topojson.feature(topology, topology.objects.boxes) - var path = d3.geoPath() - - var hiddenPath = svg.append('path') - .at({stroke: 'none', fill: 'rgba(255,255,255,.6)'}) - .translate(.5, 1) - - var includedPath = svg.append('path') - .at({stroke: '#000', fill: 'none'}) - .translate(.5, 1) - - - circleSel.at({fill: d => d.isSick ? colors.sick : colors.well}) - - datum.update = () => { - // rectSel.at({ - // // fill: d => rColors[d.grade > d.threshold][opacity(d)], - // // strokeWidth: d => opacity(d) ? 1 : .1, - // }) - - // circleSel.at({fill: d => cColors[d.isSick][opacity(d)]}) - - var byType = d3.nestBy(topology.objects.boxes.geometries, d => opacity(d.properties.d)) - - byType.forEach(type => { - var obj = {type: 'GeometryCollection', geometries: type} - var pathStr = path(topojson.mesh(topology, obj, (a, b) => a == b)) - - var pathSel = type.key == 'true' ? 
includedPath : hiddenPath - pathSel.at({d: pathStr}) - }) - - var sickBoxes = topology.objects.boxes.geometries - .filter(d => d.properties.d.grade <= d.properties.d.threshold) - var obj = {type: 'GeometryCollection', geometries: sickBoxes} - var pathStr = path(topojson.mesh(topology, obj, (a, b) => a == b)) - callWellPath.at({d: pathStr}) - } - } - - } - - - - function updateAll(){ - miniSel.each(d => d.update()) - } - - return {updateAll} -} - - - - - - - - - -if (window.init) window.init() diff --git a/spaces/mfrashad/CharacterGAN/netdissect/segmodel/models.py b/spaces/mfrashad/CharacterGAN/netdissect/segmodel/models.py deleted file mode 100644 index ceb6f2ce21720722d5d8c9ee4f7e015ad06a9647..0000000000000000000000000000000000000000 --- a/spaces/mfrashad/CharacterGAN/netdissect/segmodel/models.py +++ /dev/null @@ -1,558 +0,0 @@ -import torch -import torch.nn as nn -import torchvision -from . import resnet, resnext -try: - from lib.nn import SynchronizedBatchNorm2d -except ImportError: - from torch.nn import BatchNorm2d as SynchronizedBatchNorm2d - - -class SegmentationModuleBase(nn.Module): - def __init__(self): - super(SegmentationModuleBase, self).__init__() - - def pixel_acc(self, pred, label): - _, preds = torch.max(pred, dim=1) - valid = (label >= 0).long() - acc_sum = torch.sum(valid * (preds == label).long()) - pixel_sum = torch.sum(valid) - acc = acc_sum.float() / (pixel_sum.float() + 1e-10) - return acc - - -class SegmentationModule(SegmentationModuleBase): - def __init__(self, net_enc, net_dec, crit, deep_sup_scale=None): - super(SegmentationModule, self).__init__() - self.encoder = net_enc - self.decoder = net_dec - self.crit = crit - self.deep_sup_scale = deep_sup_scale - - def forward(self, feed_dict, *, segSize=None): - if segSize is None: # training - if self.deep_sup_scale is not None: # use deep supervision technique - (pred, pred_deepsup) = self.decoder(self.encoder(feed_dict['img_data'], return_feature_maps=True)) - else: - pred = self.decoder(self.encoder(feed_dict['img_data'], return_feature_maps=True)) - - loss = self.crit(pred, feed_dict['seg_label']) - if self.deep_sup_scale is not None: - loss_deepsup = self.crit(pred_deepsup, feed_dict['seg_label']) - loss = loss + loss_deepsup * self.deep_sup_scale - - acc = self.pixel_acc(pred, feed_dict['seg_label']) - return loss, acc - else: # inference - pred = self.decoder(self.encoder(feed_dict['img_data'], return_feature_maps=True), segSize=segSize) - return pred - - -def conv3x3(in_planes, out_planes, stride=1, has_bias=False): - "3x3 convolution with padding" - return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, - padding=1, bias=has_bias) - - -def conv3x3_bn_relu(in_planes, out_planes, stride=1): - return nn.Sequential( - conv3x3(in_planes, out_planes, stride), - SynchronizedBatchNorm2d(out_planes), - nn.ReLU(inplace=True), - ) - - -class ModelBuilder(): - # custom weights initialization - def weights_init(self, m): - classname = m.__class__.__name__ - if classname.find('Conv') != -1: - nn.init.kaiming_normal_(m.weight.data) - elif classname.find('BatchNorm') != -1: - m.weight.data.fill_(1.) 
- m.bias.data.fill_(1e-4) - #elif classname.find('Linear') != -1: - # m.weight.data.normal_(0.0, 0.0001) - - def build_encoder(self, arch='resnet50_dilated8', fc_dim=512, weights=''): - pretrained = True if len(weights) == 0 else False - if arch == 'resnet34': - raise NotImplementedError - orig_resnet = resnet.__dict__['resnet34'](pretrained=pretrained) - net_encoder = Resnet(orig_resnet) - elif arch == 'resnet34_dilated8': - raise NotImplementedError - orig_resnet = resnet.__dict__['resnet34'](pretrained=pretrained) - net_encoder = ResnetDilated(orig_resnet, - dilate_scale=8) - elif arch == 'resnet34_dilated16': - raise NotImplementedError - orig_resnet = resnet.__dict__['resnet34'](pretrained=pretrained) - net_encoder = ResnetDilated(orig_resnet, - dilate_scale=16) - elif arch == 'resnet50': - orig_resnet = resnet.__dict__['resnet50'](pretrained=pretrained) - net_encoder = Resnet(orig_resnet) - elif arch == 'resnet50_dilated8': - orig_resnet = resnet.__dict__['resnet50'](pretrained=pretrained) - net_encoder = ResnetDilated(orig_resnet, - dilate_scale=8) - elif arch == 'resnet50_dilated16': - orig_resnet = resnet.__dict__['resnet50'](pretrained=pretrained) - net_encoder = ResnetDilated(orig_resnet, - dilate_scale=16) - elif arch == 'resnet101': - orig_resnet = resnet.__dict__['resnet101'](pretrained=pretrained) - net_encoder = Resnet(orig_resnet) - elif arch == 'resnet101_dilated8': - orig_resnet = resnet.__dict__['resnet101'](pretrained=pretrained) - net_encoder = ResnetDilated(orig_resnet, - dilate_scale=8) - elif arch == 'resnet101_dilated16': - orig_resnet = resnet.__dict__['resnet101'](pretrained=pretrained) - net_encoder = ResnetDilated(orig_resnet, - dilate_scale=16) - elif arch == 'resnext101': - orig_resnext = resnext.__dict__['resnext101'](pretrained=pretrained) - net_encoder = Resnet(orig_resnext) # we can still use class Resnet - else: - raise Exception('Architecture undefined!') - - # net_encoder.apply(self.weights_init) - if len(weights) > 0: - # print('Loading weights for net_encoder') - net_encoder.load_state_dict( - torch.load(weights, map_location=lambda storage, loc: storage), strict=False) - return net_encoder - - def build_decoder(self, arch='ppm_bilinear_deepsup', - fc_dim=512, num_class=150, - weights='', inference=False, use_softmax=False): - if arch == 'c1_bilinear_deepsup': - net_decoder = C1BilinearDeepSup( - num_class=num_class, - fc_dim=fc_dim, - inference=inference, - use_softmax=use_softmax) - elif arch == 'c1_bilinear': - net_decoder = C1Bilinear( - num_class=num_class, - fc_dim=fc_dim, - inference=inference, - use_softmax=use_softmax) - elif arch == 'ppm_bilinear': - net_decoder = PPMBilinear( - num_class=num_class, - fc_dim=fc_dim, - inference=inference, - use_softmax=use_softmax) - elif arch == 'ppm_bilinear_deepsup': - net_decoder = PPMBilinearDeepsup( - num_class=num_class, - fc_dim=fc_dim, - inference=inference, - use_softmax=use_softmax) - elif arch == 'upernet_lite': - net_decoder = UPerNet( - num_class=num_class, - fc_dim=fc_dim, - inference=inference, - use_softmax=use_softmax, - fpn_dim=256) - elif arch == 'upernet': - net_decoder = UPerNet( - num_class=num_class, - fc_dim=fc_dim, - inference=inference, - use_softmax=use_softmax, - fpn_dim=512) - elif arch == 'upernet_tmp': - net_decoder = UPerNetTmp( - num_class=num_class, - fc_dim=fc_dim, - inference=inference, - use_softmax=use_softmax, - fpn_dim=512) - else: - raise Exception('Architecture undefined!') - - net_decoder.apply(self.weights_init) - if len(weights) > 0: - # print('Loading weights 
for net_decoder') - net_decoder.load_state_dict( - torch.load(weights, map_location=lambda storage, loc: storage), strict=False) - return net_decoder - - -class Resnet(nn.Module): - def __init__(self, orig_resnet): - super(Resnet, self).__init__() - - # take pretrained resnet, except AvgPool and FC - self.conv1 = orig_resnet.conv1 - self.bn1 = orig_resnet.bn1 - self.relu1 = orig_resnet.relu1 - self.conv2 = orig_resnet.conv2 - self.bn2 = orig_resnet.bn2 - self.relu2 = orig_resnet.relu2 - self.conv3 = orig_resnet.conv3 - self.bn3 = orig_resnet.bn3 - self.relu3 = orig_resnet.relu3 - self.maxpool = orig_resnet.maxpool - self.layer1 = orig_resnet.layer1 - self.layer2 = orig_resnet.layer2 - self.layer3 = orig_resnet.layer3 - self.layer4 = orig_resnet.layer4 - - def forward(self, x, return_feature_maps=False): - conv_out = [] - - x = self.relu1(self.bn1(self.conv1(x))) - x = self.relu2(self.bn2(self.conv2(x))) - x = self.relu3(self.bn3(self.conv3(x))) - x = self.maxpool(x) - - x = self.layer1(x); conv_out.append(x); - x = self.layer2(x); conv_out.append(x); - x = self.layer3(x); conv_out.append(x); - x = self.layer4(x); conv_out.append(x); - - if return_feature_maps: - return conv_out - return [x] - - -class ResnetDilated(nn.Module): - def __init__(self, orig_resnet, dilate_scale=8): - super(ResnetDilated, self).__init__() - from functools import partial - - if dilate_scale == 8: - orig_resnet.layer3.apply( - partial(self._nostride_dilate, dilate=2)) - orig_resnet.layer4.apply( - partial(self._nostride_dilate, dilate=4)) - elif dilate_scale == 16: - orig_resnet.layer4.apply( - partial(self._nostride_dilate, dilate=2)) - - # take pretrained resnet, except AvgPool and FC - self.conv1 = orig_resnet.conv1 - self.bn1 = orig_resnet.bn1 - self.relu1 = orig_resnet.relu1 - self.conv2 = orig_resnet.conv2 - self.bn2 = orig_resnet.bn2 - self.relu2 = orig_resnet.relu2 - self.conv3 = orig_resnet.conv3 - self.bn3 = orig_resnet.bn3 - self.relu3 = orig_resnet.relu3 - self.maxpool = orig_resnet.maxpool - self.layer1 = orig_resnet.layer1 - self.layer2 = orig_resnet.layer2 - self.layer3 = orig_resnet.layer3 - self.layer4 = orig_resnet.layer4 - - def _nostride_dilate(self, m, dilate): - classname = m.__class__.__name__ - if classname.find('Conv') != -1: - # the convolution with stride - if m.stride == (2, 2): - m.stride = (1, 1) - if m.kernel_size == (3, 3): - m.dilation = (dilate//2, dilate//2) - m.padding = (dilate//2, dilate//2) - # other convoluions - else: - if m.kernel_size == (3, 3): - m.dilation = (dilate, dilate) - m.padding = (dilate, dilate) - - def forward(self, x, return_feature_maps=False): - conv_out = [] - - x = self.relu1(self.bn1(self.conv1(x))) - x = self.relu2(self.bn2(self.conv2(x))) - x = self.relu3(self.bn3(self.conv3(x))) - x = self.maxpool(x) - - x = self.layer1(x); conv_out.append(x); - x = self.layer2(x); conv_out.append(x); - x = self.layer3(x); conv_out.append(x); - x = self.layer4(x); conv_out.append(x); - - if return_feature_maps: - return conv_out - return [x] - - -# last conv, bilinear upsample -class C1BilinearDeepSup(nn.Module): - def __init__(self, num_class=150, fc_dim=2048, inference=False, use_softmax=False): - super(C1BilinearDeepSup, self).__init__() - self.use_softmax = use_softmax - self.inference = inference - - self.cbr = conv3x3_bn_relu(fc_dim, fc_dim // 4, 1) - self.cbr_deepsup = conv3x3_bn_relu(fc_dim // 2, fc_dim // 4, 1) - - # last conv - self.conv_last = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0) - self.conv_last_deepsup = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0) - 
- def forward(self, conv_out, segSize=None): - conv5 = conv_out[-1] - - x = self.cbr(conv5) - x = self.conv_last(x) - - if self.inference or self.use_softmax: # is True during inference - x = nn.functional.interpolate( - x, size=segSize, mode='bilinear', align_corners=False) - if self.use_softmax: - x = nn.functional.softmax(x, dim=1) - return x - - # deep sup - conv4 = conv_out[-2] - _ = self.cbr_deepsup(conv4) - _ = self.conv_last_deepsup(_) - - x = nn.functional.log_softmax(x, dim=1) - _ = nn.functional.log_softmax(_, dim=1) - - return (x, _) - - -# last conv, bilinear upsample -class C1Bilinear(nn.Module): - def __init__(self, num_class=150, fc_dim=2048, inference=False, use_softmax=False): - super(C1Bilinear, self).__init__() - self.use_softmax = use_softmax - self.inference = inference - - self.cbr = conv3x3_bn_relu(fc_dim, fc_dim // 4, 1) - - # last conv - self.conv_last = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0) - - def forward(self, conv_out, segSize=None): - conv5 = conv_out[-1] - x = self.cbr(conv5) - x = self.conv_last(x) - - if self.inference or self.use_softmax: # is True during inference - x = nn.functional.interpolate( - x, size=segSize, mode='bilinear', align_corners=False) - if self.use_softmax: - x = nn.functional.softmax(x, dim=1) - else: - x = nn.functional.log_softmax(x, dim=1) - - return x - - -# pyramid pooling, bilinear upsample -class PPMBilinear(nn.Module): - def __init__(self, num_class=150, fc_dim=4096, - inference=False, use_softmax=False, pool_scales=(1, 2, 3, 6)): - super(PPMBilinear, self).__init__() - self.use_softmax = use_softmax - self.inference = inference - - self.ppm = [] - for scale in pool_scales: - self.ppm.append(nn.Sequential( - nn.AdaptiveAvgPool2d(scale), - nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False), - SynchronizedBatchNorm2d(512), - nn.ReLU(inplace=True) - )) - self.ppm = nn.ModuleList(self.ppm) - - self.conv_last = nn.Sequential( - nn.Conv2d(fc_dim+len(pool_scales)*512, 512, - kernel_size=3, padding=1, bias=False), - SynchronizedBatchNorm2d(512), - nn.ReLU(inplace=True), - nn.Dropout2d(0.1), - nn.Conv2d(512, num_class, kernel_size=1) - ) - - def forward(self, conv_out, segSize=None): - conv5 = conv_out[-1] - - input_size = conv5.size() - ppm_out = [conv5] - for pool_scale in self.ppm: - ppm_out.append(nn.functional.interpolate( - pool_scale(conv5), - (input_size[2], input_size[3]), - mode='bilinear', align_corners=False)) - ppm_out = torch.cat(ppm_out, 1) - - x = self.conv_last(ppm_out) - - if self.inference or self.use_softmax: # is True during inference - x = nn.functional.interpolate( - x, size=segSize, mode='bilinear', align_corners=False) - if self.use_softmax: - x = nn.functional.softmax(x, dim=1) - else: - x = nn.functional.log_softmax(x, dim=1) - return x - - -# pyramid pooling, bilinear upsample -class PPMBilinearDeepsup(nn.Module): - def __init__(self, num_class=150, fc_dim=4096, - inference=False, use_softmax=False, pool_scales=(1, 2, 3, 6)): - super(PPMBilinearDeepsup, self).__init__() - self.use_softmax = use_softmax - self.inference = inference - - self.ppm = [] - for scale in pool_scales: - self.ppm.append(nn.Sequential( - nn.AdaptiveAvgPool2d(scale), - nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False), - SynchronizedBatchNorm2d(512), - nn.ReLU(inplace=True) - )) - self.ppm = nn.ModuleList(self.ppm) - self.cbr_deepsup = conv3x3_bn_relu(fc_dim // 2, fc_dim // 4, 1) - - self.conv_last = nn.Sequential( - nn.Conv2d(fc_dim+len(pool_scales)*512, 512, - kernel_size=3, padding=1, bias=False), - SynchronizedBatchNorm2d(512), - 
nn.ReLU(inplace=True), - nn.Dropout2d(0.1), - nn.Conv2d(512, num_class, kernel_size=1) - ) - self.conv_last_deepsup = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0) - self.dropout_deepsup = nn.Dropout2d(0.1) - - def forward(self, conv_out, segSize=None): - conv5 = conv_out[-1] - - input_size = conv5.size() - ppm_out = [conv5] - for pool_scale in self.ppm: - ppm_out.append(nn.functional.interpolate( - pool_scale(conv5), - (input_size[2], input_size[3]), - mode='bilinear', align_corners=False)) - ppm_out = torch.cat(ppm_out, 1) - - x = self.conv_last(ppm_out) - - if self.inference or self.use_softmax: # is True during inference - x = nn.functional.interpolate( - x, size=segSize, mode='bilinear', align_corners=False) - if self.use_softmax: - x = nn.functional.softmax(x, dim=1) - return x - - # deep sup - conv4 = conv_out[-2] - _ = self.cbr_deepsup(conv4) - _ = self.dropout_deepsup(_) - _ = self.conv_last_deepsup(_) - - x = nn.functional.log_softmax(x, dim=1) - _ = nn.functional.log_softmax(_, dim=1) - - return (x, _) - - -# upernet -class UPerNet(nn.Module): - def __init__(self, num_class=150, fc_dim=4096, - inference=False, use_softmax=False, pool_scales=(1, 2, 3, 6), - fpn_inplanes=(256,512,1024,2048), fpn_dim=256): - super(UPerNet, self).__init__() - self.use_softmax = use_softmax - self.inference = inference - - # PPM Module - self.ppm_pooling = [] - self.ppm_conv = [] - - for scale in pool_scales: - self.ppm_pooling.append(nn.AdaptiveAvgPool2d(scale)) - self.ppm_conv.append(nn.Sequential( - nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False), - SynchronizedBatchNorm2d(512), - nn.ReLU(inplace=True) - )) - self.ppm_pooling = nn.ModuleList(self.ppm_pooling) - self.ppm_conv = nn.ModuleList(self.ppm_conv) - self.ppm_last_conv = conv3x3_bn_relu(fc_dim + len(pool_scales)*512, fpn_dim, 1) - - # FPN Module - self.fpn_in = [] - for fpn_inplane in fpn_inplanes[:-1]: # skip the top layer - self.fpn_in.append(nn.Sequential( - nn.Conv2d(fpn_inplane, fpn_dim, kernel_size=1, bias=False), - SynchronizedBatchNorm2d(fpn_dim), - nn.ReLU(inplace=True) - )) - self.fpn_in = nn.ModuleList(self.fpn_in) - - self.fpn_out = [] - for i in range(len(fpn_inplanes) - 1): # skip the top layer - self.fpn_out.append(nn.Sequential( - conv3x3_bn_relu(fpn_dim, fpn_dim, 1), - )) - self.fpn_out = nn.ModuleList(self.fpn_out) - - self.conv_last = nn.Sequential( - conv3x3_bn_relu(len(fpn_inplanes) * fpn_dim, fpn_dim, 1), - nn.Conv2d(fpn_dim, num_class, kernel_size=1) - ) - - def forward(self, conv_out, segSize=None): - conv5 = conv_out[-1] - - input_size = conv5.size() - ppm_out = [conv5] - for pool_scale, pool_conv in zip(self.ppm_pooling, self.ppm_conv): - ppm_out.append(pool_conv(nn.functional.interploate( - pool_scale(conv5), - (input_size[2], input_size[3]), - mode='bilinear', align_corners=False))) - ppm_out = torch.cat(ppm_out, 1) - f = self.ppm_last_conv(ppm_out) - - fpn_feature_list = [f] - for i in reversed(range(len(conv_out) - 1)): - conv_x = conv_out[i] - conv_x = self.fpn_in[i](conv_x) # lateral branch - - f = nn.functional.interpolate( - f, size=conv_x.size()[2:], mode='bilinear', align_corners=False) # top-down branch - f = conv_x + f - - fpn_feature_list.append(self.fpn_out[i](f)) - - fpn_feature_list.reverse() # [P2 - P5] - output_size = fpn_feature_list[0].size()[2:] - fusion_list = [fpn_feature_list[0]] - for i in range(1, len(fpn_feature_list)): - fusion_list.append(nn.functional.interpolate( - fpn_feature_list[i], - output_size, - mode='bilinear', align_corners=False)) - fusion_out = torch.cat(fusion_list, 1) - x 
= self.conv_last(fusion_out) - - if self.inference or self.use_softmax: # is True during inference - x = nn.functional.interpolate( - x, size=segSize, mode='bilinear', align_corners=False) - if self.use_softmax: - x = nn.functional.softmax(x, dim=1) - return x - - x = nn.functional.log_softmax(x, dim=1) - - return x diff --git a/spaces/miyaaa666/bingo/src/lib/bots/bing/sr.ts b/spaces/miyaaa666/bingo/src/lib/bots/bing/sr.ts deleted file mode 100644 index 7cae14da7362bd6cc1e234851c11ca67e5a99f0c..0000000000000000000000000000000000000000 --- a/spaces/miyaaa666/bingo/src/lib/bots/bing/sr.ts +++ /dev/null @@ -1,106 +0,0 @@ -// @ts-ignore -const SpeechRecognitionPolyfill: typeof webkitSpeechRecognition = typeof window !== 'undefined' ? ( - // @ts-ignore - window.SpeechRecognition || - window.webkitSpeechRecognition || - // @ts-ignore - window.mozSpeechRecognition || - // @ts-ignore - window.msSpeechRecognition || - // @ts-ignore - window.oSpeechRecognition -) as typeof webkitSpeechRecognition : undefined - -type subscriber = (msg: string, command?: string) => void - -export class SR { - recognition?: SpeechRecognition - onchange?: subscriber - transcript: boolean = false - listening: boolean = false - private commandsRe?: RegExp - constructor(commands: string[]) { - this.recognition = SpeechRecognitionPolyfill ? new SpeechRecognitionPolyfill() : undefined - if (!this.recognition) { - return - } - this.configuration('zh-CN') - if (commands.length) { - this.commandsRe = new RegExp(`^(${commands.join('|')})。?$`) - } - this.recognition.onresult = this.speechRecognition - this.recognition.onerror = (err) => { - console.log('err', err.error) - this.stop() - } - this.recognition.onend = () => { - if (this.recognition && this.listening) { - this.recognition.start() - } - } - } - - speechRecognition = (event: SpeechRecognitionEvent) => { - if (!this.listening) return - for (var i = event.resultIndex; i < event.results.length; i++) { - let result = event.results[i] - if (result.isFinal) { - var alt = result[0] - const text = alt.transcript.trim() - if (this.commandsRe && this.commandsRe.test(text)) { - return this.onchange?.('', RegExp.$1) - } - if (!this.transcript) return - this.onchange?.(text) - } - } - } - - private configuration = async (lang: string = 'zh-CN') => { - return new Promise((resolve) => { - if (this.recognition) { - this.recognition.continuous = true - this.recognition.lang = lang - this.recognition.onstart = resolve - } - }) - } - - start = async () => { - if (this.recognition && !this.listening) { - await this.recognition.start() - this.transcript = true - this.listening = true - } - } - - stop = () => { - if (this.recognition) { - this.recognition.stop() - this.transcript = false - this.listening = false - } - } - - - pause = () => { - if (this.recognition) { - this.transcript = false - } - } - - resume = () => { - if (this.recognition) { - this.transcript = true - } - } - - abort = () => { - if (this.recognition && this.transcript) { - this.recognition.abort() - this.transcript = false - this.listening = false - } - } -} - diff --git a/spaces/mnauf/detect-bees/utils/segment/general.py b/spaces/mnauf/detect-bees/utils/segment/general.py deleted file mode 100644 index b526333dc5a1b8625d7e6a51ee6ba41818c62adb..0000000000000000000000000000000000000000 --- a/spaces/mnauf/detect-bees/utils/segment/general.py +++ /dev/null @@ -1,137 +0,0 @@ -import cv2 -import numpy as np -import torch -import torch.nn.functional as F - - -def crop_mask(masks, boxes): - """ - "Crop" predicted masks by 
zeroing out everything not in the predicted bbox. - Vectorized by Chong (thanks Chong). - - Args: - - masks should be a size [h, w, n] tensor of masks - - boxes should be a size [n, 4] tensor of bbox coords in relative point form - """ - - n, h, w = masks.shape - x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) # x1 shape(1,1,n) - r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :] # rows shape(1,w,1) - c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None] # cols shape(h,1,1) - - return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2)) - - -def process_mask_upsample(protos, masks_in, bboxes, shape): - """ - Crop after upsample. - proto_out: [mask_dim, mask_h, mask_w] - out_masks: [n, mask_dim], n is number of masks after nms - bboxes: [n, 4], n is number of masks after nms - shape:input_image_size, (h, w) - - return: h, w, n - """ - - c, mh, mw = protos.shape # CHW - masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) - masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW - masks = crop_mask(masks, bboxes) # CHW - return masks.gt_(0.5) - - -def process_mask(protos, masks_in, bboxes, shape, upsample=False): - """ - Crop before upsample. - proto_out: [mask_dim, mask_h, mask_w] - out_masks: [n, mask_dim], n is number of masks after nms - bboxes: [n, 4], n is number of masks after nms - shape:input_image_size, (h, w) - - return: h, w, n - """ - - c, mh, mw = protos.shape # CHW - ih, iw = shape - masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW - - downsampled_bboxes = bboxes.clone() - downsampled_bboxes[:, 0] *= mw / iw - downsampled_bboxes[:, 2] *= mw / iw - downsampled_bboxes[:, 3] *= mh / ih - downsampled_bboxes[:, 1] *= mh / ih - - masks = crop_mask(masks, downsampled_bboxes) # CHW - if upsample: - masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW - return masks.gt_(0.5) - - -def scale_image(im1_shape, masks, im0_shape, ratio_pad=None): - """ - img1_shape: model input shape, [h, w] - img0_shape: origin pic shape, [h, w, 3] - masks: [h, w, num] - """ - # Rescale coordinates (xyxy) from im1_shape to im0_shape - if ratio_pad is None: # calculate from im0_shape - gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1]) # gain = old / new - pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2 # wh padding - else: - pad = ratio_pad[1] - top, left = int(pad[1]), int(pad[0]) # y, x - bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0]) - - if len(masks.shape) < 2: - raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}') - masks = masks[top:bottom, left:right] - # masks = masks.permute(2, 0, 1).contiguous() - # masks = F.interpolate(masks[None], im0_shape[:2], mode='bilinear', align_corners=False)[0] - # masks = masks.permute(1, 2, 0).contiguous() - masks = cv2.resize(masks, (im0_shape[1], im0_shape[0])) - - if len(masks.shape) == 2: - masks = masks[:, :, None] - return masks - - -def mask_iou(mask1, mask2, eps=1e-7): - """ - mask1: [N, n] m1 means number of predicted objects - mask2: [M, n] m2 means number of gt objects - Note: n means image_w x image_h - - return: masks iou, [N, M] - """ - intersection = torch.matmul(mask1, mask2.t()).clamp(0) - union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection # (area1 + area2) - intersection - return intersection / (union + eps) - - -def masks_iou(mask1, mask2, eps=1e-7): - 
""" - mask1: [N, n] m1 means number of predicted objects - mask2: [N, n] m2 means number of gt objects - Note: n means image_w x image_h - - return: masks iou, (N, ) - """ - intersection = (mask1 * mask2).sum(1).clamp(0) # (N, ) - union = (mask1.sum(1) + mask2.sum(1))[None] - intersection # (area1 + area2) - intersection - return intersection / (union + eps) - - -def masks2segments(masks, strategy='largest'): - # Convert masks(n,160,160) into segments(n,xy) - segments = [] - for x in masks.int().cpu().numpy().astype('uint8'): - c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0] - if c: - if strategy == 'concat': # concatenate all segments - c = np.concatenate([x.reshape(-1, 2) for x in c]) - elif strategy == 'largest': # select largest segment - c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2) - else: - c = np.zeros((0, 2)) # no segments found - segments.append(c.astype('float32')) - return segments diff --git a/spaces/mrm8488/FlappyBirds/pipe.js b/spaces/mrm8488/FlappyBirds/pipe.js deleted file mode 100644 index 8bc56aaa36d704091035076b20d240845d7fad75..0000000000000000000000000000000000000000 --- a/spaces/mrm8488/FlappyBirds/pipe.js +++ /dev/null @@ -1,40 +0,0 @@ -// Neuro-Evolution Flappy Bird with TensorFlow.js - -class Pipe { - constructor() { - this.spacing = 125; - this.top = random(height / 6, (3 / 4) * height); - this.bottom = height - (this.top + this.spacing); - this.x = width; - this.w = 80; - this.speed = 6; - } - - hits(bird) { - if (bird.y < this.top || bird.y > height - this.bottom) { - if (bird.x > this.x && bird.x < this.x + this.w) { - return true; - } - } - return false; - } - - show() { - fill(75, 127, 83); - rectMode(CORNER); - rect(this.x, 0, this.w, this.top); - rect(this.x, height - this.bottom, this.w, this.bottom); - } - - update() { - this.x -= this.speed; - } - - offscreen() { - if (this.x < -this.w) { - return true; - } else { - return false; - } - } -} diff --git a/spaces/mshkdm/VToonify/vtoonify/model/bisenet/README.md b/spaces/mshkdm/VToonify/vtoonify/model/bisenet/README.md deleted file mode 100644 index 849d55e2789c8852e01707d1ff755dc74e63a7f5..0000000000000000000000000000000000000000 --- a/spaces/mshkdm/VToonify/vtoonify/model/bisenet/README.md +++ /dev/null @@ -1,68 +0,0 @@ -# face-parsing.PyTorch - -


    - -### Contents -- [Training](#training) -- [Demo](#Demo) -- [References](#references) - -## Training - -1. Prepare training data: - -- download [CelebAMask-HQ dataset](https://github.com/switchablenorms/CelebAMask-HQ) - - -- change file path in the `prepropess_data.py` and run -```Shell -python prepropess_data.py -``` - -2. Train the model using CelebAMask-HQ dataset: -Just run the train script: -``` - $ CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch --nproc_per_node=2 train.py -``` - -If you do not wish to train the model, you can download [our pre-trained model](https://drive.google.com/open?id=154JgKpzCPW82qINcVieuPH3fZ2e0P812) and save it in `res/cp`. - - -## Demo -1. Evaluate the trained model using: -```Shell -# evaluate using GPU -python test.py -``` - -## Face makeup using parsing maps -[**face-makeup.PyTorch**](https://github.com/zllrunning/face-makeup.PyTorch) - - - - - - - - - - - - - - - - - - - - - - -
    |                | Hair    | Lip     |
    | Original Input | (image) | (image) |
    | Color          | (image) | (image) |
    - - -## References -- [BiSeNet](https://github.com/CoinCheung/BiSeNet) \ No newline at end of file diff --git a/spaces/msmilauer/AutoGPT-duplicated2/autogpt/processing/text.py b/spaces/msmilauer/AutoGPT-duplicated2/autogpt/processing/text.py deleted file mode 100644 index 52add81401775c1b111512d8149f86a175fd9acb..0000000000000000000000000000000000000000 --- a/spaces/msmilauer/AutoGPT-duplicated2/autogpt/processing/text.py +++ /dev/null @@ -1,132 +0,0 @@ -"""Text processing functions""" -from typing import Dict, Generator, Optional - -from selenium.webdriver.remote.webdriver import WebDriver - -from autogpt.config import Config -from autogpt.llm_utils import create_chat_completion -from autogpt.memory import get_memory - -CFG = Config() -MEMORY = get_memory(CFG) - - -def split_text(text: str, max_length: int = 8192) -> Generator[str, None, None]: - """Split text into chunks of a maximum length - - Args: - text (str): The text to split - max_length (int, optional): The maximum length of each chunk. Defaults to 8192. - - Yields: - str: The next chunk of text - - Raises: - ValueError: If the text is longer than the maximum length - """ - paragraphs = text.split("\n") - current_length = 0 - current_chunk = [] - - for paragraph in paragraphs: - if current_length + len(paragraph) + 1 <= max_length: - current_chunk.append(paragraph) - current_length += len(paragraph) + 1 - else: - yield "\n".join(current_chunk) - current_chunk = [paragraph] - current_length = len(paragraph) + 1 - - if current_chunk: - yield "\n".join(current_chunk) - - -def summarize_text( - url: str, text: str, question: str, driver: Optional[WebDriver] = None -) -> str: - """Summarize text using the OpenAI API - - Args: - url (str): The url of the text - text (str): The text to summarize - question (str): The question to ask the model - driver (WebDriver): The webdriver to use to scroll the page - - Returns: - str: The summary of the text - """ - if not text: - return "Error: No text to summarize" - - text_length = len(text) - print(f"Text length: {text_length} characters") - - summaries = [] - chunks = list(split_text(text)) - scroll_ratio = 1 / len(chunks) - - for i, chunk in enumerate(chunks): - if driver: - scroll_to_percentage(driver, scroll_ratio * i) - print(f"Adding chunk {i + 1} / {len(chunks)} to memory") - - memory_to_add = f"Source: {url}\n" f"Raw content part#{i + 1}: {chunk}" - - MEMORY.add(memory_to_add) - - print(f"Summarizing chunk {i + 1} / {len(chunks)}") - messages = [create_message(chunk, question)] - - summary = create_chat_completion( - model=CFG.fast_llm_model, - messages=messages, - ) - summaries.append(summary) - print(f"Added chunk {i + 1} summary to memory") - - memory_to_add = f"Source: {url}\n" f"Content summary part#{i + 1}: {summary}" - - MEMORY.add(memory_to_add) - - print(f"Summarized {len(chunks)} chunks.") - - combined_summary = "\n".join(summaries) - messages = [create_message(combined_summary, question)] - - return create_chat_completion( - model=CFG.fast_llm_model, - messages=messages, - ) - - -def scroll_to_percentage(driver: WebDriver, ratio: float) -> None: - """Scroll to a percentage of the page - - Args: - driver (WebDriver): The webdriver to use - ratio (float): The percentage to scroll to - - Raises: - ValueError: If the ratio is not between 0 and 1 - """ - if ratio < 0 or ratio > 1: - raise ValueError("Percentage should be between 0 and 1") - driver.execute_script(f"window.scrollTo(0, document.body.scrollHeight * {ratio});") - - -def create_message(chunk: str, question: str) -> 
Dict[str, str]: - """Create a message for the chat completion - - Args: - chunk (str): The chunk of text to summarize - question (str): The question to answer - - Returns: - Dict[str, str]: The message to send to the chat completion - """ - return { - "role": "user", - "content": f'"""{chunk}""" Using the above text, answer the following' - f' question: "{question}" -- if the question cannot be answered using the text,' - " summarize the text.", - } diff --git a/spaces/msmilauer/AutoGPT-duplicated2/tests/test_prompt_generator.py b/spaces/msmilauer/AutoGPT-duplicated2/tests/test_prompt_generator.py deleted file mode 100644 index 6a0bfd6c7bbdbfaa3750e9dee621bd25e17a448b..0000000000000000000000000000000000000000 --- a/spaces/msmilauer/AutoGPT-duplicated2/tests/test_prompt_generator.py +++ /dev/null @@ -1,114 +0,0 @@ -from unittest import TestCase - -from autogpt.promptgenerator import PromptGenerator - - -class TestPromptGenerator(TestCase): - """ - Test cases for the PromptGenerator class, which is responsible for generating - prompts for the AI with constraints, commands, resources, and performance evaluations. - """ - - @classmethod - def setUpClass(cls): - """ - Set up the initial state for each test method by creating an instance of PromptGenerator. - """ - cls.generator = PromptGenerator() - - # Test whether the add_constraint() method adds a constraint to the generator's constraints list - def test_add_constraint(self): - """ - Test if the add_constraint() method adds a constraint to the generator's constraints list. - """ - constraint = "Constraint1" - self.generator.add_constraint(constraint) - self.assertIn(constraint, self.generator.constraints) - - # Test whether the add_command() method adds a command to the generator's commands list - def test_add_command(self): - """ - Test if the add_command() method adds a command to the generator's commands list. - """ - command_label = "Command Label" - command_name = "command_name" - args = {"arg1": "value1", "arg2": "value2"} - self.generator.add_command(command_label, command_name, args) - command = { - "label": command_label, - "name": command_name, - "args": args, - } - self.assertIn(command, self.generator.commands) - - def test_add_resource(self): - """ - Test if the add_resource() method adds a resource to the generator's resources list. - """ - resource = "Resource1" - self.generator.add_resource(resource) - self.assertIn(resource, self.generator.resources) - - def test_add_performance_evaluation(self): - """ - Test if the add_performance_evaluation() method adds an evaluation to the generator's - performance_evaluation list. - """ - evaluation = "Evaluation1" - self.generator.add_performance_evaluation(evaluation) - self.assertIn(evaluation, self.generator.performance_evaluation) - - def test_generate_prompt_string(self): - """ - Test if the generate_prompt_string() method generates a prompt string with all the added - constraints, commands, resources, and evaluations. 
- """ - # Define the test data - constraints = ["Constraint1", "Constraint2"] - commands = [ - { - "label": "Command1", - "name": "command_name1", - "args": {"arg1": "value1"}, - }, - { - "label": "Command2", - "name": "command_name2", - "args": {}, - }, - ] - resources = ["Resource1", "Resource2"] - evaluations = ["Evaluation1", "Evaluation2"] - - # Add test data to the generator - for constraint in constraints: - self.generator.add_constraint(constraint) - for command in commands: - self.generator.add_command( - command["label"], command["name"], command["args"] - ) - for resource in resources: - self.generator.add_resource(resource) - for evaluation in evaluations: - self.generator.add_performance_evaluation(evaluation) - - # Generate the prompt string and verify its correctness - prompt_string = self.generator.generate_prompt_string() - self.assertIsNotNone(prompt_string) - - # Check if all constraints, commands, resources, and evaluations are present in the prompt string - for constraint in constraints: - self.assertIn(constraint, prompt_string) - for command in commands: - self.assertIn(command["name"], prompt_string) - for key, value in command["args"].items(): - self.assertIn(f'"{key}": "{value}"', prompt_string) - for resource in resources: - self.assertIn(resource, prompt_string) - for evaluation in evaluations: - self.assertIn(evaluation, prompt_string) - - self.assertIn("constraints", prompt_string.lower()) - self.assertIn("commands", prompt_string.lower()) - self.assertIn("resources", prompt_string.lower()) - self.assertIn("performance evaluation", prompt_string.lower()) diff --git a/spaces/murbas/Litmus-Voice-Age-Prediction/README.md b/spaces/murbas/Litmus-Voice-Age-Prediction/README.md deleted file mode 100644 index 11442ef741a53a5e8ab5b34e57d8655095657061..0000000000000000000000000000000000000000 --- a/spaces/murbas/Litmus-Voice-Age-Prediction/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Litmus Voice Age Prediction -emoji: 📢 -colorFrom: green -colorTo: yellow -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/mygyasir/Real-Time-Voice-Cloning/synthesizer/inference.py b/spaces/mygyasir/Real-Time-Voice-Cloning/synthesizer/inference.py deleted file mode 100644 index af7bf083ffc9bed33ea6e2c77cb7f69e6b5c0475..0000000000000000000000000000000000000000 --- a/spaces/mygyasir/Real-Time-Voice-Cloning/synthesizer/inference.py +++ /dev/null @@ -1,171 +0,0 @@ -import torch -from synthesizer import audio -from synthesizer.hparams import hparams -from synthesizer.models.tacotron import Tacotron -from synthesizer.utils.symbols import symbols -from synthesizer.utils.text import text_to_sequence -from vocoder.display import simple_table -from pathlib import Path -from typing import Union, List -import numpy as np -import librosa - - -class Synthesizer: - sample_rate = hparams.sample_rate - hparams = hparams - - def __init__(self, model_fpath: Path, verbose=True): - """ - The model isn't instantiated and loaded in memory until needed or until load() is called. 
- - :param model_fpath: path to the trained model file - :param verbose: if False, prints less information when using the model - """ - self.model_fpath = model_fpath - self.verbose = verbose - - # Check for GPU - if torch.cuda.is_available(): - self.device = torch.device("cuda") - else: - self.device = torch.device("cpu") - if self.verbose: - print("Synthesizer using device:", self.device) - - # Tacotron model will be instantiated later on first use. - self._model = None - - def is_loaded(self): - """ - Whether the model is loaded in memory. - """ - return self._model is not None - - def load(self): - """ - Instantiates and loads the model given the weights file that was passed in the constructor. - """ - self._model = Tacotron(embed_dims=hparams.tts_embed_dims, - num_chars=len(symbols), - encoder_dims=hparams.tts_encoder_dims, - decoder_dims=hparams.tts_decoder_dims, - n_mels=hparams.num_mels, - fft_bins=hparams.num_mels, - postnet_dims=hparams.tts_postnet_dims, - encoder_K=hparams.tts_encoder_K, - lstm_dims=hparams.tts_lstm_dims, - postnet_K=hparams.tts_postnet_K, - num_highways=hparams.tts_num_highways, - dropout=hparams.tts_dropout, - stop_threshold=hparams.tts_stop_threshold, - speaker_embedding_size=hparams.speaker_embedding_size).to(self.device) - - self._model.load(self.model_fpath) - self._model.eval() - - if self.verbose: - print("Loaded synthesizer \"%s\" trained to step %d" % (self.model_fpath.name, self._model.state_dict()["step"])) - - def synthesize_spectrograms(self, texts: List[str], - embeddings: Union[np.ndarray, List[np.ndarray]], - return_alignments=False): - """ - Synthesizes mel spectrograms from texts and speaker embeddings. - - :param texts: a list of N text prompts to be synthesized - :param embeddings: a numpy array or list of speaker embeddings of shape (N, 256) - :param return_alignments: if True, a matrix representing the alignments between the - characters - and each decoder output step will be returned for each spectrogram - :return: a list of N melspectrograms as numpy arrays of shape (80, Mi), where Mi is the - sequence length of spectrogram i, and possibly the alignments. - """ - # Load the model on the first request. 
- if not self.is_loaded(): - self.load() - - # Print some info about the model when it is loaded - tts_k = self._model.get_step() // 1000 - - simple_table([("Tacotron", str(tts_k) + "k"), - ("r", self._model.r)]) - - # Preprocess text inputs - inputs = [text_to_sequence(text.strip(), hparams.tts_cleaner_names) for text in texts] - if not isinstance(embeddings, list): - embeddings = [embeddings] - - # Batch inputs - batched_inputs = [inputs[i:i+hparams.synthesis_batch_size] - for i in range(0, len(inputs), hparams.synthesis_batch_size)] - batched_embeds = [embeddings[i:i+hparams.synthesis_batch_size] - for i in range(0, len(embeddings), hparams.synthesis_batch_size)] - - specs = [] - for i, batch in enumerate(batched_inputs, 1): - if self.verbose: - print(f"\n| Generating {i}/{len(batched_inputs)}") - - # Pad texts so they are all the same length - text_lens = [len(text) for text in batch] - max_text_len = max(text_lens) - chars = [pad1d(text, max_text_len) for text in batch] - chars = np.stack(chars) - - # Stack speaker embeddings into 2D array for batch processing - speaker_embeds = np.stack(batched_embeds[i-1]) - - # Convert to tensor - chars = torch.tensor(chars).long().to(self.device) - speaker_embeddings = torch.tensor(speaker_embeds).float().to(self.device) - - # Inference - _, mels, alignments = self._model.generate(chars, speaker_embeddings) - mels = mels.detach().cpu().numpy() - for m in mels: - # Trim silence from end of each spectrogram - while np.max(m[:, -1]) < hparams.tts_stop_threshold: - m = m[:, :-1] - specs.append(m) - - if self.verbose: - print("\n\nDone.\n") - return (specs, alignments) if return_alignments else specs - - @staticmethod - def load_preprocess_wav(fpath): - """ - Loads and preprocesses an audio file under the same conditions the audio files were used to - train the synthesizer. - """ - wav = librosa.load(str(fpath), hparams.sample_rate)[0] - if hparams.rescale: - wav = wav / np.abs(wav).max() * hparams.rescaling_max - return wav - - @staticmethod - def make_spectrogram(fpath_or_wav: Union[str, Path, np.ndarray]): - """ - Creates a mel spectrogram from an audio file in the same manner as the mel spectrograms that - were fed to the synthesizer when training. - """ - if isinstance(fpath_or_wav, str) or isinstance(fpath_or_wav, Path): - wav = Synthesizer.load_preprocess_wav(fpath_or_wav) - else: - wav = fpath_or_wav - - mel_spectrogram = audio.melspectrogram(wav, hparams).astype(np.float32) - return mel_spectrogram - - @staticmethod - def griffin_lim(mel): - """ - Inverts a mel spectrogram using Griffin-Lim. The mel spectrogram is expected to have been built - with the same parameters present in hparams.py. 
- """ - return audio.inv_mel_spectrogram(mel, hparams) - - -def pad1d(x, max_len, pad_value=0): - return np.pad(x, (0, max_len - len(x)), mode="constant", constant_values=pad_value) diff --git a/spaces/myscale/ChatData/chat.py b/spaces/myscale/ChatData/chat.py deleted file mode 100644 index e825150773dd81d596d5df19f01a8a06d99ea848..0000000000000000000000000000000000000000 --- a/spaces/myscale/ChatData/chat.py +++ /dev/null @@ -1,206 +0,0 @@ -import json -import time -import pandas as pd -from os import environ -import datetime -import streamlit as st -from langchain.schema import Document - -from callbacks.arxiv_callbacks import ChatDataSelfSearchCallBackHandler, \ - ChatDataSelfAskCallBackHandler, ChatDataSQLSearchCallBackHandler, \ - ChatDataSQLAskCallBackHandler - -from langchain.schema import BaseMessage, HumanMessage, AIMessage, FunctionMessage, SystemMessage -from auth0_component import login_button - - -from helper import build_tools, build_agents, build_all, sel_map, display - -environ['OPENAI_API_BASE'] = st.secrets['OPENAI_API_BASE'] - -st.set_page_config(page_title="ChatData", page_icon="https://myscale.com/favicon.ico") -st.header("ChatData") - - -if 'retriever' not in st.session_state: - st.session_state["sel_map_obj"] = build_all() - st.session_state["tools"] = build_tools() - -def on_chat_submit(): - ret = st.session_state.agents[st.session_state.sel][st.session_state.ret_type]({"input": st.session_state.chat_input}) - print(ret) - -def clear_history(): - st.session_state.agents[st.session_state.sel][st.session_state.ret_type].memory.clear() - -AUTH0_CLIENT_ID = st.secrets['AUTH0_CLIENT_ID'] -AUTH0_DOMAIN = st.secrets['AUTH0_DOMAIN'] - -def login(): - if "user_name" in st.session_state or ("jump_query_ask" in st.session_state and st.session_state.jump_query_ask): - return True - st.subheader("🤗 Welcome to [MyScale](https://myscale.com)'s [ChatData](https://github.com/myscale/ChatData)! 🤗 ") - st.write("You can now chat with ArXiv and Wikipedia! 🌟\n") - st.write("Built purely with streamlit 👑 , LangChain 🦜🔗 and love ❤️ for AI!") - st.write("Follow us on [Twitter](https://x.com/myscaledb) and [Discord](https://discord.gg/D2qpkqc4Jq)!") - st.write("For more details, please refer to [our repository on GitHub](https://github.com/myscale/ChatData)!") - st.divider() - col1, col2 = st.columns(2, gap='large') - with col1.container(): - st.write("Try out MyScale's Self-query and Vector SQL retrievers!") - st.write("In this demo, you will be able to see how those retrievers " - "**digest** -> **translate** -> **retrieve** -> **answer** to your question!") - st.session_state["jump_query_ask"] = st.button("Query / Ask") - with col2.container(): - # st.warning("To use chat, please jump to [https://myscale-chatdata.hf.space](https://myscale-chatdata.hf.space)") - st.write("Now with the power of LangChain's Conversantional Agents, we are able to build " - "an RAG-enabled chatbot within one MyScale instance! 
") - st.write("Log in to Chat with RAG!") - login_button(AUTH0_CLIENT_ID, AUTH0_DOMAIN, "auth0") - st.divider() - st.write("- [Privacy Policy](https://myscale.com/privacy/)\n" - "- [Terms of Sevice](https://myscale.com/terms/)") - if st.session_state.auth0 is not None: - st.session_state.user_info = dict(st.session_state.auth0) - if 'email' in st.session_state.user_info: - email = st.session_state.user_info["email"] - else: - email = f"{st.session_state.user_info['nickname']}@{st.session_state.user_info['sub']}" - st.session_state["user_name"] = email - del st.session_state.auth0 - st.experimental_rerun() - if st.session_state.jump_query_ask: - st.experimental_rerun() - -def back_to_main(): - if "user_info" in st.session_state: - del st.session_state.user_info - if "user_name" in st.session_state: - del st.session_state.user_name - if "jump_query_ask" in st.session_state: - del st.session_state.jump_query_ask - -if login(): - if "user_name" in st.session_state: - st.session_state["agents"] = build_agents(st.session_state.user_name) - with st.sidebar: - st.radio("Retriever Type", ["Self-querying retriever", "Vector SQL"], key="ret_type") - st.selectbox("Knowledge Base", ["ArXiv Papers", "Wikipedia", "ArXiv + Wikipedia"], key="sel") - st.button("Clear Chat History", on_click=clear_history) - st.button("Logout", on_click=back_to_main) - for msg in st.session_state.agents[st.session_state.sel][st.session_state.ret_type].memory.chat_memory.messages: - speaker = "user" if isinstance(msg, HumanMessage) else "assistant" - if isinstance(msg, FunctionMessage): - with st.chat_message("Knowledge Base", avatar="📖"): - print(type(msg.content)) - st.write(f"*{datetime.datetime.fromtimestamp(msg.additional_kwargs['timestamp']).isoformat()}*") - st.write("Retrieved from knowledge base:") - try: - st.dataframe(pd.DataFrame.from_records(map(dict, eval(msg.content)))) - except: - st.write(msg.content) - else: - if len(msg.content) > 0: - with st.chat_message(speaker): - print(type(msg), msg.dict()) - st.write(f"*{datetime.datetime.fromtimestamp(msg.additional_kwargs['timestamp']).isoformat()}*") - st.write(f"{msg.content}") - st.chat_input("Input Message", on_submit=on_chat_submit, key="chat_input") - elif "jump_query_ask" in st.session_state and st.session_state.jump_query_ask: - - sel = st.selectbox('Choose the knowledge base you want to ask with:', - options=['ArXiv Papers', 'Wikipedia']) - sel_map[sel]['hint']() - tab_sql, tab_self_query = st.tabs(['Vector SQL', 'Self-Query Retrievers']) - with tab_sql: - sel_map[sel]['hint_sql']() - st.text_input("Ask a question:", key='query_sql') - cols = st.columns([1, 1, 1, 4]) - cols[0].button("Query", key='search_sql') - cols[1].button("Ask", key='ask_sql') - cols[2].button("Back", key='back_sql', on_click=back_to_main) - plc_hldr = st.empty() - if st.session_state.search_sql: - plc_hldr = st.empty() - print(st.session_state.query_sql) - with plc_hldr.expander('Query Log', expanded=True): - callback = ChatDataSQLSearchCallBackHandler() - try: - docs = st.session_state.sel_map_obj[sel]["sql_retriever"].get_relevant_documents( - st.session_state.query_sql, callbacks=[callback]) - callback.progress_bar.progress(value=1.0, text="Done!") - docs = pd.DataFrame( - [{**d.metadata, 'abstract': d.page_content} for d in docs]) - display(docs) - except Exception as e: - st.write('Oops 😵 Something bad happened...') - raise e - - if st.session_state.ask_sql: - plc_hldr = st.empty() - print(st.session_state.query_sql) - with plc_hldr.expander('Chat Log', expanded=True): - 
callback = ChatDataSQLAskCallBackHandler() - try: - ret = st.session_state.sel_map_obj[sel]["sql_chain"]( - st.session_state.query_sql, callbacks=[callback]) - callback.progress_bar.progress(value=1.0, text="Done!") - st.markdown( - f"### Answer from LLM\n{ret['answer']}\n### References") - docs = ret['sources'] - docs = pd.DataFrame( - [{**d.metadata, 'abstract': d.page_content} for d in docs]) - display( - docs, ['ref_id'] + sel_map[sel]["must_have_cols"], index='ref_id') - except Exception as e: - st.write('Oops 😵 Something bad happened...') - raise e - - - with tab_self_query: - st.info("You can retrieve papers with button `Query` or ask questions based on retrieved papers with button `Ask`.", icon='💡') - st.dataframe(st.session_state.sel_map_obj[sel]["metadata_columns"]) - st.text_input("Ask a question:", key='query_self') - cols = st.columns([1, 1, 1, 4]) - cols[0].button("Query", key='search_self') - cols[1].button("Ask", key='ask_self') - cols[2].button("Back", key='back_self', on_click=back_to_main) - plc_hldr = st.empty() - if st.session_state.search_self: - plc_hldr = st.empty() - print(st.session_state.query_self) - with plc_hldr.expander('Query Log', expanded=True): - call_back = None - callback = ChatDataSelfSearchCallBackHandler() - try: - docs = st.session_state.sel_map_obj[sel]["retriever"].get_relevant_documents( - st.session_state.query_self, callbacks=[callback]) - print(docs) - callback.progress_bar.progress(value=1.0, text="Done!") - docs = pd.DataFrame( - [{**d.metadata, 'abstract': d.page_content} for d in docs]) - display(docs, sel_map[sel]["must_have_cols"]) - except Exception as e: - st.write('Oops 😵 Something bad happened...') - raise e - - if st.session_state.ask_self: - plc_hldr = st.empty() - print(st.session_state.query_self) - with plc_hldr.expander('Chat Log', expanded=True): - call_back = None - callback = ChatDataSelfAskCallBackHandler() - try: - ret = st.session_state.sel_map_obj[sel]["chain"]( - st.session_state.query_self, callbacks=[callback]) - callback.progress_bar.progress(value=1.0, text="Done!") - st.markdown( - f"### Answer from LLM\n{ret['answer']}\n### References") - docs = ret['sources'] - docs = pd.DataFrame( - [{**d.metadata, 'abstract': d.page_content} for d in docs]) - display( - docs, ['ref_id'] + sel_map[sel]["must_have_cols"], index='ref_id') - except Exception as e: - st.write('Oops 😵 Something bad happened...') - raise e \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/ 70 .md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/ 70 .md deleted file mode 100644 index 5762eecb70b8143ab33382e7c6d67c7fced7de94..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/ 70 .md +++ /dev/null @@ -1,26 +0,0 @@ - -

    Дворовые Песни 70 Лет (Courtyard Songs of the '70s): Nostalgia for the Golden Age of Blatnaya Music

    -

    Courtyard songs of the '70s are not just a musical genre but a whole culture that reflects the spirit and mood of that time. Courtyard songs are songs about love and friendship, life and death, fate and hope, homeland and emigration. They are the songs that were sung with a guitar under the windows of sweethearts, on benches in courtyards, at railway stations, and in prisons. They are the songs that united people of different nationalities, social classes, and political views.

    -

    The courtyard songs of the '70s mark the golden age of blatnaya (criminal underworld) music, when famous performers such as Анатолий Полотно, Александр Малинин, Сергей Наговицын, Владимир Высоцкий, Булат Окуджава, Александр Розенбаум, and many others appeared. These songs became classics of the genre and still resonate in the hearts of millions of listeners.

    -

    Дворовые Песни 70 Лет (Courtyard Songs of the '70s)


    Download Zip: https://urlcod.com/2uIcjP



    -

    If you want to immerse yourself in the atmosphere of the courtyard songs of the '70s, we offer you a selection of the best hits of all time. You can listen to them on YouTube or download them from the internet. Here are some of them:

    -
      -
    • Голуби - Александр Малинин[^1^]
    • -
    • Вернулся-таки я в Одессу - Анатолий Полотно[^2^]
    • -
    • Поручик - Сергей Наговицын[^3^]
    • -
    • Иволга - ВИА Орион
    • -
    • Мурка - folk song
    • -
    • Черный ворон - folk song
    • -
    • Солнечный зайчик - folk song
    • -
    • Скрипач - Михаил Круг
    • -
    • Белая береза - folk song
    • -
    • Колокола - folk song
    • -
    -

    Enjoy listening to the courtyard songs of the '70s and share your impressions with your friends!

    - -

    The courtyard songs of the '70s are not only music but also poetry. These songs carry deep and touching verses that tell of people's lives and feelings. They are songs that make you laugh and cry, rejoice and grieve, dream and remember. They are songs that give us comfort and hope, love and loyalty, wisdom and experience.

    -

    -

    The courtyard songs of the '70s are also the history of our country. These songs reflect the events and phenomena of that era: war and peace, repression and the Thaw, space flight and hippies, the Olympics and Afghanistan. They are songs that bear witness to the glorious and tragic pages of our past. They are songs that help us understand our present and our future.

    -

    The courtyard songs of the '70s are a priceless heritage of our culture. These songs preserve the spirit and traditions of our people. They are songs passed down from generation to generation like family heirlooms. They are songs that connect us with our family and loved ones, with our roots and our destiny.

    -
    -
    \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Ansys 15 32 Bit Crack _HOT_.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Ansys 15 32 Bit Crack _HOT_.md deleted file mode 100644 index 644311a8dcd952ce64ca8b6ac2dc5cb7f3668356..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Ansys 15 32 Bit Crack _HOT_.md +++ /dev/null @@ -1,12 +0,0 @@ - -

    Ansys 15 32 Bit Crack: What You Need to Know

    -

    If you are an engineer or a student who needs to perform complex simulations and analyses for your projects, you might have heard of Ansys 15. Ansys is a leading software for computer-aided engineering (CAE) that can help you solve various problems in fields such as structural mechanics, fluid dynamics, electromagnetics, heat transfer, acoustics, and more.

    -

    ansys 15 32 bit crack


    Download Zip https://urlcod.com/2uI9Wd



    -

    However, Ansys is not cheap software. Depending on the version and the license type, it can cost you thousands of dollars per year to use it legally. That's why some people resort to using cracks.

    -

    A crack is a modified version of a piece of software that bypasses its security features and allows you to use it without paying for it or activating it. Cracks are usually created by hackers or programmers who want to show off their skills or challenge the software's developers.

    -

    Using cracks can be tempting for many reasons. You can save money, access more features, and avoid restrictions or limitations imposed by the original software. However, using cracks also comes with many risks and drawbacks. You can expose your computer to viruses or malware, compromise your data or privacy, violate intellectual property laws, lose technical support or updates, and damage your reputation or credibility.

    -

    -

    In this article, we will tell you everything you need to know about the Ansys 15 32 bit crack: how to download it, how to install it, how to use it, what its pros and cons are, and what some alternatives to it are.

    -
    -
    -
    \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Mary Kom Movies Dual Audio 720p Hd.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Mary Kom Movies Dual Audio 720p Hd.md deleted file mode 100644 index 865b00dd6e654164bada1383e45cd286e82203a4..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Mary Kom Movies Dual Audio 720p Hd.md +++ /dev/null @@ -1,38 +0,0 @@ -
    -

    Mary Kom Movies Dual Audio 720p Hd: How to Watch the Inspiring Biopic of India's Boxing Champion

    - -

    Mary Kom is a 2014 Indian biographical film based on the life of Mary Kom, a six-time world champion and Olympic bronze medalist in boxing. The film stars Priyanka Chopra as Mary Kom, along with Sunil Thapa, Robin Das, and others. The film chronicles Mary Kom's journey from a humble village girl to a national hero who defied all odds and stereotypes to achieve her dream.

    -

    Mary Kom Movies Dual Audio 720p Hd


    Download »»» https://urlcod.com/2uIadH



    - -

    If you are looking for a motivational and uplifting movie to watch, Mary Kom is a great choice. The film showcases Mary Kom's passion, determination, courage, and resilience in the face of various challenges and hardships. The film also highlights the importance of family, friendship, and mentorship in Mary Kom's life.

    - -

    But how can you watch Mary Kom movies dual audio 720p hd? Here are some options:

    - -
      -
    • The easiest way to watch Mary Kom movies dual audio 720p hd is to stream it online. You can find the film on various platforms such as Archive.org, MKV Movies Point, or SoundCloud. However, be aware that some of these sites may not have the best quality or may contain ads or malware. Always use reliable antivirus software and a VPN service when streaming online.

    • -
    • Another way to watch Mary Kom movies dual audio 720p hd is to download it from torrent sites. You can use a torrent client such as uTorrent or BitTorrent to download the movie file from sites like Trello. However, be aware that downloading from torrent sites may be illegal or unethical in some countries or regions. Always check the local laws and regulations before downloading anything from torrent sites.

    • -
    • A third way to watch Mary Kom movies dual audio 720p hd is to buy or rent it from official sources. You can find the film on platforms such as Amazon Prime Video, Netflix, or YouTube. However, be aware that some of these platforms may not have the dual audio option or may charge a fee for watching the film.

    • -
    - -

    No matter which option you choose, you will surely enjoy watching Mary Kom movies dual audio 720p hd. The film is a powerful and inspiring story of a woman who fought for her dreams and made history. It is a must-watch for anyone who loves sports, biographies, or dramas.

    - -

    Mary Kom Movies Dual Audio 720p Hd: Who is Mary Kom and Why is She an Inspiration?

    - -

    Mary Kom movies dual audio 720p hd is not just a film, but a tribute to one of the most remarkable athletes and personalities of India. But who is Mary Kom and why is she an inspiration?

    -

    - -

    Mary Kom, whose full name is Mangte Chungneijang Mary Kom OLY, was born on 24 November 1982 in a poor region of Manipur, a state in northeastern India. She grew up in a large family of farmers and had to help with the household chores and farm work. She was interested in sports from a young age, but faced many obstacles and opposition from her family and society, who did not approve of girls pursuing sports or boxing.

    - -

    However, Mary Kom did not give up on her passion. She moved to the city of Imphal and joined a boxing academy run by M. Narjit Singh, a former boxer and coach. She trained hard and proved her talent by winning state and national championships. She also met her future husband, Karong Onkholer Kom, who supported her career and helped her balance her personal and professional life.

    - -

    Mary Kom made history by becoming the first Indian woman boxer to win a medal at the World Amateur Boxing Championships in 2001. She went on to win six gold medals at the event, making her the only woman to win six world titles. She also won medals at the Asian Games, the Commonwealth Games, the Asian Championships, and other international tournaments. She was ranked the world's No. 1 female light-flyweight boxer by the International Boxing Association (AIBA) and earned the nickname "Magnificent Mary".

    - -

    Mary Kom also achieved her dream of representing India at the Olympics. She qualified for the 2012 London Olympics, where she competed in the flyweight (51 kg) category and won a bronze medal. She was the only Indian female boxer to qualify for that edition of the Games and the first Indian female boxer to win an Olympic medal. She missed out on qualifying for the 2016 Rio Olympics, but bounced back by qualifying for the 2020 Tokyo Olympics, where she made her final appearance at the age of 38.

    - -

    Mary Kom is not only a champion in the ring, but also outside it. She has been involved in various social causes and initiatives, such as promoting women's empowerment, education, health, and sports. She has been nominated as a member of the Rajya Sabha, the upper house of the Indian parliament, by the President of India in 2016. She has also been appointed as a national observer for boxing by the Ministry of Youth Affairs and Sports, Government of India in 2017. She has received numerous awards and honors for her achievements and contributions, such as the Padma Vibhushan (2020), Padma Bhushan (2013), Padma Shri (2006), Arjuna Award (2003), Rajiv Gandhi Khel Ratna Award (2009), among others.

    - -

    Mary Kom is an inspiration for millions of people around the world, especially women and girls who face discrimination and challenges in pursuing their dreams. She has shown that with passion, determination, courage, and hard work, nothing is impossible. She has also shown that one can balance family and career with grace and dignity. She has been a role model for many aspiring athletes and sportspersons who look up to her for guidance and motivation.

    - -

    Mary Kom movies dual audio 720p hd is a film that celebrates the life and achievements of this extraordinary woman who has made India proud. It is a film that will inspire you to follow your dreams and overcome your challenges.

    -
    -
    \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Microsoft Toolkit 5.9.6 Final (W.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Microsoft Toolkit 5.9.6 Final (W.md deleted file mode 100644 index 89bd4255a50847ab2ed78198364ffab895205085..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Microsoft Toolkit 5.9.6 Final (W.md +++ /dev/null @@ -1,32 +0,0 @@ - -

    How to Activate Windows and Office with Microsoft Toolkit 5.9.6 Final

    -

    Microsoft Toolkit is a free, unofficial tool that can activate both Windows and Office products. It is compatible with Windows 10, 8.1, 8, 7, Vista, and XP, as well as Office 2016, 2013, 2010, 2007, and 2003. In this article, we will show you how to download and use Microsoft Toolkit 5.9.6 Final to activate your Windows and Office products.

    -

    Microsoft Toolkit 5.9.6 Final (W


    DOWNLOAD: https://urlcod.com/2uIaTg



    -

    What is Microsoft Toolkit?

    -

    Microsoft Toolkit is a software tool that can help you manage, license, and activate Microsoft products. It can also create backup copies of your activation keys, check the status of your activation, and uninstall other activators. Microsoft Toolkit supports both online and offline activation modes, and it can bypass Windows Genuine Advantage (WGA) validation. It is safe, reliable, and virus-free.

    -

    What are the features of Microsoft Toolkit 5.9.6 Final?

    -

    Microsoft Toolkit 5.9.6 Final is the latest version of the software that has been updated with some new features and improvements. Some of the features are:

    -
      -
    • Support for Windows 10 Anniversary Update and Office 2016.
    • -
    • Improved AutoKMS and AutoRearm functions.
    • -
    • Fixed bugs and errors in previous versions.
    • -
    • Added more customization options and settings.
    • -
    • Enhanced user interface and performance.
    • -
    -

    How to download Microsoft Toolkit 5.9.6 Final?

    -

    To download Microsoft Toolkit 5.9.6 Final, you need to visit the official website of the software[^1^]. There you will find a download link that will redirect you to a file hosting site. You need to complete a captcha verification and wait for a few seconds before you can download the file. The file size is about 66 MB and it is compressed in a ZIP format. You need to extract the file using a program like WinRAR or 7-Zip before you can run it.

    -

    -

    How to use Microsoft Toolkit 5.9.6 Final?

    -

    To use Microsoft Toolkit 5.9.6 Final, you need to follow these steps:

    -
      -
    1. Disable your antivirus program and firewall temporarily, as they may interfere with the activation process.
    2. -
    3. Run Microsoft Toolkit.exe as an administrator.
    4. -
    5. Select the product that you want to activate from the tabs at the bottom of the window.
    6. -
    7. Click on the EZ-Activator button and wait for the process to complete.
    8. -
    9. Restart your computer and enjoy your activated Windows and Office products.
    10. -
    -

    Note: If you encounter any problems or errors during the activation process, you can try using the alternative methods such as AutoKMS, AutoRearm, or Manual Activation from the toolkit menu.

    -

    Conclusion

    -

    Microsoft Toolkit 5.9.6 Final is a powerful and easy-to-use tool that can activate your Windows and Office products for free. It has many features and advantages over other activators in the market. However, you should always use it at your own risk and responsibility, as it may violate the terms and conditions of Microsoft. We hope this article has helped you understand how to download and use Microsoft Toolkit 5.9.6 Final.

    -
    -
    \ No newline at end of file diff --git a/spaces/nightfury/img2music/utils.py b/spaces/nightfury/img2music/utils.py deleted file mode 100644 index 58f6e0c1f9c6af926a3cacf090517d6a62d618be..0000000000000000000000000000000000000000 --- a/spaces/nightfury/img2music/utils.py +++ /dev/null @@ -1,50 +0,0 @@ -import json -import numpy as np -import httpx -import os - -from constants import MUBERT_TAGS, MUBERT_MODE, MUBERT_LICENSE, MUBERT_TOKEN - -def get_mubert_tags_embeddings(w2v_model): - return w2v_model.encode(MUBERT_TAGS) - - -def get_pat(email: str): - r = httpx.post('https://api-b2b.mubert.com/v2/GetServiceAccess', - json={ - "method": "GetServiceAccess", - "params": { - "email": email, - "license": MUBERT_LICENSE, - "token": MUBERT_TOKEN, - "mode": MUBERT_MODE, - } - }) - - rdata = json.loads(r.text) - assert rdata['status'] == 1, "probably incorrect e-mail" - pat = rdata['data']['pat'] - return pat - - -def find_similar(em, embeddings, method='cosine'): - scores = [] - for ref in embeddings: - if method == 'cosine': - scores.append(1 - np.dot(ref, em) / (np.linalg.norm(ref) * np.linalg.norm(em))) - if method == 'norm': - scores.append(np.linalg.norm(ref - em)) - return np.array(scores), np.argsort(scores) - - -def get_tags_for_prompts(w2v_model, mubert_tags_embeddings, prompts, top_n=3, debug=False): - prompts_embeddings = w2v_model.encode(prompts) - ret = [] - for i, pe in enumerate(prompts_embeddings): - scores, idxs = find_similar(pe, mubert_tags_embeddings) - top_tags = MUBERT_TAGS[idxs[:top_n]] - top_prob = 1 - scores[idxs[:top_n]] - if debug: - print(f"Prompt: {prompts[i]}\nTags: {', '.join(top_tags)}\nScores: {top_prob}\n\n\n") - ret.append((prompts[i], list(top_tags))) - return ret \ No newline at end of file diff --git a/spaces/nschenone/lyric-buddy/src/utils.py b/spaces/nschenone/lyric-buddy/src/utils.py deleted file mode 100644 index 61d78394b9dcc62beecb336bb3d361b205fdc1ed..0000000000000000000000000000000000000000 --- a/spaces/nschenone/lyric-buddy/src/utils.py +++ /dev/null @@ -1,18 +0,0 @@ -import yaml -from transformers import pipeline - - -def load_pipelines_from_config(config_path: str): - with open(config_path, "r") as f: - model_config = yaml.safe_load(f.read()) - - models = {} - for model, config in model_config.items(): - name = f"{model} - ({', '.join(config['artist_names'])})" - models[name] = pipeline( - task=config["task"], - model=config["model_name"], - revision=config["hf_commit_hash"], - ) - - return models diff --git a/spaces/ntt123/vietnam-male-voice-wavegru-tts/README.md b/spaces/ntt123/vietnam-male-voice-wavegru-tts/README.md deleted file mode 100644 index 0e3516f73a5947d02737eb6dc31ce6f79b586671..0000000000000000000000000000000000000000 --- a/spaces/ntt123/vietnam-male-voice-wavegru-tts/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Vietnam Male Voice Wavegru Tts -emoji: 💻 -colorFrom: indigo -colorTo: blue -sdk: gradio -sdk_version: 3.44.3 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ntt123/vietnam-male-voice-wavegru-tts/sparse_matmul/numerics/fixed_types_test.cc b/spaces/ntt123/vietnam-male-voice-wavegru-tts/sparse_matmul/numerics/fixed_types_test.cc deleted file mode 100644 index 82fcd93d8b817d4ee3c1892a7221317e8441de68..0000000000000000000000000000000000000000 --- a/spaces/ntt123/vietnam-male-voice-wavegru-tts/sparse_matmul/numerics/fixed_types_test.cc +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed 
under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "sparse_matmul/numerics/fixed_types.h" - -#include - -#include "gtest/gtest.h" -#include "sparse_matmul/numerics/test_utils.h" -#include "sparse_matmul/numerics/type_utils.h" - -namespace csrblocksparse { - -// Basic test that makes sure basic multiplication and TypeOfProduct work -// correctly. -TEST(FixedPoint, Multiplication) { - fixed16<4> a(.1f); - fixed16<4> b(1.f); - - TypeOfProduct, fixed16<4>>::type c(a.raw_val() * b.raw_val()); - - EXPECT_NEAR(static_cast(c), .1f, - 1. / (1 << fixed16<2>::kMantissaBits)); -} - -TEST(FixedPoint, SafeCastingIntMax) { - const float int_max_float = std::numeric_limits::max(); - const csrblocksparse::fixed32<31> int_max_fixed(int_max_float); - EXPECT_FLOAT_EQ(int_max_float, static_cast(int_max_fixed)); -} - -} // namespace csrblocksparse diff --git a/spaces/oguzakif/video-object-remover/SiamMask/utils/pysot/utils/misc.py b/spaces/oguzakif/video-object-remover/SiamMask/utils/pysot/utils/misc.py deleted file mode 100644 index 9018652525e0864b15cc610156f373c43d84e210..0000000000000000000000000000000000000000 --- a/spaces/oguzakif/video-object-remover/SiamMask/utils/pysot/utils/misc.py +++ /dev/null @@ -1,35 +0,0 @@ -# -------------------------------------------------------- -# Python Single Object Tracking Evaluation -# Licensed under The MIT License [see LICENSE for details] -# Written by Fangyi Zhang -# @author fangyi.zhang@vipl.ict.ac.cn -# @project https://github.com/StrangerZhang/pysot-toolkit.git -# Revised for SiamMask by foolwood -# -------------------------------------------------------- -import numpy as np - -def determine_thresholds(confidence, resolution=100): - """choose threshold according to confidence - - Args: - confidence: list or numpy array or numpy array - reolution: number of threshold to choose - - Restures: - threshold: numpy array - """ - if isinstance(confidence, list): - confidence = np.array(confidence) - confidence = confidence.flatten() - confidence = confidence[~np.isnan(confidence)] - confidence.sort() - - assert len(confidence) > resolution and resolution > 2 - - thresholds = np.ones((resolution)) - thresholds[0] = - np.inf - thresholds[-1] = np.inf - delta = np.floor(len(confidence) / (resolution - 2)) - idxs = np.linspace(delta, len(confidence)-delta, resolution-2, dtype=np.int32) - thresholds[1:-1] = confidence[idxs] - return thresholds diff --git a/spaces/ondrejbiza/isa/__init__.py b/spaces/ondrejbiza/isa/__init__.py deleted file mode 100644 index 78bf1a8d3d8c4665a7135205a3561cf10e097031..0000000000000000000000000000000000000000 --- a/spaces/ondrejbiza/isa/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# coding=utf-8 -# Copyright 2023 The Google Research Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - diff --git a/spaces/os1187/gpt2-chatbot/README.md b/spaces/os1187/gpt2-chatbot/README.md deleted file mode 100644 index ed0f0e419e42d48993ef1223d17089815455b8da..0000000000000000000000000000000000000000 --- a/spaces/os1187/gpt2-chatbot/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Funny Chatbot -emoji: 🌖 -colorFrom: yellow -colorTo: yellow -sdk: gradio -sdk_version: 3.9.1 -app_file: app.py -pinned: false -license: cc-by-nc-sa-4.0 -duplicated_from: DrGabrielLopez/gpt2-chatbot ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/osanseviero/riiaa/README.md b/spaces/osanseviero/riiaa/README.md deleted file mode 100644 index 3ff78bca8c664f1d5f1ec920eb367ab4b8579a4b..0000000000000000000000000000000000000000 --- a/spaces/osanseviero/riiaa/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Riiaa -emoji: 👀 -colorFrom: red -colorTo: green -sdk: gradio -sdk_version: 3.4 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/owaiskha9654/Custom_Yolov7/utils/metrics.py b/spaces/owaiskha9654/Custom_Yolov7/utils/metrics.py deleted file mode 100644 index 666b8c7ec1c0a488eab1b4e7f2f0474973589525..0000000000000000000000000000000000000000 --- a/spaces/owaiskha9654/Custom_Yolov7/utils/metrics.py +++ /dev/null @@ -1,223 +0,0 @@ -# Model validation metrics - -from pathlib import Path - -import matplotlib.pyplot as plt -import numpy as np -import torch - -from . import general - - -def fitness(x): - # Model fitness as a weighted combination of metrics - w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] - return (x[:, :4] * w).sum(1) - - -def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=()): - """ Compute the average precision, given the recall and precision curves. - Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. - # Arguments - tp: True positives (nparray, nx1 or nx10). - conf: Objectness value from 0-1 (nparray). - pred_cls: Predicted object classes (nparray). - target_cls: True object classes (nparray). - plot: Plot precision-recall curve at mAP@0.5 - save_dir: Plot save directory - # Returns - The average precision as computed in py-faster-rcnn. 
- """ - - # Sort by objectness - i = np.argsort(-conf) - tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] - - # Find unique classes - unique_classes = np.unique(target_cls) - nc = unique_classes.shape[0] # number of classes, number of detections - - # Create Precision-Recall curve and compute AP for each class - px, py = np.linspace(0, 1, 1000), [] # for plotting - ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) - for ci, c in enumerate(unique_classes): - i = pred_cls == c - n_l = (target_cls == c).sum() # number of labels - n_p = i.sum() # number of predictions - - if n_p == 0 or n_l == 0: - continue - else: - # Accumulate FPs and TPs - fpc = (1 - tp[i]).cumsum(0) - tpc = tp[i].cumsum(0) - - # Recall - recall = tpc / (n_l + 1e-16) # recall curve - r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases - - # Precision - precision = tpc / (tpc + fpc) # precision curve - p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score - - # AP from recall-precision curve - for j in range(tp.shape[1]): - ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j]) - if plot and j == 0: - py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 - - # Compute F1 (harmonic mean of precision and recall) - f1 = 2 * p * r / (p + r + 1e-16) - if plot: - plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names) - plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1') - plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision') - plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall') - - i = f1.mean(0).argmax() # max F1 index - return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32') - - -def compute_ap(recall, precision): - """ Compute the average precision, given the recall and precision curves - # Arguments - recall: The recall curve (list) - precision: The precision curve (list) - # Returns - Average precision, precision curve, recall curve - """ - - # Append sentinel values to beginning and end - mrec = np.concatenate(([0.], recall, [recall[-1] + 0.01])) - mpre = np.concatenate(([1.], precision, [0.])) - - # Compute the precision envelope - mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) - - # Integrate area under curve - method = 'interp' # methods: 'continuous', 'interp' - if method == 'interp': - x = np.linspace(0, 1, 101) # 101-point interp (COCO) - ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate - else: # 'continuous' - i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes - ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve - - return ap, mpre, mrec - - -class ConfusionMatrix: - # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix - def __init__(self, nc, conf=0.25, iou_thres=0.45): - self.matrix = np.zeros((nc + 1, nc + 1)) - self.nc = nc # number of classes - self.conf = conf - self.iou_thres = iou_thres - - def process_batch(self, detections, labels): - """ - Return intersection-over-union (Jaccard index) of boxes. - Both sets of boxes are expected to be in (x1, y1, x2, y2) format. 
- Arguments: - detections (Array[N, 6]), x1, y1, x2, y2, conf, class - labels (Array[M, 5]), class, x1, y1, x2, y2 - Returns: - None, updates confusion matrix accordingly - """ - detections = detections[detections[:, 4] > self.conf] - gt_classes = labels[:, 0].int() - detection_classes = detections[:, 5].int() - iou = general.box_iou(labels[:, 1:], detections[:, :4]) - - x = torch.where(iou > self.iou_thres) - if x[0].shape[0]: - matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() - if x[0].shape[0] > 1: - matches = matches[matches[:, 2].argsort()[::-1]] - matches = matches[np.unique(matches[:, 1], return_index=True)[1]] - matches = matches[matches[:, 2].argsort()[::-1]] - matches = matches[np.unique(matches[:, 0], return_index=True)[1]] - else: - matches = np.zeros((0, 3)) - - n = matches.shape[0] > 0 - m0, m1, _ = matches.transpose().astype(np.int16) - for i, gc in enumerate(gt_classes): - j = m0 == i - if n and sum(j) == 1: - self.matrix[gc, detection_classes[m1[j]]] += 1 # correct - else: - self.matrix[self.nc, gc] += 1 # background FP - - if n: - for i, dc in enumerate(detection_classes): - if not any(m1 == i): - self.matrix[dc, self.nc] += 1 # background FN - - def matrix(self): - return self.matrix - - def plot(self, save_dir='', names=()): - try: - import seaborn as sn - - array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6) # normalize - array[array < 0.005] = np.nan # don't annotate (would appear as 0.00) - - fig = plt.figure(figsize=(12, 9), tight_layout=True) - sn.set(font_scale=1.0 if self.nc < 50 else 0.8) # for label size - labels = (0 < len(names) < 99) and len(names) == self.nc # apply names to ticklabels - sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True, - xticklabels=names + ['background FP'] if labels else "auto", - yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1)) - fig.axes[0].set_xlabel('True') - fig.axes[0].set_ylabel('Predicted') - fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) - except Exception as e: - pass - - def print(self): - for i in range(self.nc + 1): - print(' '.join(map(str, self.matrix[i]))) - - -# Plots ---------------------------------------------------------------------------------------------------------------- - -def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()): - # Precision-recall curve - fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) - py = np.stack(py, axis=1) - - if 0 < len(names) < 21: # display per-class legend if < 21 classes - for i, y in enumerate(py.T): - ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}') # plot(recall, precision) - else: - ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision) - - ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean()) - ax.set_xlabel('Recall') - ax.set_ylabel('Precision') - ax.set_xlim(0, 1) - ax.set_ylim(0, 1) - plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") - fig.savefig(Path(save_dir), dpi=250) - - -def plot_mc_curve(px, py, save_dir='mc_curve.png', names=(), xlabel='Confidence', ylabel='Metric'): - # Metric-confidence curve - fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) - - if 0 < len(names) < 21: # display per-class legend if < 21 classes - for i, y in enumerate(py): - ax.plot(px, y, linewidth=1, label=f'{names[i]}') # plot(confidence, metric) - else: - ax.plot(px, py.T, linewidth=1, color='grey') # 
plot(confidence, metric) - - y = py.mean(0) - ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}') - ax.set_xlabel(xlabel) - ax.set_ylabel(ylabel) - ax.set_xlim(0, 1) - ax.set_ylim(0, 1) - plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") - fig.savefig(Path(save_dir), dpi=250) diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/models/modeling_flax_pytorch_utils.py b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/models/modeling_flax_pytorch_utils.py deleted file mode 100644 index 4768e82dec4ae6e147b52c70619bbde59d087b6b..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/models/modeling_flax_pytorch_utils.py +++ /dev/null @@ -1,134 +0,0 @@ -# coding=utf-8 -# Copyright 2023 The HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" PyTorch - Flax general utilities.""" -import re - -import jax.numpy as jnp -from flax.traverse_util import flatten_dict, unflatten_dict -from jax.random import PRNGKey - -from ..utils import logging - - -logger = logging.get_logger(__name__) - - -def rename_key(key): - regex = r"\w+[.]\d+" - pats = re.findall(regex, key) - for pat in pats: - key = key.replace(pat, "_".join(pat.split("."))) - return key - - -##################### -# PyTorch => Flax # -##################### - - -# Adapted from https://github.com/huggingface/transformers/blob/c603c80f46881ae18b2ca50770ef65fa4033eacd/src/transformers/modeling_flax_pytorch_utils.py#L69 -# and https://github.com/patil-suraj/stable-diffusion-jax/blob/main/stable_diffusion_jax/convert_diffusers_to_jax.py -def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict): - """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary""" - # conv norm or layer norm - renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",) - - # rename attention layers - if len(pt_tuple_key) > 1: - for rename_from, rename_to in ( - ("to_out_0", "proj_attn"), - ("to_k", "key"), - ("to_v", "value"), - ("to_q", "query"), - ): - if pt_tuple_key[-2] == rename_from: - weight_name = pt_tuple_key[-1] - weight_name = "kernel" if weight_name == "weight" else weight_name - renamed_pt_tuple_key = pt_tuple_key[:-2] + (rename_to, weight_name) - if renamed_pt_tuple_key in random_flax_state_dict: - assert random_flax_state_dict[renamed_pt_tuple_key].shape == pt_tensor.T.shape - return renamed_pt_tuple_key, pt_tensor.T - - if ( - any("norm" in str_ for str_ in pt_tuple_key) - and (pt_tuple_key[-1] == "bias") - and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) - and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) - ): - renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",) - return renamed_pt_tuple_key, pt_tensor - elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: - renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",) - return renamed_pt_tuple_key, 
pt_tensor - - # embedding - if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: - pt_tuple_key = pt_tuple_key[:-1] + ("embedding",) - return renamed_pt_tuple_key, pt_tensor - - # conv layer - renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",) - if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: - pt_tensor = pt_tensor.transpose(2, 3, 1, 0) - return renamed_pt_tuple_key, pt_tensor - - # linear layer - renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",) - if pt_tuple_key[-1] == "weight": - pt_tensor = pt_tensor.T - return renamed_pt_tuple_key, pt_tensor - - # old PyTorch layer norm weight - renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",) - if pt_tuple_key[-1] == "gamma": - return renamed_pt_tuple_key, pt_tensor - - # old PyTorch layer norm bias - renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",) - if pt_tuple_key[-1] == "beta": - return renamed_pt_tuple_key, pt_tensor - - return pt_tuple_key, pt_tensor - - -def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42): - # Step 1: Convert pytorch tensor to numpy - pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()} - - # Step 2: Since the model is stateless, get random Flax params - random_flax_params = flax_model.init_weights(PRNGKey(init_key)) - - random_flax_state_dict = flatten_dict(random_flax_params) - flax_state_dict = {} - - # Need to change some parameters name to match Flax names - for pt_key, pt_tensor in pt_state_dict.items(): - renamed_pt_key = rename_key(pt_key) - pt_tuple_key = tuple(renamed_pt_key.split(".")) - - # Correctly rename weight parameters - flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict) - - if flax_key in random_flax_state_dict: - if flax_tensor.shape != random_flax_state_dict[flax_key].shape: - raise ValueError( - f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " - f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." - ) - - # also add unexpected weight so that warning is thrown - flax_state_dict[flax_key] = jnp.asarray(flax_tensor) - - return unflatten_dict(flax_state_dict) diff --git a/spaces/patrickvonplaten/ckpt-to-diffusers/convert.py b/spaces/patrickvonplaten/ckpt-to-diffusers/convert.py deleted file mode 100644 index 1b9079abefd2568ae2314e2f38508b2648350014..0000000000000000000000000000000000000000 --- a/spaces/patrickvonplaten/ckpt-to-diffusers/convert.py +++ /dev/null @@ -1,92 +0,0 @@ -import argparse -import requests -import json -import os -import shutil -from collections import defaultdict -from inspect import signature -from tempfile import TemporaryDirectory -from typing import Dict, List, Optional, Set - -import torch -from io import BytesIO - -from huggingface_hub import CommitInfo, Discussion, HfApi, hf_hub_download -from huggingface_hub.file_download import repo_folder_name -from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt - - -COMMIT_DESCRIPTION = """ -This is an automated PR created with https://huggingface.co/spaces/safetensors/convert - -This new file is equivalent to `pytorch_model.bin` but safe in the sense that -no arbitrary code can be put into it. 
- -These files also happen to load much faster than their pytorch counterpart: -https://colab.research.google.com/github/huggingface/notebooks/blob/main/safetensors_doc/en/speed.ipynb - -The widgets on your model page will run using this model even if this is not merged -making sure the file actually works. - -If you find any issues: please report here: https://huggingface.co/spaces/safetensors/convert/discussions - -Feel free to ignore this PR. -""" - - -def convert_single(model_id: str, filename: str, model_type: str, sample_size: int, scheduler_type: str, extract_ema: bool, folder: str): - from_safetensors = filename.endswith(".safetensors") - - local_file = os.path.join(model_id, filename) - ckpt_file = local_file if os.path.isfile(local_file) else hf_hub_download(repo_id=model_id, filename=filename) - - if model_type == "v1": - config_url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" - elif model_type == "v2.0": - config_url = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference.yaml" - elif model_type == "v2.1": - config_url = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml" - - config_file = BytesIO(requests.get(config_url).content) - - pipeline = download_from_original_stable_diffusion_ckpt(ckpt_file, config_file, image_size=sample_size, scheduler_type=scheduler_type, from_safetensors=from_safetensors, extract_ema=extract_ema) - - pipeline.save_pretrained(folder) - pipeline.save_pretrained(folder, safe_serialization=True) - - pipeline = pipeline.to(torch_dtype=torch.float16) - pipeline.save_pretrained(folder, variant="fp16") - pipeline.save_pretrained(folder, safe_serialization=True, variant="fp16") - - return folder - - -def previous_pr(api: "HfApi", model_id: str, pr_title: str) -> Optional["Discussion"]: - try: - discussions = api.get_repo_discussions(repo_id=model_id) - except Exception: - return None - for discussion in discussions: - if discussion.status == "open" and discussion.is_pull_request and discussion.title == pr_title: - details = api.get_discussion_details(repo_id=model_id, discussion_num=discussion.num) - if details.target_branch == "refs/heads/main": - return discussion - - -def convert(token: str, model_id: str, filename: str, model_type: str, sample_size: int = 512, scheduler_type: str = "pndm", extract_ema: bool = True): - api = HfApi() - - pr_title = "Adding `diffusers` weights of this model" - - with TemporaryDirectory() as d: - folder = os.path.join(d, repo_folder_name(repo_id=model_id, repo_type="models")) - os.makedirs(folder) - new_pr = None - try: - folder = convert_single(model_id, filename, model_type, sample_size, scheduler_type, extract_ema, folder) - new_pr = api.upload_folder(folder_path=folder, path_in_repo="./", repo_id=model_id, repo_type="model", token=token, create_pr=True) - print(f"Pr created at {new_pr}") - finally: - shutil.rmtree(folder) - - return new_pr diff --git a/spaces/pknez/face-swap-docker/clip/clipseg.py b/spaces/pknez/face-swap-docker/clip/clipseg.py deleted file mode 100644 index 6adc7e4893cbb2bff31eb822dacf96a7c9a87e27..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/clip/clipseg.py +++ /dev/null @@ -1,538 +0,0 @@ -import math -from os.path import basename, dirname, join, isfile -import torch -from torch import nn -from torch.nn import functional as nnf -from torch.nn.modules.activation import ReLU - - -def 
get_prompt_list(prompt): - if prompt == 'plain': - return ['{}'] - elif prompt == 'fixed': - return ['a photo of a {}.'] - elif prompt == 'shuffle': - return ['a photo of a {}.', 'a photograph of a {}.', 'an image of a {}.', '{}.'] - elif prompt == 'shuffle+': - return ['a photo of a {}.', 'a photograph of a {}.', 'an image of a {}.', '{}.', - 'a cropped photo of a {}.', 'a good photo of a {}.', 'a photo of one {}.', - 'a bad photo of a {}.', 'a photo of the {}.'] - else: - raise ValueError('Invalid value for prompt') - - -def forward_multihead_attention(x, b, with_aff=False, attn_mask=None): - """ - Simplified version of multihead attention (taken from torch source code but without tons of if clauses). - The mlp and layer norm come from CLIP. - x: input. - b: multihead attention module. - """ - - x_ = b.ln_1(x) - q, k, v = nnf.linear(x_, b.attn.in_proj_weight, b.attn.in_proj_bias).chunk(3, dim=-1) - tgt_len, bsz, embed_dim = q.size() - - head_dim = embed_dim // b.attn.num_heads - scaling = float(head_dim) ** -0.5 - - q = q.contiguous().view(tgt_len, bsz * b.attn.num_heads, b.attn.head_dim).transpose(0, 1) - k = k.contiguous().view(-1, bsz * b.attn.num_heads, b.attn.head_dim).transpose(0, 1) - v = v.contiguous().view(-1, bsz * b.attn.num_heads, b.attn.head_dim).transpose(0, 1) - - q = q * scaling - - attn_output_weights = torch.bmm(q, k.transpose(1, 2)) # n_heads * batch_size, tokens^2, tokens^2 - if attn_mask is not None: - - - attn_mask_type, attn_mask = attn_mask - n_heads = attn_output_weights.size(0) // attn_mask.size(0) - attn_mask = attn_mask.repeat(n_heads, 1) - - if attn_mask_type == 'cls_token': - # the mask only affects similarities compared to the readout-token. - attn_output_weights[:, 0, 1:] = attn_output_weights[:, 0, 1:] * attn_mask[None,...] - # attn_output_weights[:, 0, 0] = 0*attn_output_weights[:, 0, 0] - - if attn_mask_type == 'all': - # print(attn_output_weights.shape, attn_mask[:, None].shape) - attn_output_weights[:, 1:, 1:] = attn_output_weights[:, 1:, 1:] * attn_mask[:, None] - - - attn_output_weights = torch.softmax(attn_output_weights, dim=-1) - - attn_output = torch.bmm(attn_output_weights, v) - attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) - attn_output = b.attn.out_proj(attn_output) - - x = x + attn_output - x = x + b.mlp(b.ln_2(x)) - - if with_aff: - return x, attn_output_weights - else: - return x - - -class CLIPDenseBase(nn.Module): - - def __init__(self, version, reduce_cond, reduce_dim, prompt, n_tokens): - super().__init__() - - import clip - - # prec = torch.FloatTensor - self.clip_model, _ = clip.load(version, device='cpu', jit=False) - self.model = self.clip_model.visual - - # if not None, scale conv weights such that we obtain n_tokens. 
- self.n_tokens = n_tokens - - for p in self.clip_model.parameters(): - p.requires_grad_(False) - - # conditional - if reduce_cond is not None: - self.reduce_cond = nn.Linear(512, reduce_cond) - for p in self.reduce_cond.parameters(): - p.requires_grad_(False) - else: - self.reduce_cond = None - - self.film_mul = nn.Linear(512 if reduce_cond is None else reduce_cond, reduce_dim) - self.film_add = nn.Linear(512 if reduce_cond is None else reduce_cond, reduce_dim) - - self.reduce = nn.Linear(768, reduce_dim) - - self.prompt_list = get_prompt_list(prompt) - - # precomputed prompts - import pickle - if isfile('precomputed_prompt_vectors.pickle'): - precomp = pickle.load(open('precomputed_prompt_vectors.pickle', 'rb')) - self.precomputed_prompts = {k: torch.from_numpy(v) for k, v in precomp.items()} - else: - self.precomputed_prompts = dict() - - def rescaled_pos_emb(self, new_size): - assert len(new_size) == 2 - - a = self.model.positional_embedding[1:].T.view(1, 768, *self.token_shape) - b = nnf.interpolate(a, new_size, mode='bicubic', align_corners=False).squeeze(0).view(768, new_size[0]*new_size[1]).T - return torch.cat([self.model.positional_embedding[:1], b]) - - def visual_forward(self, x_inp, extract_layers=(), skip=False, mask=None): - - - with torch.no_grad(): - - inp_size = x_inp.shape[2:] - - if self.n_tokens is not None: - stride2 = x_inp.shape[2] // self.n_tokens - conv_weight2 = nnf.interpolate(self.model.conv1.weight, (stride2, stride2), mode='bilinear', align_corners=True) - x = nnf.conv2d(x_inp, conv_weight2, bias=self.model.conv1.bias, stride=stride2, dilation=self.model.conv1.dilation) - else: - x = self.model.conv1(x_inp) # shape = [*, width, grid, grid] - - x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2] - x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width] - - x = torch.cat([self.model.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width] - - standard_n_tokens = 50 if self.model.conv1.kernel_size[0] == 32 else 197 - - if x.shape[1] != standard_n_tokens: - new_shape = int(math.sqrt(x.shape[1]-1)) - x = x + self.rescaled_pos_emb((new_shape, new_shape)).to(x.dtype)[None,:,:] - else: - x = x + self.model.positional_embedding.to(x.dtype) - - x = self.model.ln_pre(x) - - x = x.permute(1, 0, 2) # NLD -> LND - - activations, affinities = [], [] - for i, res_block in enumerate(self.model.transformer.resblocks): - - if mask is not None: - mask_layer, mask_type, mask_tensor = mask - if mask_layer == i or mask_layer == 'all': - # import ipdb; ipdb.set_trace() - size = int(math.sqrt(x.shape[0] - 1)) - - attn_mask = (mask_type, nnf.interpolate(mask_tensor.unsqueeze(1).float(), (size, size)).view(mask_tensor.shape[0], size * size)) - - else: - attn_mask = None - else: - attn_mask = None - - x, aff_per_head = forward_multihead_attention(x, res_block, with_aff=True, attn_mask=attn_mask) - - if i in extract_layers: - affinities += [aff_per_head] - - #if self.n_tokens is not None: - # activations += [nnf.interpolate(x, inp_size, mode='bilinear', align_corners=True)] - #else: - activations += [x] - - if len(extract_layers) > 0 and i == max(extract_layers) and skip: - print('early skip') - break - - x = x.permute(1, 0, 2) # LND -> NLD - x = self.model.ln_post(x[:, 0, :]) - - if self.model.proj is not None: - x = x @ self.model.proj - - return x, activations, affinities - - def sample_prompts(self, words, prompt_list=None): - - prompt_list = prompt_list if prompt_list is 
not None else self.prompt_list - - prompt_indices = torch.multinomial(torch.ones(len(prompt_list)), len(words), replacement=True) - prompts = [prompt_list[i] for i in prompt_indices] - return [promt.format(w) for promt, w in zip(prompts, words)] - - def get_cond_vec(self, conditional, batch_size): - # compute conditional from a single string - if conditional is not None and type(conditional) == str: - cond = self.compute_conditional(conditional) - cond = cond.repeat(batch_size, 1) - - # compute conditional from string list/tuple - elif conditional is not None and type(conditional) in {list, tuple} and type(conditional[0]) == str: - assert len(conditional) == batch_size - cond = self.compute_conditional(conditional) - - # use conditional directly - elif conditional is not None and type(conditional) == torch.Tensor and conditional.ndim == 2: - cond = conditional - - # compute conditional from image - elif conditional is not None and type(conditional) == torch.Tensor: - with torch.no_grad(): - cond, _, _ = self.visual_forward(conditional) - else: - raise ValueError('invalid conditional') - return cond - - def compute_conditional(self, conditional): - import clip - - dev = next(self.parameters()).device - - if type(conditional) in {list, tuple}: - text_tokens = clip.tokenize(conditional).to(dev) - cond = self.clip_model.encode_text(text_tokens) - else: - if conditional in self.precomputed_prompts: - cond = self.precomputed_prompts[conditional].float().to(dev) - else: - text_tokens = clip.tokenize([conditional]).to(dev) - cond = self.clip_model.encode_text(text_tokens)[0] - - if self.shift_vector is not None: - return cond + self.shift_vector - else: - return cond - - -def clip_load_untrained(version): - assert version == 'ViT-B/16' - from clip.model import CLIP - from clip.clip import _MODELS, _download - model = torch.jit.load(_download(_MODELS['ViT-B/16'])).eval() - state_dict = model.state_dict() - - vision_width = state_dict["visual.conv1.weight"].shape[0] - vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")]) - vision_patch_size = state_dict["visual.conv1.weight"].shape[-1] - grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5) - image_resolution = vision_patch_size * grid_size - embed_dim = state_dict["text_projection"].shape[1] - context_length = state_dict["positional_embedding"].shape[0] - vocab_size = state_dict["token_embedding.weight"].shape[0] - transformer_width = state_dict["ln_final.weight"].shape[0] - transformer_heads = transformer_width // 64 - transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks"))) - - return CLIP(embed_dim, image_resolution, vision_layers, vision_width, vision_patch_size, - context_length, vocab_size, transformer_width, transformer_heads, transformer_layers) - - -class CLIPDensePredT(CLIPDenseBase): - - def __init__(self, version='ViT-B/32', extract_layers=(3, 6, 9), cond_layer=0, reduce_dim=128, n_heads=4, prompt='fixed', - extra_blocks=0, reduce_cond=None, fix_shift=False, - learn_trans_conv_only=False, limit_to_clip_only=False, upsample=False, - add_calibration=False, rev_activations=False, trans_conv=None, n_tokens=None, complex_trans_conv=False): - - super().__init__(version, reduce_cond, reduce_dim, prompt, n_tokens) - # device = 'cpu' - - self.extract_layers = extract_layers - self.cond_layer = cond_layer - self.limit_to_clip_only = limit_to_clip_only - self.process_cond = None - self.rev_activations = 
rev_activations - - depth = len(extract_layers) - - if add_calibration: - self.calibration_conds = 1 - - self.upsample_proj = nn.Conv2d(reduce_dim, 1, kernel_size=1) if upsample else None - - self.add_activation1 = True - - self.version = version - - self.token_shape = {'ViT-B/32': (7, 7), 'ViT-B/16': (14, 14)}[version] - - if fix_shift: - # self.shift_vector = nn.Parameter(torch.load(join(dirname(basename(__file__)), 'clip_text_shift_vector.pth')), requires_grad=False) - self.shift_vector = nn.Parameter(torch.load(join(dirname(basename(__file__)), 'shift_text_to_vis.pth')), requires_grad=False) - # self.shift_vector = nn.Parameter(-1*torch.load(join(dirname(basename(__file__)), 'shift2.pth')), requires_grad=False) - else: - self.shift_vector = None - - if trans_conv is None: - trans_conv_ks = {'ViT-B/32': (32, 32), 'ViT-B/16': (16, 16)}[version] - else: - # explicitly define transposed conv kernel size - trans_conv_ks = (trans_conv, trans_conv) - - if not complex_trans_conv: - self.trans_conv = nn.ConvTranspose2d(reduce_dim, 1, trans_conv_ks, stride=trans_conv_ks) - else: - assert trans_conv_ks[0] == trans_conv_ks[1] - - tp_kernels = (trans_conv_ks[0] // 4, trans_conv_ks[0] // 4) - - self.trans_conv = nn.Sequential( - nn.Conv2d(reduce_dim, reduce_dim, kernel_size=3, padding=1), - nn.ReLU(), - nn.ConvTranspose2d(reduce_dim, reduce_dim // 2, kernel_size=tp_kernels[0], stride=tp_kernels[0]), - nn.ReLU(), - nn.ConvTranspose2d(reduce_dim // 2, 1, kernel_size=tp_kernels[1], stride=tp_kernels[1]), - ) - -# self.trans_conv = nn.ConvTranspose2d(reduce_dim, 1, trans_conv_ks, stride=trans_conv_ks) - - assert len(self.extract_layers) == depth - - self.reduces = nn.ModuleList([nn.Linear(768, reduce_dim) for _ in range(depth)]) - self.blocks = nn.ModuleList([nn.TransformerEncoderLayer(d_model=reduce_dim, nhead=n_heads) for _ in range(len(self.extract_layers))]) - self.extra_blocks = nn.ModuleList([nn.TransformerEncoderLayer(d_model=reduce_dim, nhead=n_heads) for _ in range(extra_blocks)]) - - # refinement and trans conv - - if learn_trans_conv_only: - for p in self.parameters(): - p.requires_grad_(False) - - for p in self.trans_conv.parameters(): - p.requires_grad_(True) - - self.prompt_list = get_prompt_list(prompt) - - - def forward(self, inp_image, conditional=None, return_features=False, mask=None): - - assert type(return_features) == bool - - inp_image = inp_image.to(self.model.positional_embedding.device) - - if mask is not None: - raise ValueError('mask not supported') - - # x_inp = normalize(inp_image) - x_inp = inp_image - - bs, dev = inp_image.shape[0], x_inp.device - - cond = self.get_cond_vec(conditional, bs) - - visual_q, activations, _ = self.visual_forward(x_inp, extract_layers=[0] + list(self.extract_layers)) - - activation1 = activations[0] - activations = activations[1:] - - _activations = activations[::-1] if not self.rev_activations else activations - - a = None - for i, (activation, block, reduce) in enumerate(zip(_activations, self.blocks, self.reduces)): - - if a is not None: - a = reduce(activation) + a - else: - a = reduce(activation) - - if i == self.cond_layer: - if self.reduce_cond is not None: - cond = self.reduce_cond(cond) - - a = self.film_mul(cond) * a + self.film_add(cond) - - a = block(a) - - for block in self.extra_blocks: - a = a + block(a) - - a = a[1:].permute(1, 2, 0) # rm cls token and -> BS, Feats, Tokens - - size = int(math.sqrt(a.shape[2])) - - a = a.view(bs, a.shape[1], size, size) - - a = self.trans_conv(a) - - if self.n_tokens is not None: - a = 
nnf.interpolate(a, x_inp.shape[2:], mode='bilinear', align_corners=True) - - if self.upsample_proj is not None: - a = self.upsample_proj(a) - a = nnf.interpolate(a, x_inp.shape[2:], mode='bilinear') - - if return_features: - return a, visual_q, cond, [activation1] + activations - else: - return a, - - - -class CLIPDensePredTMasked(CLIPDensePredT): - - def __init__(self, version='ViT-B/32', extract_layers=(3, 6, 9), cond_layer=0, reduce_dim=128, n_heads=4, - prompt='fixed', extra_blocks=0, reduce_cond=None, fix_shift=False, learn_trans_conv_only=False, - refine=None, limit_to_clip_only=False, upsample=False, add_calibration=False, n_tokens=None): - - super().__init__(version=version, extract_layers=extract_layers, cond_layer=cond_layer, reduce_dim=reduce_dim, - n_heads=n_heads, prompt=prompt, extra_blocks=extra_blocks, reduce_cond=reduce_cond, - fix_shift=fix_shift, learn_trans_conv_only=learn_trans_conv_only, - limit_to_clip_only=limit_to_clip_only, upsample=upsample, add_calibration=add_calibration, - n_tokens=n_tokens) - - def visual_forward_masked(self, img_s, seg_s): - return super().visual_forward(img_s, mask=('all', 'cls_token', seg_s)) - - def forward(self, img_q, cond_or_img_s, seg_s=None, return_features=False): - - if seg_s is None: - cond = cond_or_img_s - else: - img_s = cond_or_img_s - - with torch.no_grad(): - cond, _, _ = self.visual_forward_masked(img_s, seg_s) - - return super().forward(img_q, cond, return_features=return_features) - - - -class CLIPDenseBaseline(CLIPDenseBase): - - def __init__(self, version='ViT-B/32', cond_layer=0, - extract_layer=9, reduce_dim=128, reduce2_dim=None, prompt='fixed', - reduce_cond=None, limit_to_clip_only=False, n_tokens=None): - - super().__init__(version, reduce_cond, reduce_dim, prompt, n_tokens) - device = 'cpu' - - # self.cond_layer = cond_layer - self.extract_layer = extract_layer - self.limit_to_clip_only = limit_to_clip_only - self.shift_vector = None - - self.token_shape = {'ViT-B/32': (7, 7), 'ViT-B/16': (14, 14)}[version] - - assert reduce2_dim is not None - - self.reduce2 = nn.Sequential( - nn.Linear(reduce_dim, reduce2_dim), - nn.ReLU(), - nn.Linear(reduce2_dim, reduce_dim) - ) - - trans_conv_ks = {'ViT-B/32': (32, 32), 'ViT-B/16': (16, 16)}[version] - self.trans_conv = nn.ConvTranspose2d(reduce_dim, 1, trans_conv_ks, stride=trans_conv_ks) - - - def forward(self, inp_image, conditional=None, return_features=False): - - inp_image = inp_image.to(self.model.positional_embedding.device) - - # x_inp = normalize(inp_image) - x_inp = inp_image - - bs, dev = inp_image.shape[0], x_inp.device - - cond = self.get_cond_vec(conditional, bs) - - visual_q, activations, affinities = self.visual_forward(x_inp, extract_layers=[self.extract_layer]) - - a = activations[0] - a = self.reduce(a) - a = self.film_mul(cond) * a + self.film_add(cond) - - if self.reduce2 is not None: - a = self.reduce2(a) - - # the original model would execute a transformer block here - - a = a[1:].permute(1, 2, 0) # rm cls token and -> BS, Feats, Tokens - - size = int(math.sqrt(a.shape[2])) - - a = a.view(bs, a.shape[1], size, size) - a = self.trans_conv(a) - - if return_features: - return a, visual_q, cond, activations - else: - return a, - - -class CLIPSegMultiLabel(nn.Module): - - def __init__(self, model) -> None: - super().__init__() - - from third_party.JoEm.data_loader import get_seen_idx, get_unseen_idx, VOC - - self.pascal_classes = VOC - - from clip.clipseg import CLIPDensePredT - from general_utils import load_model - # self.clipseg = 
load_model('rd64-vit16-neg0.2-phrasecut', strict=False) - self.clipseg = load_model(model, strict=False) - - self.clipseg.eval() - - def forward(self, x): - - bs = x.shape[0] - out = torch.ones(21, bs, 352, 352).to(x.device) * -10 - - for class_id, class_name in enumerate(self.pascal_classes): - - fac = 3 if class_name == 'background' else 1 - - with torch.no_grad(): - pred = torch.sigmoid(self.clipseg(x, class_name)[0][:,0]) * fac - - out[class_id] += pred - - - out = out.permute(1, 0, 2, 3) - - return out - - # construct output tensor - diff --git a/spaces/postbot/autocomplete-emails/app.py b/spaces/postbot/autocomplete-emails/app.py deleted file mode 100644 index a864d1b68ae659a10ff4ff811e16076b34506c30..0000000000000000000000000000000000000000 --- a/spaces/postbot/autocomplete-emails/app.py +++ /dev/null @@ -1,295 +0,0 @@ -import argparse -import pprint as pp -import logging -import time -import gradio as gr -import torch -from transformers import pipeline - -from utils import make_mailto_form, postprocess, clear, make_email_link - -logging.basicConfig( - level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s" -) - -use_gpu = torch.cuda.is_available() - - -def generate_text( - prompt: str, - gen_length=64, - penalty_alpha=0.6, - top_k=6, - length_penalty=1.0, - # perma params (not set by user) - abs_max_length=512, - verbose=False, -): - """ - generate_text - generate text using the text generation pipeline - - :param str prompt: the prompt to use for the text generation pipeline - :param int gen_length: the number of tokens to generate - :param float penalty_alpha: the penalty alpha for the text generation pipeline (contrastive search) - :param int top_k: the top k for the text generation pipeline (contrastive search) - :param int abs_max_length: the absolute max length for the text generation pipeline - :param bool verbose: verbose output - :return str: the generated text - """ - global generator - if verbose: - logging.info(f"Generating text from prompt:\n\n{prompt}") - logging.info( - pp.pformat( - f"params:\tmax_length={gen_length}, penalty_alpha={penalty_alpha}, top_k={top_k}, length_penalty={length_penalty}" - ) - ) - st = time.perf_counter() - - input_tokens = generator.tokenizer(prompt) - input_len = len(input_tokens["input_ids"]) - if input_len > abs_max_length: - logging.info(f"Input too long {input_len} > {abs_max_length}, may cause errors") - result = generator( - prompt, - max_length=gen_length + input_len, # old API for generation - min_length=input_len + 4, - penalty_alpha=penalty_alpha, - top_k=top_k, - length_penalty=length_penalty, - ) # generate - response = result[0]["generated_text"] - rt = time.perf_counter() - st - if verbose: - logging.info(f"Generated text: {response}") - rt_string = f"Generation time: {rt:.2f}s" - logging.info(rt_string) - - formatted_email = postprocess(response) - return make_mailto_form(body=formatted_email), formatted_email - - -def load_emailgen_model(model_tag: str): - """ - load_emailgen_model - load a text generation pipeline for email generation - - Args: - model_tag (str): the huggingface model tag to load - - Returns: - transformers.pipelines.TextGenerationPipeline: the text generation pipeline - """ - global generator - generator = pipeline( - "text-generation", - model_tag, - device=0 if use_gpu else -1, - ) - - -def get_parser(): - """ - get_parser - a helper function for the argparse module - """ - parser = argparse.ArgumentParser( - description="Text Generation demo for postbot", - ) - - parser.add_argument( - 
"-m", - "--model", - required=False, - type=str, - default="postbot/distilgpt2-emailgen-V2", - help="Pass an different huggingface model tag to use a custom model", - ) - parser.add_argument( - "-l", - "--max_length", - required=False, - type=int, - default=40, - help="default max length of the generated text", - ) - parser.add_argument( - "-a", - "--penalty_alpha", - type=float, - default=0.6, - help="The penalty alpha for the text generation pipeline (contrastive search) - default 0.6", - ) - - parser.add_argument( - "-k", - "--top_k", - type=int, - default=6, - help="The top k for the text generation pipeline (contrastive search) - default 6", - ) - parser.add_argument( - "-v", - "--verbose", - required=False, - action="store_true", - help="Verbose output", - ) - return parser - - -default_prompt = """ -Hello, - -Following up on last week's bubblegum shipment, I""" - -available_models = [ - "postbot/distilgpt2-emailgen-V2", - "postbot/distilgpt2-emailgen", - "postbot/gpt2-medium-emailgen", - "postbot/pythia-160m-hq-emails", -] - -if __name__ == "__main__": - - logging.info("\n\n\nStarting new instance of app.py") - args = get_parser().parse_args() - logging.info(f"received args:\t{args}") - model_tag = args.model - verbose = args.verbose - max_length = args.max_length - top_k = args.top_k - alpha = args.penalty_alpha - - assert top_k > 0, "top_k must be greater than 0" - assert alpha >= 0.0 and alpha <= 1.0, "penalty_alpha must be between 0 and 1" - - logging.info(f"Loading model: {model_tag}, use GPU = {use_gpu}") - generator = pipeline( - "text-generation", - model_tag, - device=0 if use_gpu else -1, - ) - - demo = gr.Blocks() - - logging.info("launching interface...") - - with demo: - gr.Markdown("# Auto-Complete Emails - Demo") - gr.Markdown( - "Enter part of an email, and a text-gen model will complete it! See details below. " - ) - gr.Markdown("---") - - with gr.Column(): - - gr.Markdown("## Generate Text") - gr.Markdown("Edit the prompt and parameters and press **Generate**!") - prompt_text = gr.Textbox( - lines=4, - label="Email Prompt", - value=default_prompt, - ) - - with gr.Row(): - clear_button = gr.Button( - value="Clear Prompt", - ) - num_gen_tokens = gr.Slider( - label="Generation Tokens", - value=max_length, - maximum=96, - minimum=16, - step=8, - ) - - generate_button = gr.Button( - value="Generate!", - variant="primary", - ) - gr.Markdown("---") - gr.Markdown("### Results") - # put a large HTML placeholder here - generated_email = gr.Textbox( - label="Generated Text", - placeholder="This is where the generated text will appear", - interactive=False, - ) - email_mailto_button = gr.HTML( - "a clickable email button will appear here" - ) - - gr.Markdown("---") - gr.Markdown("## Advanced Options") - gr.Markdown( - "This demo generates text via the new [contrastive search](https://huggingface.co/blog/introducing-csearch). See the csearch blog post for details on the parameters or [here](https://huggingface.co/blog/how-to-generate), for general decoding." 
- ) - with gr.Row(): - model_name = gr.Dropdown( - choices=available_models, - label="Choose a model", - value=model_tag, - ) - load_model_button = gr.Button( - "Load Model", - variant="secondary", - ) - with gr.Row(): - contrastive_top_k = gr.Radio( - choices=[2, 4, 6, 8], - label="Top K", - value=top_k, - ) - - penalty_alpha = gr.Slider( - label="Penalty Alpha", - value=alpha, - maximum=1.0, - minimum=0.0, - step=0.1, - ) - length_penalty = gr.Slider( - minimum=0.5, - maximum=1.0, - label="Length Penalty", - value=1.0, - step=0.1, - ) - gr.Markdown("---") - - with gr.Column(): - - gr.Markdown("## About") - gr.Markdown( - "[This model](https://huggingface.co/postbot/distilgpt2-emailgen) is a fine-tuned version of distilgpt2 on a dataset of 100k emails sourced from the internet, including the classic `aeslc` dataset.\n\nCheck out the model card for details on notebook & command line usage." - ) - gr.Markdown( - "The intended use of this model is to provide suggestions to _auto-complete_ the rest of your email. Said another way, it should serve as a **tool to write predictable emails faster**. It is not intended to write entire emails from scratch; at least **some input** is required to guide the direction of the model.\n\nPlease verify any suggestions by the model for A) False claims and B) negation statements **before** accepting/sending something." - ) - gr.Markdown("---") - - clear_button.click( - fn=clear, - inputs=[prompt_text], - outputs=[prompt_text], - ) - generate_button.click( - fn=generate_text, - inputs=[ - prompt_text, - num_gen_tokens, - penalty_alpha, - contrastive_top_k, - length_penalty, - ], - outputs=[email_mailto_button, generated_email], - ) - - load_model_button.click( - fn=load_emailgen_model, - inputs=[model_name], - outputs=[], - ) - demo.launch( - enable_queue=True, - share=True, # for local testing - ) diff --git a/spaces/pourmand1376/Seamlessm4t_diarization_VAD/README.md b/spaces/pourmand1376/Seamlessm4t_diarization_VAD/README.md deleted file mode 100644 index a5dcce3ab7c8ebee0395e83e9a6cd0ab76e91fcd..0000000000000000000000000000000000000000 --- a/spaces/pourmand1376/Seamlessm4t_diarization_VAD/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Seamlessm4t Diarization VAD -emoji: 📉 -colorFrom: green -colorTo: red -sdk: gradio -sdk_version: 3.43.2 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/pragneshbarik/ikigai-chat/mistral7b.py b/spaces/pragneshbarik/ikigai-chat/mistral7b.py deleted file mode 100644 index a10b6c7c2ddf62fe81c88e447526d75c67143b9f..0000000000000000000000000000000000000000 --- a/spaces/pragneshbarik/ikigai-chat/mistral7b.py +++ /dev/null @@ -1,50 +0,0 @@ -from huggingface_hub import InferenceClient -import os -from dotenv import load_dotenv -load_dotenv() - -API_TOKEN = os.getenv('HF_TOKEN') - - -def format_prompt(message, history): - prompt = "" - for user_prompt, bot_response in history: - prompt += f"[INST] {user_prompt} [/INST]" - prompt += f" {bot_response} " - prompt += f"[INST] {message} [/INST]" - return prompt - -def chat( - prompt, history, chat_client = "mistralai/Mistral-7B-Instruct-v0.1",temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0, -): - client = InferenceClient( - chat_client, - token=API_TOKEN - ) - temperature = float(temperature) - if temperature < 1e-2: - temperature = 1e-2 - top_p = float(top_p) - - generate_kwargs = dict( - temperature=temperature, - 
max_new_tokens=max_new_tokens, - top_p=top_p, - repetition_penalty=repetition_penalty, - do_sample=True, - seed=42, - ) - - formatted_prompt = format_prompt(prompt, history) - - stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False) - output = "" - - # for response in stream: - # # print(response) - # output += response.token["text"] - # yield output - # return output - - - return stream diff --git a/spaces/pritamdeka/pubmed-abstract-retriever/README.md b/spaces/pritamdeka/pubmed-abstract-retriever/README.md deleted file mode 100644 index e09746392f963198184fdab41111dca78a6f8751..0000000000000000000000000000000000000000 --- a/spaces/pritamdeka/pubmed-abstract-retriever/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: PubMed Abstract Retriever -emoji: 📚​🔎​📄​ -colorFrom: red -colorTo: green -sdk: gradio -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/attr/_funcs.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/attr/_funcs.py deleted file mode 100644 index 7f5d9610f3cf0010a9185579f7188df5ff609384..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/attr/_funcs.py +++ /dev/null @@ -1,477 +0,0 @@ -# SPDX-License-Identifier: MIT - - -import copy - -from ._compat import PY_3_9_PLUS, get_generic_base -from ._make import NOTHING, _obj_setattr, fields -from .exceptions import AttrsAttributeNotFoundError - - -def asdict( - inst, - recurse=True, - filter=None, - dict_factory=dict, - retain_collection_types=False, - value_serializer=None, -): - """ - Return the *attrs* attribute values of *inst* as a dict. - - Optionally recurse into other *attrs*-decorated classes. - - :param inst: Instance of an *attrs*-decorated class. - :param bool recurse: Recurse into classes that are also - *attrs*-decorated. - :param callable filter: A callable whose return code determines whether an - attribute or element is included (``True``) or dropped (``False``). Is - called with the `attrs.Attribute` as the first argument and the - value as the second argument. - :param callable dict_factory: A callable to produce dictionaries from. For - example, to produce ordered dictionaries instead of normal Python - dictionaries, pass in ``collections.OrderedDict``. - :param bool retain_collection_types: Do not convert to ``list`` when - encountering an attribute whose type is ``tuple`` or ``set``. Only - meaningful if ``recurse`` is ``True``. - :param Optional[callable] value_serializer: A hook that is called for every - attribute or dict key/value. It receives the current instance, field - and value and must return the (updated) value. The hook is run *after* - the optional *filter* has been applied. - - :rtype: return type of *dict_factory* - - :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs* - class. - - .. versionadded:: 16.0.0 *dict_factory* - .. versionadded:: 16.1.0 *retain_collection_types* - .. versionadded:: 20.3.0 *value_serializer* - .. versionadded:: 21.3.0 If a dict has a collection for a key, it is - serialized as a tuple. 
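-
-    For example, a minimal illustrative call, showing how fields map to
-    dictionary keys (the ``Point`` class here is purely hypothetical)::
-
-        >>> import attrs
-        >>> @attrs.define
-        ... class Point:
-        ...     x: int
-        ...     y: int
-        >>> attrs.asdict(Point(1, 2))
-        {'x': 1, 'y': 2}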
- """ - attrs = fields(inst.__class__) - rv = dict_factory() - for a in attrs: - v = getattr(inst, a.name) - if filter is not None and not filter(a, v): - continue - - if value_serializer is not None: - v = value_serializer(inst, a, v) - - if recurse is True: - if has(v.__class__): - rv[a.name] = asdict( - v, - recurse=True, - filter=filter, - dict_factory=dict_factory, - retain_collection_types=retain_collection_types, - value_serializer=value_serializer, - ) - elif isinstance(v, (tuple, list, set, frozenset)): - cf = v.__class__ if retain_collection_types is True else list - rv[a.name] = cf( - [ - _asdict_anything( - i, - is_key=False, - filter=filter, - dict_factory=dict_factory, - retain_collection_types=retain_collection_types, - value_serializer=value_serializer, - ) - for i in v - ] - ) - elif isinstance(v, dict): - df = dict_factory - rv[a.name] = df( - ( - _asdict_anything( - kk, - is_key=True, - filter=filter, - dict_factory=df, - retain_collection_types=retain_collection_types, - value_serializer=value_serializer, - ), - _asdict_anything( - vv, - is_key=False, - filter=filter, - dict_factory=df, - retain_collection_types=retain_collection_types, - value_serializer=value_serializer, - ), - ) - for kk, vv in v.items() - ) - else: - rv[a.name] = v - else: - rv[a.name] = v - return rv - - -def _asdict_anything( - val, - is_key, - filter, - dict_factory, - retain_collection_types, - value_serializer, -): - """ - ``asdict`` only works on attrs instances, this works on anything. - """ - if getattr(val.__class__, "__attrs_attrs__", None) is not None: - # Attrs class. - rv = asdict( - val, - recurse=True, - filter=filter, - dict_factory=dict_factory, - retain_collection_types=retain_collection_types, - value_serializer=value_serializer, - ) - elif isinstance(val, (tuple, list, set, frozenset)): - if retain_collection_types is True: - cf = val.__class__ - elif is_key: - cf = tuple - else: - cf = list - - rv = cf( - [ - _asdict_anything( - i, - is_key=False, - filter=filter, - dict_factory=dict_factory, - retain_collection_types=retain_collection_types, - value_serializer=value_serializer, - ) - for i in val - ] - ) - elif isinstance(val, dict): - df = dict_factory - rv = df( - ( - _asdict_anything( - kk, - is_key=True, - filter=filter, - dict_factory=df, - retain_collection_types=retain_collection_types, - value_serializer=value_serializer, - ), - _asdict_anything( - vv, - is_key=False, - filter=filter, - dict_factory=df, - retain_collection_types=retain_collection_types, - value_serializer=value_serializer, - ), - ) - for kk, vv in val.items() - ) - else: - rv = val - if value_serializer is not None: - rv = value_serializer(None, None, rv) - - return rv - - -def astuple( - inst, - recurse=True, - filter=None, - tuple_factory=tuple, - retain_collection_types=False, -): - """ - Return the *attrs* attribute values of *inst* as a tuple. - - Optionally recurse into other *attrs*-decorated classes. - - :param inst: Instance of an *attrs*-decorated class. - :param bool recurse: Recurse into classes that are also - *attrs*-decorated. - :param callable filter: A callable whose return code determines whether an - attribute or element is included (``True``) or dropped (``False``). Is - called with the `attrs.Attribute` as the first argument and the - value as the second argument. - :param callable tuple_factory: A callable to produce tuples from. For - example, to produce lists instead of tuples. 
- :param bool retain_collection_types: Do not convert to ``list`` - or ``dict`` when encountering an attribute which type is - ``tuple``, ``dict`` or ``set``. Only meaningful if ``recurse`` is - ``True``. - - :rtype: return type of *tuple_factory* - - :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs* - class. - - .. versionadded:: 16.2.0 - """ - attrs = fields(inst.__class__) - rv = [] - retain = retain_collection_types # Very long. :/ - for a in attrs: - v = getattr(inst, a.name) - if filter is not None and not filter(a, v): - continue - if recurse is True: - if has(v.__class__): - rv.append( - astuple( - v, - recurse=True, - filter=filter, - tuple_factory=tuple_factory, - retain_collection_types=retain, - ) - ) - elif isinstance(v, (tuple, list, set, frozenset)): - cf = v.__class__ if retain is True else list - rv.append( - cf( - [ - astuple( - j, - recurse=True, - filter=filter, - tuple_factory=tuple_factory, - retain_collection_types=retain, - ) - if has(j.__class__) - else j - for j in v - ] - ) - ) - elif isinstance(v, dict): - df = v.__class__ if retain is True else dict - rv.append( - df( - ( - astuple( - kk, - tuple_factory=tuple_factory, - retain_collection_types=retain, - ) - if has(kk.__class__) - else kk, - astuple( - vv, - tuple_factory=tuple_factory, - retain_collection_types=retain, - ) - if has(vv.__class__) - else vv, - ) - for kk, vv in v.items() - ) - ) - else: - rv.append(v) - else: - rv.append(v) - - return rv if tuple_factory is list else tuple_factory(rv) - - -def has(cls): - """ - Check whether *cls* is a class with *attrs* attributes. - - :param type cls: Class to introspect. - :raise TypeError: If *cls* is not a class. - - :rtype: bool - """ - attrs = getattr(cls, "__attrs_attrs__", None) - if attrs is not None: - return True - - # No attrs, maybe it's a specialized generic (A[str])? - generic_base = get_generic_base(cls) - if generic_base is not None: - generic_attrs = getattr(generic_base, "__attrs_attrs__", None) - if generic_attrs is not None: - # Stick it on here for speed next time. - cls.__attrs_attrs__ = generic_attrs - return generic_attrs is not None - return False - - -def assoc(inst, **changes): - """ - Copy *inst* and apply *changes*. - - This is different from `evolve` that applies the changes to the arguments - that create the new instance. - - `evolve`'s behavior is preferable, but there are `edge cases`_ where it - doesn't work. Therefore `assoc` is deprecated, but will not be removed. - - .. _`edge cases`: https://github.com/python-attrs/attrs/issues/251 - - :param inst: Instance of a class with *attrs* attributes. - :param changes: Keyword changes in the new copy. - - :return: A copy of inst with *changes* incorporated. - - :raise attrs.exceptions.AttrsAttributeNotFoundError: If *attr_name* - couldn't be found on *cls*. - :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs* - class. - - .. deprecated:: 17.1.0 - Use `attrs.evolve` instead if you can. - This function will not be removed du to the slightly different approach - compared to `attrs.evolve`. - """ - new = copy.copy(inst) - attrs = fields(inst.__class__) - for k, v in changes.items(): - a = getattr(attrs, k, NOTHING) - if a is NOTHING: - raise AttrsAttributeNotFoundError( - f"{k} is not an attrs attribute on {new.__class__}." - ) - _obj_setattr(new, k, v) - return new - - -def evolve(*args, **changes): - """ - Create a new instance, based on the first positional argument with - *changes* applied. 
- - :param inst: Instance of a class with *attrs* attributes. - :param changes: Keyword changes in the new copy. - - :return: A copy of inst with *changes* incorporated. - - :raise TypeError: If *attr_name* couldn't be found in the class - ``__init__``. - :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs* - class. - - .. versionadded:: 17.1.0 - .. deprecated:: 23.1.0 - It is now deprecated to pass the instance using the keyword argument - *inst*. It will raise a warning until at least April 2024, after which - it will become an error. Always pass the instance as a positional - argument. - """ - # Try to get instance by positional argument first. - # Use changes otherwise and warn it'll break. - if args: - try: - (inst,) = args - except ValueError: - raise TypeError( - f"evolve() takes 1 positional argument, but {len(args)} " - "were given" - ) from None - else: - try: - inst = changes.pop("inst") - except KeyError: - raise TypeError( - "evolve() missing 1 required positional argument: 'inst'" - ) from None - - import warnings - - warnings.warn( - "Passing the instance per keyword argument is deprecated and " - "will stop working in, or after, April 2024.", - DeprecationWarning, - stacklevel=2, - ) - - cls = inst.__class__ - attrs = fields(cls) - for a in attrs: - if not a.init: - continue - attr_name = a.name # To deal with private attributes. - init_name = a.alias - if init_name not in changes: - changes[init_name] = getattr(inst, attr_name) - - return cls(**changes) - - -def resolve_types( - cls, globalns=None, localns=None, attribs=None, include_extras=True -): - """ - Resolve any strings and forward annotations in type annotations. - - This is only required if you need concrete types in `Attribute`'s *type* - field. In other words, you don't need to resolve your types if you only - use them for static type checking. - - With no arguments, names will be looked up in the module in which the class - was created. If this is not what you want, e.g. if the name only exists - inside a method, you may pass *globalns* or *localns* to specify other - dictionaries in which to look up these names. See the docs of - `typing.get_type_hints` for more details. - - :param type cls: Class to resolve. - :param Optional[dict] globalns: Dictionary containing global variables. - :param Optional[dict] localns: Dictionary containing local variables. - :param Optional[list] attribs: List of attribs for the given class. - This is necessary when calling from inside a ``field_transformer`` - since *cls* is not an *attrs* class yet. - :param bool include_extras: Resolve more accurately, if possible. - Pass ``include_extras`` to ``typing.get_hints``, if supported by the - typing module. On supported Python versions (3.9+), this resolves the - types more accurately. - - :raise TypeError: If *cls* is not a class. - :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs* - class and you didn't pass any attribs. - :raise NameError: If types cannot be resolved because of missing variables. - - :returns: *cls* so you can use this function also as a class decorator. - Please note that you have to apply it **after** `attrs.define`. That - means the decorator has to come in the line **before** `attrs.define`. - - .. versionadded:: 20.1.0 - .. versionadded:: 21.1.0 *attribs* - .. versionadded:: 23.1.0 *include_extras* - - """ - # Since calling get_type_hints is expensive we cache whether we've - # done it already. 
- if getattr(cls, "__attrs_types_resolved__", None) != cls: - import typing - - kwargs = {"globalns": globalns, "localns": localns} - - if PY_3_9_PLUS: - kwargs["include_extras"] = include_extras - - hints = typing.get_type_hints(cls, **kwargs) - for field in fields(cls) if attribs is None else attribs: - if field.name in hints: - # Since fields have been frozen we must work around it. - _obj_setattr(field, "type", hints[field.name]) - # We store the class we resolved so that subclasses know they haven't - # been resolved. - cls.__attrs_types_resolved__ = cls - - # Return the class so you can use it as a decorator too. - return cls diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/feaLib/location.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/feaLib/location.py deleted file mode 100644 index 50f761d2d2a13bd101a7db9c259fedc98eed52cf..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/feaLib/location.py +++ /dev/null @@ -1,12 +0,0 @@ -from typing import NamedTuple - - -class FeatureLibLocation(NamedTuple): - """A location in a feature file""" - - file: str - line: int - column: int - - def __str__(self): - return f"{self.file}:{self.line}:{self.column}" diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Copy-1b5c0932.js b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Copy-1b5c0932.js deleted file mode 100644 index 108fde315f01e1215f48ad491d10d95986677186..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Copy-1b5c0932.js +++ /dev/null @@ -1,2 +0,0 @@ -const{SvelteComponent:d,append:u,attr:e,detach:v,init:g,insert:w,noop:i,safe_not_equal:m,svg_element:c}=window.__gradio__svelte__internal;function x(s){let t,o;return{c(){t=c("svg"),o=c("polyline"),e(o,"points","20 6 9 17 4 12"),e(t,"xmlns","http://www.w3.org/2000/svg"),e(t,"width","15px"),e(t,"height","14px"),e(t,"viewBox","2 0 20 20"),e(t,"fill","none"),e(t,"stroke","currentColor"),e(t,"stroke-width","3"),e(t,"stroke-linecap","round"),e(t,"stroke-linejoin","round")},m(r,l){w(r,t,l),u(t,o)},p:i,i,o:i,d(r){r&&v(t)}}}class y extends d{constructor(t){super(),g(this,t,null,x,m,{})}}const{SvelteComponent:f,append:_,attr:n,detach:C,init:$,insert:k,noop:a,safe_not_equal:H,svg_element:p}=window.__gradio__svelte__internal;function q(s){let t,o,r;return{c(){t=p("svg"),o=p("path"),r=p("path"),n(o,"fill","currentColor"),n(o,"d","M28 10v18H10V10h18m0-2H10a2 2 0 0 0-2 2v18a2 2 0 0 0 2 2h18a2 2 0 0 0 2-2V10a2 2 0 0 0-2-2Z"),n(r,"fill","currentColor"),n(r,"d","M4 18H2V4a2 2 0 0 1 2-2h14v2H4Z"),n(t,"xmlns","http://www.w3.org/2000/svg"),n(t,"width","15px"),n(t,"height","14px"),n(t,"viewBox","0 0 33 33"),n(t,"color","currentColor")},m(l,h){k(l,t,h),_(t,o),_(t,r)},p:a,i:a,o:a,d(l){l&&C(t)}}}class S extends f{constructor(t){super(),$(this,t,null,q,H,{})}}export{S as C,y as a}; -//# sourceMappingURL=Copy-1b5c0932.js.map diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/markdown_it/rules_block/table.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/markdown_it/rules_block/table.py deleted file mode 100644 index 4b666c1d5d9ede544b27c38d7ffb5d850edac70b..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/markdown_it/rules_block/table.py 
+++ /dev/null @@ -1,236 +0,0 @@ -# GFM table, https://github.github.com/gfm/#tables-extension- -from __future__ import annotations - -import re - -from ..common.utils import charStrAt, isStrSpace -from .state_block import StateBlock - -headerLineRe = re.compile(r"^:?-+:?$") -enclosingPipesRe = re.compile(r"^\||\|$") - - -def getLine(state: StateBlock, line: int) -> str: - pos = state.bMarks[line] + state.tShift[line] - maximum = state.eMarks[line] - - # return state.src.substr(pos, max - pos) - return state.src[pos:maximum] - - -def escapedSplit(string: str) -> list[str]: - result: list[str] = [] - pos = 0 - max = len(string) - isEscaped = False - lastPos = 0 - current = "" - ch = charStrAt(string, pos) - - while pos < max: - if ch == "|": - if not isEscaped: - # pipe separating cells, '|' - result.append(current + string[lastPos:pos]) - current = "" - lastPos = pos + 1 - else: - # escaped pipe, '\|' - current += string[lastPos : pos - 1] - lastPos = pos - - isEscaped = ch == "\\" - pos += 1 - - ch = charStrAt(string, pos) - - result.append(current + string[lastPos:]) - - return result - - -def table(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool: - tbodyLines = None - - # should have at least two lines - if startLine + 2 > endLine: - return False - - nextLine = startLine + 1 - - if state.sCount[nextLine] < state.blkIndent: - return False - - if state.is_code_block(nextLine): - return False - - # first character of the second line should be '|', '-', ':', - # and no other characters are allowed but spaces; - # basically, this is the equivalent of /^[-:|][-:|\s]*$/ regexp - - pos = state.bMarks[nextLine] + state.tShift[nextLine] - if pos >= state.eMarks[nextLine]: - return False - first_ch = state.src[pos] - pos += 1 - if first_ch not in ("|", "-", ":"): - return False - - if pos >= state.eMarks[nextLine]: - return False - second_ch = state.src[pos] - pos += 1 - if second_ch not in ("|", "-", ":") and not isStrSpace(second_ch): - return False - - # if first character is '-', then second character must not be a space - # (due to parsing ambiguity with list) - if first_ch == "-" and isStrSpace(second_ch): - return False - - while pos < state.eMarks[nextLine]: - ch = state.src[pos] - - if ch not in ("|", "-", ":") and not isStrSpace(ch): - return False - - pos += 1 - - lineText = getLine(state, startLine + 1) - - columns = lineText.split("|") - aligns = [] - for i in range(len(columns)): - t = columns[i].strip() - if not t: - # allow empty columns before and after table, but not in between columns; - # e.g. 
allow ` |---| `, disallow ` ---||--- ` - if i == 0 or i == len(columns) - 1: - continue - else: - return False - - if not headerLineRe.search(t): - return False - if charStrAt(t, len(t) - 1) == ":": - aligns.append("center" if charStrAt(t, 0) == ":" else "right") - elif charStrAt(t, 0) == ":": - aligns.append("left") - else: - aligns.append("") - - lineText = getLine(state, startLine).strip() - if "|" not in lineText: - return False - if state.is_code_block(startLine): - return False - columns = escapedSplit(lineText) - if columns and columns[0] == "": - columns.pop(0) - if columns and columns[-1] == "": - columns.pop() - - # header row will define an amount of columns in the entire table, - # and align row should be exactly the same (the rest of the rows can differ) - columnCount = len(columns) - if columnCount == 0 or columnCount != len(aligns): - return False - - if silent: - return True - - oldParentType = state.parentType - state.parentType = "table" - - # use 'blockquote' lists for termination because it's - # the most similar to tables - terminatorRules = state.md.block.ruler.getRules("blockquote") - - token = state.push("table_open", "table", 1) - token.map = tableLines = [startLine, 0] - - token = state.push("thead_open", "thead", 1) - token.map = [startLine, startLine + 1] - - token = state.push("tr_open", "tr", 1) - token.map = [startLine, startLine + 1] - - for i in range(len(columns)): - token = state.push("th_open", "th", 1) - if aligns[i]: - token.attrs = {"style": "text-align:" + aligns[i]} - - token = state.push("inline", "", 0) - # note in markdown-it this map was removed in v12.0.0 however, we keep it, - # since it is helpful to propagate to children tokens - token.map = [startLine, startLine + 1] - token.content = columns[i].strip() - token.children = [] - - token = state.push("th_close", "th", -1) - - token = state.push("tr_close", "tr", -1) - token = state.push("thead_close", "thead", -1) - - nextLine = startLine + 2 - while nextLine < endLine: - if state.sCount[nextLine] < state.blkIndent: - break - - terminate = False - for i in range(len(terminatorRules)): - if terminatorRules[i](state, nextLine, endLine, True): - terminate = True - break - - if terminate: - break - lineText = getLine(state, nextLine).strip() - if not lineText: - break - if state.is_code_block(nextLine): - break - columns = escapedSplit(lineText) - if columns and columns[0] == "": - columns.pop(0) - if columns and columns[-1] == "": - columns.pop() - - if nextLine == startLine + 2: - token = state.push("tbody_open", "tbody", 1) - token.map = tbodyLines = [startLine + 2, 0] - - token = state.push("tr_open", "tr", 1) - token.map = [nextLine, nextLine + 1] - - for i in range(columnCount): - token = state.push("td_open", "td", 1) - if aligns[i]: - token.attrs = {"style": "text-align:" + aligns[i]} - - token = state.push("inline", "", 0) - # note in markdown-it this map was removed in v12.0.0 however, we keep it, - # since it is helpful to propagate to children tokens - token.map = [nextLine, nextLine + 1] - try: - token.content = columns[i].strip() if columns[i] else "" - except IndexError: - token.content = "" - token.children = [] - - token = state.push("td_close", "td", -1) - - token = state.push("tr_close", "tr", -1) - - nextLine += 1 - - if tbodyLines: - token = state.push("tbody_close", "tbody", -1) - tbodyLines[1] = nextLine - - token = state.push("table_close", "table", -1) - - tableLines[1] = nextLine - state.parentType = oldParentType - state.line = nextLine - return True diff --git 
a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/distutils/line_endings.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/distutils/line_endings.py deleted file mode 100644 index 686e5ebd937fff16d5aa7f154d5c823ed17d9e0a..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/distutils/line_endings.py +++ /dev/null @@ -1,77 +0,0 @@ -""" Functions for converting from DOS to UNIX line endings - -""" -import os -import re -import sys - - -def dos2unix(file): - "Replace CRLF with LF in argument files. Print names of changed files." - if os.path.isdir(file): - print(file, "Directory!") - return - - with open(file, "rb") as fp: - data = fp.read() - if '\0' in data: - print(file, "Binary!") - return - - newdata = re.sub("\r\n", "\n", data) - if newdata != data: - print('dos2unix:', file) - with open(file, "wb") as f: - f.write(newdata) - return file - else: - print(file, 'ok') - -def dos2unix_one_dir(modified_files, dir_name, file_names): - for file in file_names: - full_path = os.path.join(dir_name, file) - file = dos2unix(full_path) - if file is not None: - modified_files.append(file) - -def dos2unix_dir(dir_name): - modified_files = [] - os.path.walk(dir_name, dos2unix_one_dir, modified_files) - return modified_files -#---------------------------------- - -def unix2dos(file): - "Replace LF with CRLF in argument files. Print names of changed files." - if os.path.isdir(file): - print(file, "Directory!") - return - - with open(file, "rb") as fp: - data = fp.read() - if '\0' in data: - print(file, "Binary!") - return - newdata = re.sub("\r\n", "\n", data) - newdata = re.sub("\n", "\r\n", newdata) - if newdata != data: - print('unix2dos:', file) - with open(file, "wb") as f: - f.write(newdata) - return file - else: - print(file, 'ok') - -def unix2dos_one_dir(modified_files, dir_name, file_names): - for file in file_names: - full_path = os.path.join(dir_name, file) - unix2dos(full_path) - if file is not None: - modified_files.append(file) - -def unix2dos_dir(dir_name): - modified_files = [] - os.path.walk(dir_name, unix2dos_one_dir, modified_files) - return modified_files - -if __name__ == "__main__": - dos2unix_dir(sys.argv[1]) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/distutils/tests/test_log.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/distutils/tests/test_log.py deleted file mode 100644 index 72fddf37370f1b5c81473a24c823a236f9f299bc..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/distutils/tests/test_log.py +++ /dev/null @@ -1,34 +0,0 @@ -import io -import re -from contextlib import redirect_stdout - -import pytest - -from numpy.distutils import log - - -def setup_module(): - f = io.StringIO() # changing verbosity also logs here, capture that - with redirect_stdout(f): - log.set_verbosity(2, force=True) # i.e. 
DEBUG - - -def teardown_module(): - log.set_verbosity(0, force=True) # the default - - -r_ansi = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])") - - -@pytest.mark.parametrize("func_name", ["error", "warn", "info", "debug"]) -def test_log_prefix(func_name): - func = getattr(log, func_name) - msg = f"{func_name} message" - f = io.StringIO() - with redirect_stdout(f): - func(msg) - out = f.getvalue() - assert out # sanity check - clean_out = r_ansi.sub("", out) - line = next(line for line in clean_out.splitlines()) - assert line == f"{func_name.upper()}: {msg}" diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_fillna.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_fillna.py deleted file mode 100644 index 12a07bac25a59e8d3e3757898d4950a0f69804c3..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/period/methods/test_fillna.py +++ /dev/null @@ -1,41 +0,0 @@ -from pandas import ( - Index, - NaT, - Period, - PeriodIndex, -) -import pandas._testing as tm - - -class TestFillNA: - def test_fillna_period(self): - # GH#11343 - idx = PeriodIndex(["2011-01-01 09:00", NaT, "2011-01-01 11:00"], freq="H") - - exp = PeriodIndex( - ["2011-01-01 09:00", "2011-01-01 10:00", "2011-01-01 11:00"], freq="H" - ) - result = idx.fillna(Period("2011-01-01 10:00", freq="H")) - tm.assert_index_equal(result, exp) - - exp = Index( - [ - Period("2011-01-01 09:00", freq="H"), - "x", - Period("2011-01-01 11:00", freq="H"), - ], - dtype=object, - ) - result = idx.fillna("x") - tm.assert_index_equal(result, exp) - - exp = Index( - [ - Period("2011-01-01 09:00", freq="H"), - Period("2011-01-01", freq="D"), - Period("2011-01-01 11:00", freq="H"), - ], - dtype=object, - ) - result = idx.fillna(Period("2011-01-01", freq="D")) - tm.assert_index_equal(result, exp) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/resolvelib/providers.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/resolvelib/providers.py deleted file mode 100644 index 7d0a9c22a4656951910a9fbb70af59a0706cadde..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/resolvelib/providers.py +++ /dev/null @@ -1,133 +0,0 @@ -class AbstractProvider(object): - """Delegate class to provide requirement interface for the resolver.""" - - def identify(self, requirement_or_candidate): - """Given a requirement, return an identifier for it. - - This is used to identify a requirement, e.g. whether two requirements - should have their specifier parts merged. - """ - raise NotImplementedError - - def get_preference( - self, - identifier, - resolutions, - candidates, - information, - backtrack_causes, - ): - """Produce a sort key for given requirement based on preference. - - The preference is defined as "I think this requirement should be - resolved first". The lower the return value is, the more preferred - this group of arguments is. - - :param identifier: An identifier as returned by ``identify()``. This - identifies the dependency matches of which should be returned. - :param resolutions: Mapping of candidates currently pinned by the - resolver. Each key is an identifier, and the value a candidate. - The candidate may conflict with requirements from ``information``. - :param candidates: Mapping of each dependency's possible candidates. 
-            Each value is an iterator of candidates.
-        :param information: Mapping of requirement information of each package.
-            Each value is an iterator of *requirement information*.
-        :param backtrack_causes: Sequence of requirement information that were
-            the requirements that caused the resolver to most recently backtrack.
-
-        A *requirement information* instance is a named tuple with two members:
-
-        * ``requirement`` specifies a requirement contributing to the current
-          list of candidates.
-        * ``parent`` specifies the candidate that provides (i.e. depends on) the
-          requirement, or ``None`` to indicate a root requirement.
-
-        The preference could depend on a variety of issues, including (not
-        necessarily in this order):
-
-        * Is this package pinned in the current resolution result?
-        * How relaxed is the requirement? Stricter ones should probably be
-          worked on first? (I don't know, actually.)
-        * How many possibilities are there to satisfy this requirement? Those
-          with few left should likely be worked on first, I guess?
-        * Are there any known conflicts for this requirement? We should
-          probably work on those with the most known conflicts.
-
-        A sortable value should be returned (this will be used as the ``key``
-        parameter of the built-in sorting function). The smaller the value is,
-        the more preferred this requirement is (i.e. the sorting function
-        is called with ``reverse=False``).
-        """
-        raise NotImplementedError
-
-    def find_matches(self, identifier, requirements, incompatibilities):
-        """Find all possible candidates that satisfy given constraints.
-
-        :param identifier: An identifier as returned by ``identify()``. This
-            identifies the dependency whose matches should be returned.
-        :param requirements: A mapping of requirements that all returned
-            candidates must satisfy. Each key is an identifier, and the value
-            an iterator of requirements for that dependency.
-        :param incompatibilities: A mapping of known incompatibilities of
-            each dependency. Each key is an identifier, and the value an
-            iterator of incompatibilities known to the resolver. All
-            incompatibilities *must* be excluded from the return value.
-
-        This should try to get candidates based on the requirements' types.
-        For VCS, local, and archive requirements, the one-and-only match is
-        returned, and for a "named" requirement, the index(es) should be
-        consulted to find concrete candidates for this requirement.
-
-        The return value should produce candidates ordered by preference; the
-        most preferred candidate should come first. The return type may be one
-        of the following:
-
-        * A callable that returns an iterator that yields candidates.
-        * A collection of candidates.
-        * An iterable of candidates. This will be consumed immediately into a
-          list of candidates.
-        """
-        raise NotImplementedError
-
-    def is_satisfied_by(self, requirement, candidate):
-        """Whether the given requirement can be satisfied by a candidate.
-
-        The candidate is guaranteed to have been generated from the
-        requirement.
-
-        A boolean should be returned to indicate whether ``candidate`` is a
-        viable solution to the requirement.
-        """
-        raise NotImplementedError
-
-    def get_dependencies(self, candidate):
-        """Get dependencies of a candidate.
-
-        This should return a collection of requirements that ``candidate``
-        specifies as its dependencies.
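-
-        For example, a minimal provider whose candidate objects carry their
-        own requirement lists could simply return them (the ``dependencies``
-        attribute here is purely illustrative, not something this interface
-        requires)::
-
-            def get_dependencies(self, candidate):
-                # Assumes each candidate exposes the requirements it
-                # introduces as ``candidate.dependencies``.
-                return list(candidate.dependencies)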
-        """
-        raise NotImplementedError
-
-
-class AbstractResolver(object):
-    """The thing that performs the actual resolution work."""
-
-    base_exception = Exception
-
-    def __init__(self, provider, reporter):
-        self.provider = provider
-        self.reporter = reporter
-
-    def resolve(self, requirements, **kwargs):
-        """Take a collection of constraints, spit out the resolution result.
-
-        This returns a representation of the final resolution state, with one
-        guaranteed attribute ``mapping`` that contains resolved candidates as
-        values. The keys are their respective identifiers.
-
-        :param requirements: A collection of constraints.
-        :param kwargs: Additional keyword arguments that subclasses may accept.
-
-        :raises: ``self.base_exception`` or its subclass.
-        """
-        raise NotImplementedError
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/styles/monokai.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/styles/monokai.py
deleted file mode 100644
index bf968d4ab4a2a8a8b338ddfe6a927899d8001486..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/styles/monokai.py
+++ /dev/null
@@ -1,107 +0,0 @@
-"""
-    pygments.styles.monokai
-    ~~~~~~~~~~~~~~~~~~~~~~~
-
-    Mimic the Monokai color scheme. Based on tango.py.
-
-    http://www.monokai.nl/blog/2006/07/15/textmate-color-theme/
-
-    :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
-    :license: BSD, see LICENSE for details.
-"""
-
-from pygments.style import Style
-from pygments.token import Keyword, Name, Comment, String, Error, Token, \
-    Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
-
-class MonokaiStyle(Style):
-    """
-    This style mimics the Monokai color scheme.
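-
-    A short illustrative usage, selecting the style by name through the
-    standard highlighting API::
-
-        from pygments import highlight
-        from pygments.lexers import PythonLexer
-        from pygments.formatters import HtmlFormatter
-
-        html = highlight("print('hi')", PythonLexer(), HtmlFormatter(style="monokai"))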
- """ - - background_color = "#272822" - highlight_color = "#49483e" - - styles = { - # No corresponding class for the following: - Token: "#f8f8f2", # class: '' - Whitespace: "", # class: 'w' - Error: "#ed007e bg:#1e0010", # class: 'err' - Other: "", # class 'x' - - Comment: "#959077", # class: 'c' - Comment.Multiline: "", # class: 'cm' - Comment.Preproc: "", # class: 'cp' - Comment.Single: "", # class: 'c1' - Comment.Special: "", # class: 'cs' - - Keyword: "#66d9ef", # class: 'k' - Keyword.Constant: "", # class: 'kc' - Keyword.Declaration: "", # class: 'kd' - Keyword.Namespace: "#ff4689", # class: 'kn' - Keyword.Pseudo: "", # class: 'kp' - Keyword.Reserved: "", # class: 'kr' - Keyword.Type: "", # class: 'kt' - - Operator: "#ff4689", # class: 'o' - Operator.Word: "", # class: 'ow' - like keywords - - Punctuation: "#f8f8f2", # class: 'p' - - Name: "#f8f8f2", # class: 'n' - Name.Attribute: "#a6e22e", # class: 'na' - to be revised - Name.Builtin: "", # class: 'nb' - Name.Builtin.Pseudo: "", # class: 'bp' - Name.Class: "#a6e22e", # class: 'nc' - to be revised - Name.Constant: "#66d9ef", # class: 'no' - to be revised - Name.Decorator: "#a6e22e", # class: 'nd' - to be revised - Name.Entity: "", # class: 'ni' - Name.Exception: "#a6e22e", # class: 'ne' - Name.Function: "#a6e22e", # class: 'nf' - Name.Property: "", # class: 'py' - Name.Label: "", # class: 'nl' - Name.Namespace: "", # class: 'nn' - to be revised - Name.Other: "#a6e22e", # class: 'nx' - Name.Tag: "#ff4689", # class: 'nt' - like a keyword - Name.Variable: "", # class: 'nv' - to be revised - Name.Variable.Class: "", # class: 'vc' - to be revised - Name.Variable.Global: "", # class: 'vg' - to be revised - Name.Variable.Instance: "", # class: 'vi' - to be revised - - Number: "#ae81ff", # class: 'm' - Number.Float: "", # class: 'mf' - Number.Hex: "", # class: 'mh' - Number.Integer: "", # class: 'mi' - Number.Integer.Long: "", # class: 'il' - Number.Oct: "", # class: 'mo' - - Literal: "#ae81ff", # class: 'l' - Literal.Date: "#e6db74", # class: 'ld' - - String: "#e6db74", # class: 's' - String.Backtick: "", # class: 'sb' - String.Char: "", # class: 'sc' - String.Doc: "", # class: 'sd' - like a comment - String.Double: "", # class: 's2' - String.Escape: "#ae81ff", # class: 'se' - String.Heredoc: "", # class: 'sh' - String.Interpol: "", # class: 'si' - String.Other: "", # class: 'sx' - String.Regex: "", # class: 'sr' - String.Single: "", # class: 's1' - String.Symbol: "", # class: 'ss' - - - Generic: "", # class: 'g' - Generic.Deleted: "#ff4689", # class: 'gd', - Generic.Emph: "italic", # class: 'ge' - Generic.Error: "", # class: 'gr' - Generic.Heading: "", # class: 'gh' - Generic.Inserted: "#a6e22e", # class: 'gi' - Generic.Output: "#66d9ef", # class: 'go' - Generic.Prompt: "bold #ff4689", # class: 'gp' - Generic.Strong: "bold", # class: 'gs' - Generic.EmphStrong: "bold italic", # class: 'ges' - Generic.Subheading: "#959077", # class: 'gu' - Generic.Traceback: "", # class: 'gt' - } diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/six.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/six.py deleted file mode 100644 index 4e15675d8b5caa33255fe37271700f587bd26671..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/six.py +++ /dev/null @@ -1,998 +0,0 @@ -# Copyright (c) 2010-2020 Benjamin Peterson -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the 
"Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -"""Utilities for writing code that runs on Python 2 and 3""" - -from __future__ import absolute_import - -import functools -import itertools -import operator -import sys -import types - -__author__ = "Benjamin Peterson " -__version__ = "1.16.0" - - -# Useful for very coarse version differentiation. -PY2 = sys.version_info[0] == 2 -PY3 = sys.version_info[0] == 3 -PY34 = sys.version_info[0:2] >= (3, 4) - -if PY3: - string_types = str, - integer_types = int, - class_types = type, - text_type = str - binary_type = bytes - - MAXSIZE = sys.maxsize -else: - string_types = basestring, - integer_types = (int, long) - class_types = (type, types.ClassType) - text_type = unicode - binary_type = str - - if sys.platform.startswith("java"): - # Jython always uses 32 bits. - MAXSIZE = int((1 << 31) - 1) - else: - # It's possible to have sizeof(long) != sizeof(Py_ssize_t). - class X(object): - - def __len__(self): - return 1 << 31 - try: - len(X()) - except OverflowError: - # 32-bit - MAXSIZE = int((1 << 31) - 1) - else: - # 64-bit - MAXSIZE = int((1 << 63) - 1) - del X - -if PY34: - from importlib.util import spec_from_loader -else: - spec_from_loader = None - - -def _add_doc(func, doc): - """Add documentation to a function.""" - func.__doc__ = doc - - -def _import_module(name): - """Import module, returning the module after the last dot.""" - __import__(name) - return sys.modules[name] - - -class _LazyDescr(object): - - def __init__(self, name): - self.name = name - - def __get__(self, obj, tp): - result = self._resolve() - setattr(obj, self.name, result) # Invokes __set__. - try: - # This is a bit ugly, but it avoids running this again by - # removing this descriptor. 
- delattr(obj.__class__, self.name) - except AttributeError: - pass - return result - - -class MovedModule(_LazyDescr): - - def __init__(self, name, old, new=None): - super(MovedModule, self).__init__(name) - if PY3: - if new is None: - new = name - self.mod = new - else: - self.mod = old - - def _resolve(self): - return _import_module(self.mod) - - def __getattr__(self, attr): - _module = self._resolve() - value = getattr(_module, attr) - setattr(self, attr, value) - return value - - -class _LazyModule(types.ModuleType): - - def __init__(self, name): - super(_LazyModule, self).__init__(name) - self.__doc__ = self.__class__.__doc__ - - def __dir__(self): - attrs = ["__doc__", "__name__"] - attrs += [attr.name for attr in self._moved_attributes] - return attrs - - # Subclasses should override this - _moved_attributes = [] - - -class MovedAttribute(_LazyDescr): - - def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): - super(MovedAttribute, self).__init__(name) - if PY3: - if new_mod is None: - new_mod = name - self.mod = new_mod - if new_attr is None: - if old_attr is None: - new_attr = name - else: - new_attr = old_attr - self.attr = new_attr - else: - self.mod = old_mod - if old_attr is None: - old_attr = name - self.attr = old_attr - - def _resolve(self): - module = _import_module(self.mod) - return getattr(module, self.attr) - - -class _SixMetaPathImporter(object): - - """ - A meta path importer to import six.moves and its submodules. - - This class implements a PEP302 finder and loader. It should be compatible - with Python 2.5 and all existing versions of Python3 - """ - - def __init__(self, six_module_name): - self.name = six_module_name - self.known_modules = {} - - def _add_module(self, mod, *fullnames): - for fullname in fullnames: - self.known_modules[self.name + "." + fullname] = mod - - def _get_module(self, fullname): - return self.known_modules[self.name + "." + fullname] - - def find_module(self, fullname, path=None): - if fullname in self.known_modules: - return self - return None - - def find_spec(self, fullname, path, target=None): - if fullname in self.known_modules: - return spec_from_loader(fullname, self) - return None - - def __get_module(self, fullname): - try: - return self.known_modules[fullname] - except KeyError: - raise ImportError("This loader does not know module " + fullname) - - def load_module(self, fullname): - try: - # in case of a reload - return sys.modules[fullname] - except KeyError: - pass - mod = self.__get_module(fullname) - if isinstance(mod, MovedModule): - mod = mod._resolve() - else: - mod.__loader__ = self - sys.modules[fullname] = mod - return mod - - def is_package(self, fullname): - """ - Return true, if the named module is a package. 
- - We need this method to get correct spec objects with - Python 3.4 (see PEP451) - """ - return hasattr(self.__get_module(fullname), "__path__") - - def get_code(self, fullname): - """Return None - - Required, if is_package is implemented""" - self.__get_module(fullname) # eventually raises ImportError - return None - get_source = get_code # same as get_code - - def create_module(self, spec): - return self.load_module(spec.name) - - def exec_module(self, module): - pass - -_importer = _SixMetaPathImporter(__name__) - - -class _MovedItems(_LazyModule): - - """Lazy loading of moved objects""" - __path__ = [] # mark as package - - -_moved_attributes = [ - MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), - MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), - MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), - MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), - MovedAttribute("intern", "__builtin__", "sys"), - MovedAttribute("map", "itertools", "builtins", "imap", "map"), - MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), - MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), - MovedAttribute("getoutput", "commands", "subprocess"), - MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), - MovedAttribute("reduce", "__builtin__", "functools"), - MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), - MovedAttribute("StringIO", "StringIO", "io"), - MovedAttribute("UserDict", "UserDict", "collections"), - MovedAttribute("UserList", "UserList", "collections"), - MovedAttribute("UserString", "UserString", "collections"), - MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), - MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), - MovedModule("builtins", "__builtin__"), - MovedModule("configparser", "ConfigParser"), - MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"), - MovedModule("copyreg", "copy_reg"), - MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), - MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"), - MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread" if sys.version_info < (3, 9) else "_thread"), - MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), - MovedModule("http_cookies", "Cookie", "http.cookies"), - MovedModule("html_entities", "htmlentitydefs", "html.entities"), - MovedModule("html_parser", "HTMLParser", "html.parser"), - MovedModule("http_client", "httplib", "http.client"), - MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), - MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"), - MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), - MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), - MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), - MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), - MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), - MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), - MovedModule("cPickle", "cPickle", "pickle"), - MovedModule("queue", "Queue"), - MovedModule("reprlib", "repr"), - MovedModule("socketserver", "SocketServer"), - MovedModule("_thread", "thread", "_thread"), - 
MovedModule("tkinter", "Tkinter"), - MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), - MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), - MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), - MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), - MovedModule("tkinter_tix", "Tix", "tkinter.tix"), - MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), - MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), - MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), - MovedModule("tkinter_colorchooser", "tkColorChooser", - "tkinter.colorchooser"), - MovedModule("tkinter_commondialog", "tkCommonDialog", - "tkinter.commondialog"), - MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), - MovedModule("tkinter_font", "tkFont", "tkinter.font"), - MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), - MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", - "tkinter.simpledialog"), - MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), - MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), - MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), - MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), - MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), - MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), -] -# Add windows specific modules. -if sys.platform == "win32": - _moved_attributes += [ - MovedModule("winreg", "_winreg"), - ] - -for attr in _moved_attributes: - setattr(_MovedItems, attr.name, attr) - if isinstance(attr, MovedModule): - _importer._add_module(attr, "moves." + attr.name) -del attr - -_MovedItems._moved_attributes = _moved_attributes - -moves = _MovedItems(__name__ + ".moves") -_importer._add_module(moves, "moves") - - -class Module_six_moves_urllib_parse(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_parse""" - - -_urllib_parse_moved_attributes = [ - MovedAttribute("ParseResult", "urlparse", "urllib.parse"), - MovedAttribute("SplitResult", "urlparse", "urllib.parse"), - MovedAttribute("parse_qs", "urlparse", "urllib.parse"), - MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), - MovedAttribute("urldefrag", "urlparse", "urllib.parse"), - MovedAttribute("urljoin", "urlparse", "urllib.parse"), - MovedAttribute("urlparse", "urlparse", "urllib.parse"), - MovedAttribute("urlsplit", "urlparse", "urllib.parse"), - MovedAttribute("urlunparse", "urlparse", "urllib.parse"), - MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), - MovedAttribute("quote", "urllib", "urllib.parse"), - MovedAttribute("quote_plus", "urllib", "urllib.parse"), - MovedAttribute("unquote", "urllib", "urllib.parse"), - MovedAttribute("unquote_plus", "urllib", "urllib.parse"), - MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"), - MovedAttribute("urlencode", "urllib", "urllib.parse"), - MovedAttribute("splitquery", "urllib", "urllib.parse"), - MovedAttribute("splittag", "urllib", "urllib.parse"), - MovedAttribute("splituser", "urllib", "urllib.parse"), - MovedAttribute("splitvalue", "urllib", "urllib.parse"), - MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), - MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), - MovedAttribute("uses_params", "urlparse", "urllib.parse"), - MovedAttribute("uses_query", "urlparse", "urllib.parse"), - MovedAttribute("uses_relative", "urlparse", 
"urllib.parse"), -] -for attr in _urllib_parse_moved_attributes: - setattr(Module_six_moves_urllib_parse, attr.name, attr) -del attr - -Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes - -_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), - "moves.urllib_parse", "moves.urllib.parse") - - -class Module_six_moves_urllib_error(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_error""" - - -_urllib_error_moved_attributes = [ - MovedAttribute("URLError", "urllib2", "urllib.error"), - MovedAttribute("HTTPError", "urllib2", "urllib.error"), - MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), -] -for attr in _urllib_error_moved_attributes: - setattr(Module_six_moves_urllib_error, attr.name, attr) -del attr - -Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes - -_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), - "moves.urllib_error", "moves.urllib.error") - - -class Module_six_moves_urllib_request(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_request""" - - -_urllib_request_moved_attributes = [ - MovedAttribute("urlopen", "urllib2", "urllib.request"), - MovedAttribute("install_opener", "urllib2", "urllib.request"), - MovedAttribute("build_opener", "urllib2", "urllib.request"), - MovedAttribute("pathname2url", "urllib", "urllib.request"), - MovedAttribute("url2pathname", "urllib", "urllib.request"), - MovedAttribute("getproxies", "urllib", "urllib.request"), - MovedAttribute("Request", "urllib2", "urllib.request"), - MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), - MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), - MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), - MovedAttribute("BaseHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), - MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), - MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), - MovedAttribute("FileHandler", "urllib2", "urllib.request"), - MovedAttribute("FTPHandler", "urllib2", "urllib.request"), - MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), - MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), - MovedAttribute("urlretrieve", "urllib", "urllib.request"), - MovedAttribute("urlcleanup", "urllib", "urllib.request"), - MovedAttribute("URLopener", "urllib", "urllib.request"), - MovedAttribute("FancyURLopener", "urllib", "urllib.request"), - MovedAttribute("proxy_bypass", "urllib", "urllib.request"), - MovedAttribute("parse_http_list", "urllib2", "urllib.request"), - MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"), -] -for attr in _urllib_request_moved_attributes: 
- setattr(Module_six_moves_urllib_request, attr.name, attr) -del attr - -Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes - -_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), - "moves.urllib_request", "moves.urllib.request") - - -class Module_six_moves_urllib_response(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_response""" - - -_urllib_response_moved_attributes = [ - MovedAttribute("addbase", "urllib", "urllib.response"), - MovedAttribute("addclosehook", "urllib", "urllib.response"), - MovedAttribute("addinfo", "urllib", "urllib.response"), - MovedAttribute("addinfourl", "urllib", "urllib.response"), -] -for attr in _urllib_response_moved_attributes: - setattr(Module_six_moves_urllib_response, attr.name, attr) -del attr - -Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes - -_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), - "moves.urllib_response", "moves.urllib.response") - - -class Module_six_moves_urllib_robotparser(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_robotparser""" - - -_urllib_robotparser_moved_attributes = [ - MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), -] -for attr in _urllib_robotparser_moved_attributes: - setattr(Module_six_moves_urllib_robotparser, attr.name, attr) -del attr - -Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes - -_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), - "moves.urllib_robotparser", "moves.urllib.robotparser") - - -class Module_six_moves_urllib(types.ModuleType): - - """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" - __path__ = [] # mark as package - parse = _importer._get_module("moves.urllib_parse") - error = _importer._get_module("moves.urllib_error") - request = _importer._get_module("moves.urllib_request") - response = _importer._get_module("moves.urllib_response") - robotparser = _importer._get_module("moves.urllib_robotparser") - - def __dir__(self): - return ['parse', 'error', 'request', 'response', 'robotparser'] - -_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), - "moves.urllib") - - -def add_move(move): - """Add an item to six.moves.""" - setattr(_MovedItems, move.name, move) - - -def remove_move(name): - """Remove item from six.moves.""" - try: - delattr(_MovedItems, name) - except AttributeError: - try: - del moves.__dict__[name] - except KeyError: - raise AttributeError("no such move, %r" % (name,)) - - -if PY3: - _meth_func = "__func__" - _meth_self = "__self__" - - _func_closure = "__closure__" - _func_code = "__code__" - _func_defaults = "__defaults__" - _func_globals = "__globals__" -else: - _meth_func = "im_func" - _meth_self = "im_self" - - _func_closure = "func_closure" - _func_code = "func_code" - _func_defaults = "func_defaults" - _func_globals = "func_globals" - - -try: - advance_iterator = next -except NameError: - def advance_iterator(it): - return it.next() -next = advance_iterator - - -try: - callable = callable -except NameError: - def callable(obj): - return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) - - -if PY3: - def get_unbound_function(unbound): - return unbound - - create_bound_method = types.MethodType - - def create_unbound_method(func, cls): - return func - - Iterator = object -else: - def 
get_unbound_function(unbound): - return unbound.im_func - - def create_bound_method(func, obj): - return types.MethodType(func, obj, obj.__class__) - - def create_unbound_method(func, cls): - return types.MethodType(func, None, cls) - - class Iterator(object): - - def next(self): - return type(self).__next__(self) - - callable = callable -_add_doc(get_unbound_function, - """Get the function out of a possibly unbound function""") - - -get_method_function = operator.attrgetter(_meth_func) -get_method_self = operator.attrgetter(_meth_self) -get_function_closure = operator.attrgetter(_func_closure) -get_function_code = operator.attrgetter(_func_code) -get_function_defaults = operator.attrgetter(_func_defaults) -get_function_globals = operator.attrgetter(_func_globals) - - -if PY3: - def iterkeys(d, **kw): - return iter(d.keys(**kw)) - - def itervalues(d, **kw): - return iter(d.values(**kw)) - - def iteritems(d, **kw): - return iter(d.items(**kw)) - - def iterlists(d, **kw): - return iter(d.lists(**kw)) - - viewkeys = operator.methodcaller("keys") - - viewvalues = operator.methodcaller("values") - - viewitems = operator.methodcaller("items") -else: - def iterkeys(d, **kw): - return d.iterkeys(**kw) - - def itervalues(d, **kw): - return d.itervalues(**kw) - - def iteritems(d, **kw): - return d.iteritems(**kw) - - def iterlists(d, **kw): - return d.iterlists(**kw) - - viewkeys = operator.methodcaller("viewkeys") - - viewvalues = operator.methodcaller("viewvalues") - - viewitems = operator.methodcaller("viewitems") - -_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") -_add_doc(itervalues, "Return an iterator over the values of a dictionary.") -_add_doc(iteritems, - "Return an iterator over the (key, value) pairs of a dictionary.") -_add_doc(iterlists, - "Return an iterator over the (key, [values]) pairs of a dictionary.") - - -if PY3: - def b(s): - return s.encode("latin-1") - - def u(s): - return s - unichr = chr - import struct - int2byte = struct.Struct(">B").pack - del struct - byte2int = operator.itemgetter(0) - indexbytes = operator.getitem - iterbytes = iter - import io - StringIO = io.StringIO - BytesIO = io.BytesIO - del io - _assertCountEqual = "assertCountEqual" - if sys.version_info[1] <= 1: - _assertRaisesRegex = "assertRaisesRegexp" - _assertRegex = "assertRegexpMatches" - _assertNotRegex = "assertNotRegexpMatches" - else: - _assertRaisesRegex = "assertRaisesRegex" - _assertRegex = "assertRegex" - _assertNotRegex = "assertNotRegex" -else: - def b(s): - return s - # Workaround for standalone backslash - - def u(s): - return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") - unichr = unichr - int2byte = chr - - def byte2int(bs): - return ord(bs[0]) - - def indexbytes(buf, i): - return ord(buf[i]) - iterbytes = functools.partial(itertools.imap, ord) - import StringIO - StringIO = BytesIO = StringIO.StringIO - _assertCountEqual = "assertItemsEqual" - _assertRaisesRegex = "assertRaisesRegexp" - _assertRegex = "assertRegexpMatches" - _assertNotRegex = "assertNotRegexpMatches" -_add_doc(b, """Byte literal""") -_add_doc(u, """Text literal""") - - -def assertCountEqual(self, *args, **kwargs): - return getattr(self, _assertCountEqual)(*args, **kwargs) - - -def assertRaisesRegex(self, *args, **kwargs): - return getattr(self, _assertRaisesRegex)(*args, **kwargs) - - -def assertRegex(self, *args, **kwargs): - return getattr(self, _assertRegex)(*args, **kwargs) - - -def assertNotRegex(self, *args, **kwargs): - return getattr(self, _assertNotRegex)(*args, **kwargs) - - -if 
PY3: - exec_ = getattr(moves.builtins, "exec") - - def reraise(tp, value, tb=None): - try: - if value is None: - value = tp() - if value.__traceback__ is not tb: - raise value.with_traceback(tb) - raise value - finally: - value = None - tb = None - -else: - def exec_(_code_, _globs_=None, _locs_=None): - """Execute code in a namespace.""" - if _globs_ is None: - frame = sys._getframe(1) - _globs_ = frame.f_globals - if _locs_ is None: - _locs_ = frame.f_locals - del frame - elif _locs_ is None: - _locs_ = _globs_ - exec("""exec _code_ in _globs_, _locs_""") - - exec_("""def reraise(tp, value, tb=None): - try: - raise tp, value, tb - finally: - tb = None -""") - - -if sys.version_info[:2] > (3,): - exec_("""def raise_from(value, from_value): - try: - raise value from from_value - finally: - value = None -""") -else: - def raise_from(value, from_value): - raise value - - -print_ = getattr(moves.builtins, "print", None) -if print_ is None: - def print_(*args, **kwargs): - """The new-style print function for Python 2.4 and 2.5.""" - fp = kwargs.pop("file", sys.stdout) - if fp is None: - return - - def write(data): - if not isinstance(data, basestring): - data = str(data) - # If the file has an encoding, encode unicode with it. - if (isinstance(fp, file) and - isinstance(data, unicode) and - fp.encoding is not None): - errors = getattr(fp, "errors", None) - if errors is None: - errors = "strict" - data = data.encode(fp.encoding, errors) - fp.write(data) - want_unicode = False - sep = kwargs.pop("sep", None) - if sep is not None: - if isinstance(sep, unicode): - want_unicode = True - elif not isinstance(sep, str): - raise TypeError("sep must be None or a string") - end = kwargs.pop("end", None) - if end is not None: - if isinstance(end, unicode): - want_unicode = True - elif not isinstance(end, str): - raise TypeError("end must be None or a string") - if kwargs: - raise TypeError("invalid keyword arguments to print()") - if not want_unicode: - for arg in args: - if isinstance(arg, unicode): - want_unicode = True - break - if want_unicode: - newline = unicode("\n") - space = unicode(" ") - else: - newline = "\n" - space = " " - if sep is None: - sep = space - if end is None: - end = newline - for i, arg in enumerate(args): - if i: - write(sep) - write(arg) - write(end) -if sys.version_info[:2] < (3, 3): - _print = print_ - - def print_(*args, **kwargs): - fp = kwargs.get("file", sys.stdout) - flush = kwargs.pop("flush", False) - _print(*args, **kwargs) - if flush and fp is not None: - fp.flush() - -_add_doc(reraise, """Reraise an exception.""") - -if sys.version_info[0:2] < (3, 4): - # This does exactly the same what the :func:`py3:functools.update_wrapper` - # function does on Python versions after 3.2. It sets the ``__wrapped__`` - # attribute on ``wrapper`` object and it doesn't raise an error if any of - # the attributes mentioned in ``assigned`` and ``updated`` are missing on - # ``wrapped`` object. 
- def _update_wrapper(wrapper, wrapped, - assigned=functools.WRAPPER_ASSIGNMENTS, - updated=functools.WRAPPER_UPDATES): - for attr in assigned: - try: - value = getattr(wrapped, attr) - except AttributeError: - continue - else: - setattr(wrapper, attr, value) - for attr in updated: - getattr(wrapper, attr).update(getattr(wrapped, attr, {})) - wrapper.__wrapped__ = wrapped - return wrapper - _update_wrapper.__doc__ = functools.update_wrapper.__doc__ - - def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, - updated=functools.WRAPPER_UPDATES): - return functools.partial(_update_wrapper, wrapped=wrapped, - assigned=assigned, updated=updated) - wraps.__doc__ = functools.wraps.__doc__ - -else: - wraps = functools.wraps - - -def with_metaclass(meta, *bases): - """Create a base class with a metaclass.""" - # This requires a bit of explanation: the basic idea is to make a dummy - # metaclass for one level of class instantiation that replaces itself with - # the actual metaclass. - class metaclass(type): - - def __new__(cls, name, this_bases, d): - if sys.version_info[:2] >= (3, 7): - # This version introduced PEP 560 that requires a bit - # of extra care (we mimic what is done by __build_class__). - resolved_bases = types.resolve_bases(bases) - if resolved_bases is not bases: - d['__orig_bases__'] = bases - else: - resolved_bases = bases - return meta(name, resolved_bases, d) - - @classmethod - def __prepare__(cls, name, this_bases): - return meta.__prepare__(name, bases) - return type.__new__(metaclass, 'temporary_class', (), {}) - - -def add_metaclass(metaclass): - """Class decorator for creating a class with a metaclass.""" - def wrapper(cls): - orig_vars = cls.__dict__.copy() - slots = orig_vars.get('__slots__') - if slots is not None: - if isinstance(slots, str): - slots = [slots] - for slots_var in slots: - orig_vars.pop(slots_var) - orig_vars.pop('__dict__', None) - orig_vars.pop('__weakref__', None) - if hasattr(cls, '__qualname__'): - orig_vars['__qualname__'] = cls.__qualname__ - return metaclass(cls.__name__, cls.__bases__, orig_vars) - return wrapper - - -def ensure_binary(s, encoding='utf-8', errors='strict'): - """Coerce **s** to six.binary_type. - - For Python 2: - - `unicode` -> encoded to `str` - - `str` -> `str` - - For Python 3: - - `str` -> encoded to `bytes` - - `bytes` -> `bytes` - """ - if isinstance(s, binary_type): - return s - if isinstance(s, text_type): - return s.encode(encoding, errors) - raise TypeError("not expecting type '%s'" % type(s)) - - -def ensure_str(s, encoding='utf-8', errors='strict'): - """Coerce *s* to `str`. - - For Python 2: - - `unicode` -> encoded to `str` - - `str` -> `str` - - For Python 3: - - `str` -> `str` - - `bytes` -> decoded to `str` - """ - # Optimization: Fast return for the common case. - if type(s) is str: - return s - if PY2 and isinstance(s, text_type): - return s.encode(encoding, errors) - elif PY3 and isinstance(s, binary_type): - return s.decode(encoding, errors) - elif not isinstance(s, (text_type, binary_type)): - raise TypeError("not expecting type '%s'" % type(s)) - return s - - -def ensure_text(s, encoding='utf-8', errors='strict'): - """Coerce *s* to six.text_type. 
- - For Python 2: - - `unicode` -> `unicode` - - `str` -> `unicode` - - For Python 3: - - `str` -> `str` - - `bytes` -> decoded to `str` - """ - if isinstance(s, binary_type): - return s.decode(encoding, errors) - elif isinstance(s, text_type): - return s - else: - raise TypeError("not expecting type '%s'" % type(s)) - - -def python_2_unicode_compatible(klass): - """ - A class decorator that defines __unicode__ and __str__ methods under Python 2. - Under Python 3 it does nothing. - - To support Python 2 and 3 with a single code base, define a __str__ method - returning text and apply this decorator to the class. - """ - if PY2: - if '__str__' not in klass.__dict__: - raise ValueError("@python_2_unicode_compatible cannot be applied " - "to %s because it doesn't define __str__()." % - klass.__name__) - klass.__unicode__ = klass.__str__ - klass.__str__ = lambda self: self.__unicode__().encode('utf-8') - return klass - - -# Complete the moves implementation. -# This code is at the end of this module to speed up module loading. -# Turn this module into a package. -__path__ = [] # required for PEP 302 and PEP 451 -__package__ = __name__ # see PEP 366 @ReservedAssignment -if globals().get("__spec__") is not None: - __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable -# Remove other six meta path importers, since they cause problems. This can -# happen if six is removed from sys.modules and then reloaded. (Setuptools does -# this for some reason.) -if sys.meta_path: - for i, importer in enumerate(sys.meta_path): - # Here's some real nastiness: Another "instance" of the six module might - # be floating around. Therefore, we can't use isinstance() to check for - # the six meta path importer, since the other six instance will have - # inserted an importer with different class. - if (type(importer).__name__ == "_SixMetaPathImporter" and - importer.name == __name__): - del sys.meta_path[i] - break - del i, importer -# Finally, add the importer to the meta path import hook. -sys.meta_path.append(_importer) diff --git a/spaces/pytorch/DCGAN_on_fashiongen/README.md b/spaces/pytorch/DCGAN_on_fashiongen/README.md deleted file mode 100644 index 07bac943760a47d7e303afd042d91f0722dfe2be..0000000000000000000000000000000000000000 --- a/spaces/pytorch/DCGAN_on_fashiongen/README.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: DCGAN_on_fashiongen -emoji: 🐠 -colorFrom: indigo -colorTo: green -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
diff --git a/spaces/qinzhu/moe-tts-tech/text/mandarin.py b/spaces/qinzhu/moe-tts-tech/text/mandarin.py deleted file mode 100644 index ff71de9788e4f20c897b971a775d1ecfbfe1c7b7..0000000000000000000000000000000000000000 --- a/spaces/qinzhu/moe-tts-tech/text/mandarin.py +++ /dev/null @@ -1,329 +0,0 @@ -import os -import sys -import re -from pypinyin import lazy_pinyin, BOPOMOFO -import jieba -import cn2an -import logging - -logging.getLogger('jieba').setLevel(logging.WARNING) -jieba.initialize() - - -# List of (Latin alphabet, bopomofo) pairs: -_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', 'ㄟˉ'), - ('b', 'ㄅㄧˋ'), - ('c', 'ㄙㄧˉ'), - ('d', 'ㄉㄧˋ'), - ('e', 'ㄧˋ'), - ('f', 'ㄝˊㄈㄨˋ'), - ('g', 'ㄐㄧˋ'), - ('h', 'ㄝˇㄑㄩˋ'), - ('i', 'ㄞˋ'), - ('j', 'ㄐㄟˋ'), - ('k', 'ㄎㄟˋ'), - ('l', 'ㄝˊㄛˋ'), - ('m', 'ㄝˊㄇㄨˋ'), - ('n', 'ㄣˉ'), - ('o', 'ㄡˉ'), - ('p', 'ㄆㄧˉ'), - ('q', 'ㄎㄧㄡˉ'), - ('r', 'ㄚˋ'), - ('s', 'ㄝˊㄙˋ'), - ('t', 'ㄊㄧˋ'), - ('u', 'ㄧㄡˉ'), - ('v', 'ㄨㄧˉ'), - ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'), - ('x', 'ㄝˉㄎㄨˋㄙˋ'), - ('y', 'ㄨㄞˋ'), - ('z', 'ㄗㄟˋ') -]] - -# List of (bopomofo, romaji) pairs: -_bopomofo_to_romaji = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄅㄛ', 'p⁼wo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p⁼'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't⁼'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k⁼'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'h'), - ('ㄐ', 'ʧ⁼'), - ('ㄑ', 'ʧʰ'), - ('ㄒ', 'ʃ'), - ('ㄓ', 'ʦ`⁼'), - ('ㄔ', 'ʦ`ʰ'), - ('ㄕ', 's`'), - ('ㄖ', 'ɹ`'), - ('ㄗ', 'ʦ⁼'), - ('ㄘ', 'ʦʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ə'), - ('ㄝ', 'e'), - ('ㄞ', 'ai'), - ('ㄟ', 'ei'), - ('ㄠ', 'au'), - ('ㄡ', 'ou'), - ('ㄧㄢ', 'yeNN'), - ('ㄢ', 'aNN'), - ('ㄧㄣ', 'iNN'), - ('ㄣ', 'əNN'), - ('ㄤ', 'aNg'), - ('ㄧㄥ', 'iNg'), - ('ㄨㄥ', 'uNg'), - ('ㄩㄥ', 'yuNg'), - ('ㄥ', 'əNg'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'ɥ'), - ('ˉ', '→'), - ('ˊ', '↑'), - ('ˇ', '↓↑'), - ('ˋ', '↓'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - -# List of (romaji, ipa) pairs: -_romaji_to_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('ʃy', 'ʃ'), - ('ʧʰy', 'ʧʰ'), - ('ʧ⁼y', 'ʧ⁼'), - ('NN', 'n'), - ('Ng', 'ŋ'), - ('y', 'j'), - ('h', 'x') -]] - -# List of (bopomofo, ipa) pairs: -_bopomofo_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄅㄛ', 'p⁼wo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p⁼'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't⁼'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k⁼'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'x'), - ('ㄐ', 'tʃ⁼'), - ('ㄑ', 'tʃʰ'), - ('ㄒ', 'ʃ'), - ('ㄓ', 'ts`⁼'), - ('ㄔ', 'ts`ʰ'), - ('ㄕ', 's`'), - ('ㄖ', 'ɹ`'), - ('ㄗ', 'ts⁼'), - ('ㄘ', 'tsʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ə'), - ('ㄝ', 'ɛ'), - ('ㄞ', 'aɪ'), - ('ㄟ', 'eɪ'), - ('ㄠ', 'ɑʊ'), - ('ㄡ', 'oʊ'), - ('ㄧㄢ', 'jɛn'), - ('ㄩㄢ', 'ɥæn'), - ('ㄢ', 'an'), - ('ㄧㄣ', 'in'), - ('ㄩㄣ', 'ɥn'), - ('ㄣ', 'ən'), - ('ㄤ', 'ɑŋ'), - ('ㄧㄥ', 'iŋ'), - ('ㄨㄥ', 'ʊŋ'), - ('ㄩㄥ', 'jʊŋ'), - ('ㄥ', 'əŋ'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'ɥ'), - ('ˉ', '→'), - ('ˊ', '↑'), - ('ˇ', '↓↑'), - ('ˋ', '↓'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - -# List of (bopomofo, ipa2) pairs: -_bopomofo_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄅㄛ', 'pwo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'h'), - ('ㄐ', 'tɕ'), 
- ('ㄑ', 'tɕʰ'), - ('ㄒ', 'ɕ'), - ('ㄓ', 'tʂ'), - ('ㄔ', 'tʂʰ'), - ('ㄕ', 'ʂ'), - ('ㄖ', 'ɻ'), - ('ㄗ', 'ts'), - ('ㄘ', 'tsʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ɤ'), - ('ㄝ', 'ɛ'), - ('ㄞ', 'aɪ'), - ('ㄟ', 'eɪ'), - ('ㄠ', 'ɑʊ'), - ('ㄡ', 'oʊ'), - ('ㄧㄢ', 'jɛn'), - ('ㄩㄢ', 'yæn'), - ('ㄢ', 'an'), - ('ㄧㄣ', 'in'), - ('ㄩㄣ', 'yn'), - ('ㄣ', 'ən'), - ('ㄤ', 'ɑŋ'), - ('ㄧㄥ', 'iŋ'), - ('ㄨㄥ', 'ʊŋ'), - ('ㄩㄥ', 'jʊŋ'), - ('ㄥ', 'ɤŋ'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'y'), - ('ˉ', '˥'), - ('ˊ', '˧˥'), - ('ˇ', '˨˩˦'), - ('ˋ', '˥˩'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - - -def number_to_chinese(text): - numbers = re.findall(r'\d+(?:\.?\d+)?', text) - for number in numbers: - text = text.replace(number, cn2an.an2cn(number), 1) - return text - - -def chinese_to_bopomofo(text): - text = text.replace('、', ',').replace(';', ',').replace(':', ',') - words = jieba.lcut(text, cut_all=False) - text = '' - for word in words: - bopomofos = lazy_pinyin(word, BOPOMOFO) - if not re.search('[\u4e00-\u9fff]', word): - text += word - continue - for i in range(len(bopomofos)): - bopomofos[i] = re.sub(r'([\u3105-\u3129])$', r'\1ˉ', bopomofos[i]) - if text != '': - text += ' ' - text += ''.join(bopomofos) - return text - - -def latin_to_bopomofo(text): - for regex, replacement in _latin_to_bopomofo: - text = re.sub(regex, replacement, text) - return text - - -def bopomofo_to_romaji(text): - for regex, replacement in _bopomofo_to_romaji: - text = re.sub(regex, replacement, text) - return text - - -def bopomofo_to_ipa(text): - for regex, replacement in _bopomofo_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def bopomofo_to_ipa2(text): - for regex, replacement in _bopomofo_to_ipa2: - text = re.sub(regex, replacement, text) - return text - - -def chinese_to_romaji(text): - text = number_to_chinese(text) - text = chinese_to_bopomofo(text) - text = latin_to_bopomofo(text) - text = bopomofo_to_romaji(text) - text = re.sub('i([aoe])', r'y\1', text) - text = re.sub('u([aoəe])', r'w\1', text) - text = re.sub('([ʦsɹ]`[⁼ʰ]?)([→↓↑ ]+|$)', - r'\1ɹ`\2', text).replace('ɻ', 'ɹ`') - text = re.sub('([ʦs][⁼ʰ]?)([→↓↑ ]+|$)', r'\1ɹ\2', text) - return text - - -def chinese_to_lazy_ipa(text): - text = chinese_to_romaji(text) - for regex, replacement in _romaji_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def chinese_to_ipa(text): - text = number_to_chinese(text) - text = chinese_to_bopomofo(text) - text = latin_to_bopomofo(text) - text = bopomofo_to_ipa(text) - text = re.sub('i([aoe])', r'j\1', text) - text = re.sub('u([aoəe])', r'w\1', text) - text = re.sub('([sɹ]`[⁼ʰ]?)([→↓↑ ]+|$)', - r'\1ɹ`\2', text).replace('ɻ', 'ɹ`') - text = re.sub('([s][⁼ʰ]?)([→↓↑ ]+|$)', r'\1ɹ\2', text) - return text - - -def chinese_to_ipa2(text): - text = number_to_chinese(text) - text = chinese_to_bopomofo(text) - text = latin_to_bopomofo(text) - text = bopomofo_to_ipa2(text) - text = re.sub(r'i([aoe])', r'j\1', text) - text = re.sub(r'u([aoəe])', r'w\1', text) - text = re.sub(r'([ʂɹ]ʰ?)([˩˨˧˦˥ ]+|$)', r'\1ʅ\2', text) - text = re.sub(r'(sʰ?)([˩˨˧˦˥ ]+|$)', r'\1ɿ\2', text) - return text diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Dawn Of War - Soulstorm Demo Fitgirl Repack.md b/spaces/quidiaMuxgu/Expedit-SAM/Dawn Of War - Soulstorm Demo Fitgirl Repack.md deleted file mode 100644 index 29b894493319e09668a49d8f512490457bc7a386..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Dawn Of War - Soulstorm Demo Fitgirl Repack.md +++ 
/dev/null @@ -1,10 +0,0 @@ - -

    Dawn of War: Soulstorm (full game) for PC, PS4 and Xbox One. The heart of the Soulstorm (full game) - PC - version 1. Dawn of War: Soulstorm - PC repack [repacked]. Free download of the Dawn of War: Soulstorm demo for PC [repacked]. How to play Dawn of War: Soulstorm: download the full version of the Dawn of War: Soulstorm demo.

    -

    Dawn of War - Soulstorm Demo fitgirl repack


    DOWNLOAD ○○○ https://geags.com/2uCsHJ



    -

    Warhammer 40,000: Dawn of War 2 v4.1.2: download the Dawn of War 2 game for free. This game is set in the fictional science-fiction universe of Warhammer 40,000 and the other fictional worlds of Warhammer 40,000: Dawn of War. Warhammer 40,000: Dawn of War 3 v3.0.

    -

    Download Warhammer 40,000: Dawn of War - Soulstorm (the restoration). A game with a lot of potential and a huge number of mods that will make it play even better. For more information about the game, read this review. 1.0.

    -


    -

    Dawn of War - Soulstorm demo fitgirl repack. Dawn of War is a real-time strategy video game developed by Relic Entertainment. The game was released in May 2006 and was the third in the Dawn of War series. It was released on a full retail disc for the PC, Xbox and PlayStation 2. It is the third and final expansion to the genre-defining and critically acclaimed RTS, Dawn of War. Dawn of War - Soulstorm introduces a new race, the Dark Eldar. The game also includes a new campaign, a new multiplayer map and several new units. The Dark Eldar are playable in the campaign and on online multiplayer maps, and players can customize their units with the new hero system. The Dark Eldar are the native race of the planet Tzeentch and were defeated by the forces of Chaos. However, the Dark Eldar retain some of their technological knowledge and have allied themselves with Chaos. Together, the Dark Eldar and Chaos can destroy the Eldar homeworld, but the player must stop them before they can fully destroy the planet. Releases the following fixes and addons: (release. dlc.

    -

    -
    -
    \ No newline at end of file diff --git a/spaces/rachana219/MODT2/trackers/multi_tracker_zoo.py b/spaces/rachana219/MODT2/trackers/multi_tracker_zoo.py deleted file mode 100644 index 0a41973f77fb4e1dd1cf552f78f020e7f16c542c..0000000000000000000000000000000000000000 --- a/spaces/rachana219/MODT2/trackers/multi_tracker_zoo.py +++ /dev/null @@ -1,52 +0,0 @@ -from trackers.strongsort.utils.parser import get_config - - -def create_tracker(tracker_type, tracker_config, reid_weights, device, half): - - cfg = get_config() - cfg.merge_from_file(tracker_config) - - if tracker_type == 'strongsort': - from trackers.strongsort.strong_sort import StrongSORT - strongsort = StrongSORT( - reid_weights, - device, - half, - max_dist=cfg.strongsort.max_dist, - max_iou_dist=cfg.strongsort.max_iou_dist, - max_age=cfg.strongsort.max_age, - max_unmatched_preds=cfg.strongsort.max_unmatched_preds, - n_init=cfg.strongsort.n_init, - nn_budget=cfg.strongsort.nn_budget, - mc_lambda=cfg.strongsort.mc_lambda, - ema_alpha=cfg.strongsort.ema_alpha, - - ) - return strongsort - - elif tracker_type == 'ocsort': - from trackers.ocsort.ocsort import OCSort - ocsort = OCSort( - det_thresh=cfg.ocsort.det_thresh, - max_age=cfg.ocsort.max_age, - min_hits=cfg.ocsort.min_hits, - iou_threshold=cfg.ocsort.iou_thresh, - delta_t=cfg.ocsort.delta_t, - asso_func=cfg.ocsort.asso_func, - inertia=cfg.ocsort.inertia, - use_byte=cfg.ocsort.use_byte, - ) - return ocsort - - elif tracker_type == 'bytetrack': - from trackers.bytetrack.byte_tracker import BYTETracker - bytetracker = BYTETracker( - track_thresh=cfg.bytetrack.track_thresh, - match_thresh=cfg.bytetrack.match_thresh, - track_buffer=cfg.bytetrack.track_buffer, - frame_rate=cfg.bytetrack.frame_rate - ) - return bytetracker - else: - print('No such tracker') - exit() \ No newline at end of file diff --git a/spaces/radames/Candle-T5-Generation-Wasm/README.md b/spaces/radames/Candle-T5-Generation-Wasm/README.md deleted file mode 100644 index 6b8523c6ccc1439291f02b809573cffd913c3731..0000000000000000000000000000000000000000 --- a/spaces/radames/Candle-T5-Generation-Wasm/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Candle T5 Generation Wasm -emoji: 🕯️🔡 -colorFrom: blue -colorTo: purple -sdk: static -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/radames/Real-Time-Latent-Consistency-Model-Text-To-Image/img2img/index.html b/spaces/radames/Real-Time-Latent-Consistency-Model-Text-To-Image/img2img/index.html deleted file mode 100644 index 1534cfd3f01334c96bc858ceae8357992b0ca4ae..0000000000000000000000000000000000000000 --- a/spaces/radames/Real-Time-Latent-Consistency-Model-Text-To-Image/img2img/index.html +++ /dev/null @@ -1,383 +0,0 @@ - - - - - - Real-Time Latent Consistency Model - - - - - - - - - -
    - Real-Time Latent Consistency Model
    - Image to Image
    - This demo showcases LCM Image to Image pipeline using Diffusers with a MJPEG stream server.
    - There are 0 user(s) sharing the same GPU, affecting real-time performance. Maximum queue size is 4. Duplicate and run it on your own GPU.
    - Prompt
    - Change the prompt to generate different images, accepts Compel syntax.
    - Advanced Options
    - - - \ No newline at end of file diff --git a/spaces/radames/gradio-blender-bpy/app.py b/spaces/radames/gradio-blender-bpy/app.py deleted file mode 100644 index cbf1910fc8db399823a71f2b7c24d715939661d0..0000000000000000000000000000000000000000 --- a/spaces/radames/gradio-blender-bpy/app.py +++ /dev/null @@ -1,192 +0,0 @@ -import gradio as gr -import base64 -from PIL import ImageColor -from pathlib import Path -import bpy -from tqdm import tqdm -from math import pi -import tempfile - - -def enable_GPUS(): - bpy.data.scenes[0].render.engine = "CYCLES" #"CYCLES" - # Set the device_type - bpy.context.preferences.addons[ - "cycles" - ].preferences.compute_device_type = "CUDA" # or "OPENCL" - - # Set the device and feature set - bpy.context.scene.cycles.device = "GPU" - - for scene in bpy.data.scenes: - scene.cycles.device = "GPU" - - bpy.context.preferences.addons["cycles"].preferences.get_devices() - print(bpy.context.preferences.addons["cycles"].preferences.compute_device_type) - for d in bpy.context.preferences.addons["cycles"].preferences.devices: - d["use"] = True # Using all devices, include GPU and CPU - print(d["name"]) - - -enable_GPUS() - -# bpy.ops.wm.read_factory_settings(use_empty=True) - -def generate( - color1, - color2, - camera_X, - camera_Y, - camera_Z, - fov, - torus_X, - torus_Y, - torus_Z, - progress=gr.Progress(track_tqdm=True), -): - rgb1 = ImageColor.getcolor(color1, "RGBA") - rgb1 = tuple(v / 255.0 for v in rgb1) - rgb2 = ImageColor.getcolor(color2, "RGBA") - rgb2 = tuple(v / 255.0 for v in rgb2) - - - # Delete all mesh objects from the scene - for obj in bpy.context.scene.objects: - # If the object is of MESH type - if obj.type == 'MESH': - # Delete the object - bpy.data.objects.remove(obj, do_unlink=True) - # Add a torus - bpy.ops.mesh.primitive_torus_add( - major_radius=1.5, - minor_radius=0.75, - major_segments=12*4, - minor_segments=12*4, - align="WORLD", - location=(0, 1, 1), - rotation=(torus_X,torus_Y,torus_Z) - - ) - - # Assigning the torus to a variable - torus = bpy.context.view_layer.objects.active - - # Create a new material and assign it to the torus - material = bpy.data.materials.new(name="RainbowGradient") - torus.data.materials.append(material) - material.use_nodes = True - nodes = material.node_tree.nodes - - # Clear default nodes - for node in nodes: - nodes.remove(node) - - # Add a Gradient Texture and set it to a color ramp of a rainbow - gradient = nodes.new(type="ShaderNodeTexGradient") - gradient.gradient_type = "LINEAR" - gradient.location = (0, 0) - - ramp = nodes.new(type="ShaderNodeValToRGB") - ramp.color_ramp.interpolation = "LINEAR" - ramp.location = (200, 0) - - ramp.color_ramp.elements[0].color = rgb1 - ramp.color_ramp.elements[1].color = rgb2 - - # Add Shader nodes - bsdf = nodes.new(type="ShaderNodeBsdfPrincipled") - bsdf.location = (400, 0) - - output = nodes.new(type="ShaderNodeOutputMaterial") - output.location = (600, 0) - - # Connect the nodes - material.node_tree.links.new - material.node_tree.links.new(gradient.outputs["Color"], ramp.inputs[0]) - material.node_tree.links.new(ramp.outputs["Color"], bsdf.inputs["Base Color"]) - material.node_tree.links.new(bsdf.outputs["BSDF"], output.inputs["Surface"]) - - # Rotate the gradient to apply it from left to right - torus = bpy.context.view_layer.objects.active - # torus.rotation_euler = - - # Light - light = bpy.data.objects["Light"] - light.location = (0.1, 0, 2) # Position the light - - # Camera - camera = bpy.data.objects["Camera"] - camera.location = (camera_X, camera_Y, 
camera_Z) - camera.data.dof.use_dof = True - camera.data.dof.focus_distance = 5 - camera.data.dof.aperture_fstop = 4 - camera.data.angle = fov - camera.data.type = 'PERSP' - - # Render - with tempfile.NamedTemporaryFile(suffix=".JPEG", delete=False) as f: - - bpy.context.scene.render.resolution_y = 288 - bpy.context.scene.render.resolution_x = 512 - bpy.context.scene.render.image_settings.file_format = "JPEG" - bpy.context.scene.render.filepath = f.name - - with tqdm() as pbar: - - def elapsed(dummy): - pbar.update() - - bpy.app.handlers.render_stats.append(elapsed) - bpy.context.scene.frame_set(1) - bpy.context.scene.frame_current = 1 - - # bpy.ops.render.render(animation=False, write_still=True) - # bpy.ops.render.render(animation=False, write_still=True) - bpy.ops.render.render(animation=False, write_still=True) - - bpy.data.images["Render Result"].save_render( - filepath=bpy.context.scene.render.filepath - ) - bpy.app.handlers.render_stats.clear() - return f.name - - -# generate("#ffffff", "#aaa", 1) -with gr.Blocks() as demo: - gr.Markdown("""# Gradio with Blender bpy - based on [kolibril13](https://github.com/kolibril13/ipyblender-experimental) - """) - with gr.Row(): - with gr.Column(): - color1 = gr.ColorPicker(value="#59C173") - color2 = gr.ColorPicker(value="#5D26C1") - torus_X = gr.Slider(minimum=-pi, maximum=pi, value=0, label="Torus φ") - torus_Y = gr.Slider(minimum=-pi, maximum=pi, value=-3, label="Torus θ") - torus_Z = gr.Slider(minimum=-pi, maximum=pi, value=1.5, label="Torus ψ") - fov = gr.Slider(minimum=0.0, maximum=pi, value=pi/3, label="FOV") - camera_X = gr.Slider(minimum=-100, maximum=100, value=5, label="Camera X") - camera_Y = gr.Slider(minimum=-100, maximum=100, value=-3, label="Camera Y") - camera_Z = gr.Slider(minimum=-100, maximum=100, value=4, label="Camera Z") - - render_btn = gr.Button("Render") - with gr.Column(scale=3): - image = gr.Image(type="filepath") - - render_btn.click( - generate, - inputs=[ - color1, - color2, - camera_X, - camera_Y, - camera_Z, - fov, - torus_X, - torus_Y, - torus_Z, - ], - outputs=[image], - ) - -demo.queue(concurrency_count=1) -demo.launch(debug=True, inline=True) \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/AniceSoft EPUB Converter For Windows V9 8 9 Incl Keygen-AMPED Serial Key Keygen.md b/spaces/raedeXanto/academic-chatgpt-beta/AniceSoft EPUB Converter For Windows V9 8 9 Incl Keygen-AMPED Serial Key Keygen.md deleted file mode 100644 index a0706a46c290526c86f1cd0d07716e04e0c65ee1..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/AniceSoft EPUB Converter For Windows V9 8 9 Incl Keygen-AMPED Serial Key Keygen.md +++ /dev/null @@ -1,15 +0,0 @@ - -

    AniceSoft EPUB Converter for Windows v9 8 9: A Powerful Tool to Convert eBooks

    -

    If you are looking for a reliable and easy-to-use software to convert your eBooks to different formats, you might want to check out AniceSoft EPUB Converter for Windows v9 8 9. This software can help you convert EPUB files to PDF, MOBI, AZW, TXT, HTML and more. It can also convert other formats to EPUB, such as PDF, DOCX, TXT, HTML and more.

    -

    AniceSoft EPUB Converter for Windows v9 8 9 has a simple and intuitive interface that allows you to add files by drag and drop or by browsing your computer. You can also batch convert multiple files at once and customize the output settings according to your preferences. You can choose the output format, the output folder, the page size, the font size, the margin and more.

    -

    AniceSoft EPUB Converter for Windows v9 8 9 Incl Keygen-AMPED Serial Key keygen


    Download https://tinourl.com/2uKZdP



    -

    One of the best features of AniceSoft EPUB Converter for Windows v9 8 9 is that it can preserve the original layout, formatting, images and metadata of your eBooks. It can also handle encrypted EPUB files and remove DRM protection from them. You can also preview the converted files before saving them to your computer.

    -

    AniceSoft EPUB Converter for Windows v9 8 9 is compatible with Windows XP, Vista, 7, 8 and 10. It requires a minimum of 512 MB RAM and 100 MB free disk space. It supports multiple languages and has a user manual for reference.

    -

    If you want to try AniceSoft EPUB Converter for Windows v9 8 9 for yourself, you can download it from the official website or from various online sources. However, if you want to enjoy the full features of the software, you will need to purchase a license key. Fortunately, there is a way to get a serial key for free by using a keygen tool.

    -

    A keygen tool is a program that can generate valid serial keys for various software products. By using a keygen tool, you can activate AniceSoft EPUB Converter for Windows v9 8 9 without paying anything. However, you should be careful when downloading and using keygen tools as they may contain viruses or malware that can harm your computer.

    -

    One of the most trusted and popular keygen tools for AniceSoft EPUB Converter for Windows v9 8 9 is AMPED Keygen. This tool can generate unlimited serial keys for AniceSoft EPUB Converter for Windows v9 8 9 in a matter of seconds. It is also safe and easy to use. All you have to do is download AMPED Keygen from a reliable source, run it on your computer, copy the generated serial key and paste it into the registration window of AniceSoft EPUB Converter for Windows v9 8 9.

    -

    -

    By using AMPED Keygen, you can enjoy AniceSoft EPUB Converter for Windows v9 8 9 without any limitations or restrictions. You can convert as many eBooks as you want and enjoy them on any device or platform. You can also share them with your friends or family without any hassle.

    -

    AniceSoft EPUB Converter for Windows v9 8 9 is a powerful tool to convert eBooks that you should not miss. With AMPED Keygen, you can get it for free and enjoy its full benefits. Download AMPED Keygen today and start converting your eBooks with AniceSoft EPUB Converter for Windows v9 8 9.

    -
    -
    \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Blitzkrieg Commander Rulebook Pdf [2021] Free.md b/spaces/raedeXanto/academic-chatgpt-beta/Blitzkrieg Commander Rulebook Pdf [2021] Free.md deleted file mode 100644 index d630232001010b560cbe3f03715e4da75974c437..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Blitzkrieg Commander Rulebook Pdf [2021] Free.md +++ /dev/null @@ -1,37 +0,0 @@ - -

    How to Play Blitzkrieg Commander: A Fast and Fun Wargame for World War 2

    -

    Blitzkrieg Commander is a set of wargame rules that allow you to re-create the battles of World War 2 using miniatures on a tabletop. The rules are suitable for solo, two-player and multi-player games, at home, at the club or at a convention. Blitzkrieg Commander is designed to be fast and fun, with streamlined mechanics and intuitive gameplay that let you focus on the action and not the rulebook.

    -

    The third edition of Blitzkrieg Commander has been updated and improved with new features, such as:

    -

    Blitzkrieg Commander Rulebook Pdf Free


    Download ––– https://tinourl.com/2uL0qX



    -
    • A revised command system that gives more flexibility and realism to the orders you can give to your units.
    • A refined combat system that balances firepower, morale and suppression effects.
    • A comprehensive army list section that covers all the major and minor nations involved in World War 2, with over 400 unit types and variants.
    • A scenario generator that helps you create balanced and interesting games with minimal preparation.
    • A campaign system that allows you to link your battles into a larger narrative.
    -

    If you are looking for a wargame that is easy to learn, quick to play and fun to master, then Blitzkrieg Commander is the game for you. You can download a free pdf of the rulebook from the official website: http://www.blitzkrieg-commander.com/Content/Downloads/default.aspx

    -

    Blitzkrieg Commander is compatible with any scale of miniatures, from 2mm to 28mm, and any basing system. You can use any models you already have, or buy new ones from the many manufacturers that produce World War 2 miniatures. You can also use paper counters or tokens if you prefer.

    -

    Blitzkrieg Commander is a game that can be enjoyed by anyone who loves history, strategy and fun. Whether you are a veteran wargamer or a beginner, you will find something to like in Blitzkrieg Commander. So grab your dice, your miniatures and your friends, and get ready for some World War 2 action!

    - -

    How to Get Started with Blitzkrieg Commander

    -

    If you are interested in playing Blitzkrieg Commander, you will need the following items:

    -
    1. The rulebook: You can download a free pdf of the rulebook from the official website, or buy a printed copy from Pendraken Miniatures.
    2. Miniatures: You can use any scale of miniatures you like, from 2mm to 28mm. You will need miniatures to represent infantry, tanks, artillery, aircraft and other units. You can use any models you already have, or buy new ones from the many manufacturers that produce World War 2 miniatures. You can also use paper counters or tokens if you prefer.
    3. Terrain: You will need a tabletop to play on, preferably at least 4 feet by 6 feet. You will also need some terrain features to make your battlefield more realistic and interesting, such as hills, woods, buildings, roads, rivers and bridges. You can use any terrain you already have, or buy new ones from the many manufacturers that produce wargaming terrain. You can also make your own terrain using cardboard, foam, plastic or other materials.
    4. Dice: You will need some six-sided dice (D6) to play the game. You will also need some differently coloured dice or markers to indicate command points and suppression points.
    5. Measuring tools: You will need some measuring tools to measure distances and angles on the tabletop. You can use a tape measure, a ruler or a template.
    6. Army lists: You will need some army lists to choose your forces and calculate their points value. The rulebook contains over 400 unit types and variants for all the major and minor nations involved in World War 2. You can also find additional army lists on the official website and the forum.
    -

    Once you have all these items, you are ready to play your first game of Blitzkrieg Commander. Here are some tips and tricks to help you get started:

    -
    • Start small: For your first game, it is recommended that you play a small scenario with only a few units on each side. This will help you learn the basic rules and mechanics without getting overwhelmed by too many options and details.
    • Use the quick-reference sheet: The rulebook comes with a double-sided quick-reference sheet that summarizes the most important rules and tables. You can print it out or keep it on your device for easy reference during the game.
    • Use the scenario generator: The rulebook contains a scenario generator that helps you create balanced and interesting games with minimal preparation. You can use it to determine the size of your forces, the type of mission, the objectives, the deployment zones, the terrain and the special rules for each game (see the sketch after this list).
    • Use the forum: The official website has a forum where you can ask questions, share your experiences, post your battle reports and get feedback from other players. The forum is also a great place to find new opponents and join online campaigns.
    -
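    To make the idea of a dice-driven scenario generator concrete, here is a minimal Python sketch. The mission table, the values in it and the attacker roll-off are placeholders for illustration only, not the actual tables from the Blitzkrieg Commander rulebook.

```python
import random

def roll_d6(n: int = 1) -> int:
    """Roll n six-sided dice and return the total."""
    return sum(random.randint(1, 6) for _ in range(n))

def generate_scenario(points_per_side: int = 1000) -> dict:
    """Set up a game the table-driven way: roll a D6 against a mission
    table, then an opposed roll-off to decide who attacks."""
    # Placeholder mission table; the real BKC3 tables are in the rulebook.
    mission_table = {1: "Encounter", 2: "Encounter", 3: "Attack/Defence",
                     4: "Attack/Defence", 5: "Breakthrough", 6: "Rearguard"}
    mission = mission_table[roll_d6()]

    # Opposed roll-off to decide the attacker; re-roll ties.
    a, b = roll_d6(), roll_d6()
    while a == b:
        a, b = roll_d6(), roll_d6()
    attacker = "Player 1" if a > b else "Player 2"

    return {"mission": mission, "attacker": attacker,
            "points_per_side": points_per_side}

if __name__ == "__main__":
    print(generate_scenario())
```

    In play you would of course roll the dice by hand; the sketch is only meant to show how little preparation a table-driven setup like this needs.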

    Blitzkrieg Commander is a game that can be played in many different ways and styles. You can play historical scenarios based on real battles or campaigns, or create your own fictional scenarios using your imagination. You can play solo against yourself or an AI opponent, or play with friends online or face-to-face. You can play casually for fun or competitively for glory. The choice is yours!

    -

    -
    -
    \ No newline at end of file diff --git a/spaces/raghuram13/extract_text_from_image/README.md b/spaces/raghuram13/extract_text_from_image/README.md deleted file mode 100644 index 940517c5ce9d12593744444e9619f02db1207a82..0000000000000000000000000000000000000000 --- a/spaces/raghuram13/extract_text_from_image/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Extract Text From Image -emoji: 🤗 -colorFrom: indigo -colorTo: gray -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false -license: cc ---- - - -# extracttextfromimage -deploy 🤗spaces using streamlit app diff --git a/spaces/rahul999r/Rahul_Kannada_TTS/src/glow_tts/stft.py b/spaces/rahul999r/Rahul_Kannada_TTS/src/glow_tts/stft.py deleted file mode 100644 index 5852bd20904c9c206030523737ce3fbd64300a0c..0000000000000000000000000000000000000000 --- a/spaces/rahul999r/Rahul_Kannada_TTS/src/glow_tts/stft.py +++ /dev/null @@ -1,185 +0,0 @@ -""" -BSD 3-Clause License - -Copyright (c) 2017, Prem Seetharaman -All rights reserved. - -* Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, this - list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -* Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from this - software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-""" - -import torch -import numpy as np -import torch.nn.functional as F -from torch.autograd import Variable -from scipy.signal import get_window -from librosa.util import pad_center, tiny -from librosa import stft, istft -from audio_processing import window_sumsquare - - -class STFT(torch.nn.Module): - """adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft""" - - def __init__( - self, filter_length=800, hop_length=200, win_length=800, window="hann" - ): - super(STFT, self).__init__() - self.filter_length = filter_length - self.hop_length = hop_length - self.win_length = win_length - self.window = window - self.forward_transform = None - scale = self.filter_length / self.hop_length - fourier_basis = np.fft.fft(np.eye(self.filter_length)) - - cutoff = int((self.filter_length / 2 + 1)) - fourier_basis = np.vstack( - [np.real(fourier_basis[:cutoff, :]), np.imag(fourier_basis[:cutoff, :])] - ) - - forward_basis = torch.FloatTensor(fourier_basis[:, None, :]) - inverse_basis = torch.FloatTensor( - np.linalg.pinv(scale * fourier_basis).T[:, None, :] - ) - - if window is not None: - assert filter_length >= win_length - # get window and zero center pad it to filter_length - fft_window = get_window(window, win_length, fftbins=True) - fft_window = pad_center(fft_window, filter_length) - fft_window = torch.from_numpy(fft_window).float() - - # window the bases - forward_basis *= fft_window - inverse_basis *= fft_window - - self.register_buffer("forward_basis", forward_basis.float()) - self.register_buffer("inverse_basis", inverse_basis.float()) - - def transform(self, input_data): - num_batches = input_data.size(0) - num_samples = input_data.size(1) - - self.num_samples = num_samples - - if input_data.device.type == "cuda": - # similar to librosa, reflect-pad the input - input_data = input_data.view(num_batches, 1, num_samples) - input_data = F.pad( - input_data.unsqueeze(1), - (int(self.filter_length / 2), int(self.filter_length / 2), 0, 0), - mode="reflect", - ) - input_data = input_data.squeeze(1) - - forward_transform = F.conv1d( - input_data, self.forward_basis, stride=self.hop_length, padding=0 - ) - - cutoff = int((self.filter_length / 2) + 1) - real_part = forward_transform[:, :cutoff, :] - imag_part = forward_transform[:, cutoff:, :] - else: - x = input_data.detach().numpy() - real_part = [] - imag_part = [] - for y in x: - y_ = stft( - y, self.filter_length, self.hop_length, self.win_length, self.window - ) - real_part.append(y_.real[None, :, :]) - imag_part.append(y_.imag[None, :, :]) - real_part = np.concatenate(real_part, 0) - imag_part = np.concatenate(imag_part, 0) - - real_part = torch.from_numpy(real_part).to(input_data.dtype) - imag_part = torch.from_numpy(imag_part).to(input_data.dtype) - - magnitude = torch.sqrt(real_part ** 2 + imag_part ** 2) - phase = torch.atan2(imag_part.data, real_part.data) - - return magnitude, phase - - def inverse(self, magnitude, phase): - recombine_magnitude_phase = torch.cat( - [magnitude * torch.cos(phase), magnitude * torch.sin(phase)], dim=1 - ) - - if magnitude.device.type == "cuda": - inverse_transform = F.conv_transpose1d( - recombine_magnitude_phase, - self.inverse_basis, - stride=self.hop_length, - padding=0, - ) - - if self.window is not None: - window_sum = window_sumsquare( - self.window, - magnitude.size(-1), - hop_length=self.hop_length, - win_length=self.win_length, - n_fft=self.filter_length, - dtype=np.float32, - ) - # remove modulation effects - approx_nonzero_indices = torch.from_numpy( - np.where(window_sum > 
tiny(window_sum))[0] - ) - window_sum = torch.from_numpy(window_sum).to(inverse_transform.device) - inverse_transform[:, :, approx_nonzero_indices] /= window_sum[ - approx_nonzero_indices - ] - - # scale by hop ratio - inverse_transform *= float(self.filter_length) / self.hop_length - - inverse_transform = inverse_transform[:, :, int(self.filter_length / 2) :] - inverse_transform = inverse_transform[ - :, :, : -int(self.filter_length / 2) : - ] - inverse_transform = inverse_transform.squeeze(1) - else: - x_org = recombine_magnitude_phase.detach().numpy() - n_b, n_f, n_t = x_org.shape - x = np.empty([n_b, n_f // 2, n_t], dtype=np.complex64) - x.real = x_org[:, : n_f // 2] - x.imag = x_org[:, n_f // 2 :] - inverse_transform = [] - for y in x: - y_ = istft(y, self.hop_length, self.win_length, self.window) - inverse_transform.append(y_[None, :]) - inverse_transform = np.concatenate(inverse_transform, 0) - inverse_transform = torch.from_numpy(inverse_transform).to( - recombine_magnitude_phase.dtype - ) - - return inverse_transform - - def forward(self, input_data): - self.magnitude, self.phase = self.transform(input_data) - reconstruction = self.inverse(self.magnitude, self.phase) - return reconstruction diff --git "a/spaces/rainy3/chatgpt_academic/crazy_functions/\346\200\273\347\273\223word\346\226\207\346\241\243.py" "b/spaces/rainy3/chatgpt_academic/crazy_functions/\346\200\273\347\273\223word\346\226\207\346\241\243.py" deleted file mode 100644 index 742c7abc30ed7b0c74deca2c5a616d3d201402e8..0000000000000000000000000000000000000000 --- "a/spaces/rainy3/chatgpt_academic/crazy_functions/\346\200\273\347\273\223word\346\226\207\346\241\243.py" +++ /dev/null @@ -1,139 +0,0 @@ -from toolbox import update_ui -from toolbox import CatchException, report_execption, write_results_to_file -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -fast_debug = False - - -def 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): - import time, os - # pip install python-docx 用于docx格式,跨平台 - # pip install pywin32 用于doc格式,仅支持Win平台 - - print('begin analysis on:', file_manifest) - for index, fp in enumerate(file_manifest): - if fp.split(".")[-1] == "docx": - from docx import Document - doc = Document(fp) - file_content = "\n".join([para.text for para in doc.paragraphs]) - else: - import win32com.client - word = win32com.client.Dispatch("Word.Application") - word.visible = False - # 打开文件 - print('fp', os.getcwd()) - doc = word.Documents.Open(os.getcwd() + '/' + fp) - # file_content = doc.Content.Text - doc = word.ActiveDocument - file_content = doc.Range().Text - doc.Close() - word.Quit() - - print(file_content) - - prefix = "接下来请你逐文件分析下面的论文文件," if index == 0 else "" - # private_upload里面的文件名在解压zip后容易出现乱码(rar和7z格式正常),故可以只分析文章内容,不输入文件名 - i_say = prefix + f'请对下面的文章片段用中英文做概述,文件名是{os.path.relpath(fp, project_folder)},' \ - f'文章内容是 ```{file_content}```' - i_say_show_user = prefix + f'[{index+1}/{len(file_manifest)}] 假设你是论文审稿专家,请对下面的文章片段做概述: {os.path.abspath(fp)}' - chatbot.append((i_say_show_user, "[Local Message] waiting gpt response.")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - if not fast_debug: - msg = '正常' - # ** gpt request ** - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say_show_user, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history=[], - sys_prompt="总结文章。" - ) # 带超时倒计时 - chatbot[-1] = (i_say_show_user, gpt_say) - history.append(i_say_show_user) - 
history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - if not fast_debug: time.sleep(2) - - """ - # 可按需启用 - i_say = f'根据你上述的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一篇英文的。' - chatbot.append((i_say, "[Local Message] waiting gpt response.")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - - i_say = f'我想让你做一个论文写作导师。您的任务是使用人工智能工具(例如自然语言处理)提供有关如何改进其上述文章的反馈。' \ - f'您还应该利用您在有效写作技巧方面的修辞知识和经验来建议作者可以更好地以书面形式表达他们的想法和想法的方法。' \ - f'根据你之前的分析,提出建议' - chatbot.append((i_say, "[Local Message] waiting gpt response.")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - """ - - if not fast_debug: - msg = '正常' - # ** gpt request ** - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history=history, - sys_prompt="总结文章。" - ) # 带超时倒计时 - chatbot[-1] = (i_say, gpt_say) - history.append(i_say) - history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - res = write_results_to_file(history) - chatbot.append(("完成了吗?", res)) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - - -@CatchException -def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - import glob, os - - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "批量总结Word文档。函数插件贡献者: JasonGuo1"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - from docx import Document - except: - report_execption(chatbot, history, - a=f"解析项目: {txt}", - b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade python-docx pywin32```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 清空历史,以免输入溢出 - history = [] - - # 检测输入参数,如没有给定输入参数,直接退出 - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 搜索需要处理的文件清单 - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.docx', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.doc', recursive=True)] - # [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + \ - # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \ - # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] - - # 如果没找到任何文件 - if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.docx或doc文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 开始正式执行任务 - yield from 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) diff --git a/spaces/ramiin2/AutoGPT/autogpt/utils.py b/spaces/ramiin2/AutoGPT/autogpt/utils.py deleted file mode 100644 index e93d5ac740097ee144d1809aea31c0f7fb242fa5..0000000000000000000000000000000000000000 --- a/spaces/ramiin2/AutoGPT/autogpt/utils.py +++ /dev/null @@ -1,77 +0,0 @@ -import os - -import requests -import yaml -from colorama import Fore -from git import Repo - - -def clean_input(prompt: str = ""): - try: - return input(prompt) - except KeyboardInterrupt: - print("You interrupted Auto-GPT") - print("Quitting...") - exit(0) - - -def validate_yaml_file(file: str): - try: - with open(file, encoding="utf-8") as fp: - yaml.load(fp.read(), Loader=yaml.FullLoader) - except FileNotFoundError: - return (False, f"The file 
{Fore.CYAN}`{file}`{Fore.RESET} wasn't found") - except yaml.YAMLError as e: - return ( - False, - f"There was an issue while trying to read with your AI Settings file: {e}", - ) - - return (True, f"Successfully validated {Fore.CYAN}`{file}`{Fore.RESET}!") - - -def readable_file_size(size, decimal_places=2): - """Converts the given size in bytes to a readable format. - Args: - size: Size in bytes - decimal_places (int): Number of decimal places to display - """ - for unit in ["B", "KB", "MB", "GB", "TB"]: - if size < 1024.0: - break - size /= 1024.0 - return f"{size:.{decimal_places}f} {unit}" - - -def get_bulletin_from_web() -> str: - try: - response = requests.get( - "https://raw.githubusercontent.com/Significant-Gravitas/Auto-GPT/master/BULLETIN.md" - ) - if response.status_code == 200: - return response.text - except: - return "" - - -def get_current_git_branch() -> str: - try: - repo = Repo(search_parent_directories=True) - branch = repo.active_branch - return branch.name - except: - return "" - - -def get_latest_bulletin() -> str: - exists = os.path.exists("CURRENT_BULLETIN.md") - current_bulletin = "" - if exists: - current_bulletin = open("CURRENT_BULLETIN.md", "r", encoding="utf-8").read() - new_bulletin = get_bulletin_from_web() - is_new_news = new_bulletin != current_bulletin - - if new_bulletin and is_new_news: - open("CURRENT_BULLETIN.md", "w", encoding="utf-8").write(new_bulletin) - return f" {Fore.RED}::UPDATED:: {Fore.CYAN}{new_bulletin}{Fore.RESET}" - return current_bulletin diff --git a/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/ts4.8/assert/strict.d.ts b/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/ts4.8/assert/strict.d.ts deleted file mode 100644 index b4319b974861f6cad84b745485af55264b13c3d8..0000000000000000000000000000000000000000 --- a/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/ts4.8/assert/strict.d.ts +++ /dev/null @@ -1,8 +0,0 @@ -declare module 'assert/strict' { - import { strict } from 'node:assert'; - export = strict; -} -declare module 'node:assert/strict' { - import { strict } from 'node:assert'; - export = strict; -} diff --git a/spaces/razfar/anything-counter/utils/datasets.py b/spaces/razfar/anything-counter/utils/datasets.py deleted file mode 100644 index 0cdc72ccb3de0d9e7408830369b22bdc2bfe0e5f..0000000000000000000000000000000000000000 --- a/spaces/razfar/anything-counter/utils/datasets.py +++ /dev/null @@ -1,1320 +0,0 @@ -# Dataset utils and dataloaders - -import glob -import logging -import math -import os -import random -import shutil -import time -from itertools import repeat -from multiprocessing.pool import ThreadPool -from pathlib import Path -from threading import Thread - -import cv2 -import numpy as np -import torch -import torch.nn.functional as F -from PIL import Image, ExifTags -from torch.utils.data import Dataset -from tqdm import tqdm - -import pickle -from copy import deepcopy -#from pycocotools import mask as maskUtils -from torchvision.utils import save_image -from torchvision.ops import roi_pool, roi_align, ps_roi_pool, ps_roi_align - -from utils.general import check_requirements, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, \ - resample_segments, clean_str -from utils.torch_utils import torch_distributed_zero_first - -# Parameters -help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' -img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes -vid_formats = ['mov', 'avi', 
'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes -logger = logging.getLogger(__name__) - -# Get orientation exif tag -for orientation in ExifTags.TAGS.keys(): - if ExifTags.TAGS[orientation] == 'Orientation': - break - - -def get_hash(files): - # Returns a single hash value of a list of files - return sum(os.path.getsize(f) for f in files if os.path.isfile(f)) - - -def exif_size(img): - # Returns exif-corrected PIL size - s = img.size # (width, height) - try: - rotation = dict(img._getexif().items())[orientation] - if rotation == 6: # rotation 270 - s = (s[1], s[0]) - elif rotation == 8: # rotation 90 - s = (s[1], s[0]) - except: - pass - - return s - - -def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False, - rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''): - # Make sure only the first process in DDP process the dataset first, and the following others can use the cache - with torch_distributed_zero_first(rank): - dataset = LoadImagesAndLabels(path, imgsz, batch_size, - augment=augment, # augment images - hyp=hyp, # augmentation hyperparameters - rect=rect, # rectangular training - cache_images=cache, - single_cls=opt.single_cls, - stride=int(stride), - pad=pad, - image_weights=image_weights, - prefix=prefix) - - batch_size = min(batch_size, len(dataset)) - nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers - sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None - loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader - # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader() - dataloader = loader(dataset, - batch_size=batch_size, - num_workers=nw, - sampler=sampler, - pin_memory=True, - collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn) - return dataloader, dataset - - -class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader): - """ Dataloader that reuses workers - - Uses same syntax as vanilla DataLoader - """ - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) - self.iterator = super().__iter__() - - def __len__(self): - return len(self.batch_sampler.sampler) - - def __iter__(self): - for i in range(len(self)): - yield next(self.iterator) - - -class _RepeatSampler(object): - """ Sampler that repeats forever - - Args: - sampler (Sampler) - """ - - def __init__(self, sampler): - self.sampler = sampler - - def __iter__(self): - while True: - yield from iter(self.sampler) - - -class LoadImages: # for inference - def __init__(self, path, img_size=640, stride=32): - p = str(Path(path).absolute()) # os-agnostic absolute path - if '*' in p: - files = sorted(glob.glob(p, recursive=True)) # glob - elif os.path.isdir(p): - files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir - elif os.path.isfile(p): - files = [p] # files - else: - raise Exception(f'ERROR: {p} does not exist') - - images = [x for x in files if x.split('.')[-1].lower() in img_formats] - videos = [x for x in files if x.split('.')[-1].lower() in vid_formats] - ni, nv = len(images), len(videos) - - self.img_size = img_size - self.stride = stride - self.files = images + videos - self.nf = ni + nv # number of files - self.video_flag = [False] * ni + [True] * nv - self.mode = 'image' - if any(videos): - 
self.new_video(videos[0]) # new video - else: - self.cap = None - assert self.nf > 0, f'No images or videos found in {p}. ' \ - f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}' - - def __iter__(self): - self.count = 0 - return self - - def __next__(self): - if self.count == self.nf: - raise StopIteration - path = self.files[self.count] - - if self.video_flag[self.count]: - # Read video - self.mode = 'video' - ret_val, img0 = self.cap.read() - if not ret_val: - self.count += 1 - self.cap.release() - if self.count == self.nf: # last video - raise StopIteration - else: - path = self.files[self.count] - self.new_video(path) - ret_val, img0 = self.cap.read() - - self.frame += 1 - print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.nframes}) {path}: ', end='') - - else: - # Read image - self.count += 1 - img0 = cv2.imread(path) # BGR - assert img0 is not None, 'Image Not Found ' + path - #print(f'image {self.count}/{self.nf} {path}: ', end='') - - # Padded resize - img = letterbox(img0, self.img_size, stride=self.stride)[0] - - # Convert - img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 - img = np.ascontiguousarray(img) - - return path, img, img0, self.cap - - def new_video(self, path): - self.frame = 0 - self.cap = cv2.VideoCapture(path) - self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) - - def __len__(self): - return self.nf # number of files - - -class LoadWebcam: # for inference - def __init__(self, pipe='0', img_size=640, stride=32): - self.img_size = img_size - self.stride = stride - - if pipe.isnumeric(): - pipe = eval(pipe) # local camera - # pipe = 'rtsp://192.168.1.64/1' # IP camera - # pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login - # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera - - self.pipe = pipe - self.cap = cv2.VideoCapture(pipe) # video capture object - self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size - - def __iter__(self): - self.count = -1 - return self - - def __next__(self): - self.count += 1 - if cv2.waitKey(1) == ord('q'): # q to quit - self.cap.release() - cv2.destroyAllWindows() - raise StopIteration - - # Read frame - if self.pipe == 0: # local camera - ret_val, img0 = self.cap.read() - img0 = cv2.flip(img0, 1) # flip left-right - else: # IP camera - n = 0 - while True: - n += 1 - self.cap.grab() - if n % 30 == 0: # skip frames - ret_val, img0 = self.cap.retrieve() - if ret_val: - break - - # Print - assert ret_val, f'Camera Error {self.pipe}' - img_path = 'webcam.jpg' - print(f'webcam {self.count}: ', end='') - - # Padded resize - img = letterbox(img0, self.img_size, stride=self.stride)[0] - - # Convert - img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 - img = np.ascontiguousarray(img) - - return img_path, img, img0, None - - def __len__(self): - return 0 - - -class LoadStreams: # multiple IP or RTSP cameras - def __init__(self, sources='streams.txt', img_size=640, stride=32): - self.mode = 'stream' - self.img_size = img_size - self.stride = stride - - if os.path.isfile(sources): - with open(sources, 'r') as f: - sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())] - else: - sources = [sources] - - n = len(sources) - self.imgs = [None] * n - self.sources = [clean_str(x) for x in sources] # clean source names for later - for i, s in enumerate(sources): - # Start the thread to read frames from the video stream - print(f'{i + 1}/{n}: {s}... 
', end='') - url = eval(s) if s.isnumeric() else s - if 'youtube.com/' in url or 'youtu.be/' in url: # if source is YouTube video - check_requirements(('pafy', 'youtube_dl')) - import pafy - url = pafy.new(url).getbest(preftype="mp4").url - cap = cv2.VideoCapture(url) - assert cap.isOpened(), f'Failed to open {s}' - w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) - h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) - self.fps = cap.get(cv2.CAP_PROP_FPS) % 100 - - _, self.imgs[i] = cap.read() # guarantee first frame - thread = Thread(target=self.update, args=([i, cap]), daemon=True) - print(f' success ({w}x{h} at {self.fps:.2f} FPS).') - thread.start() - print('') # newline - - # check for common shapes - s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0) # shapes - self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal - if not self.rect: - print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.') - - def update(self, index, cap): - # Read next stream frame in a daemon thread - n = 0 - while cap.isOpened(): - n += 1 - # _, self.imgs[index] = cap.read() - cap.grab() - if n == 4: # read every 4th frame - success, im = cap.retrieve() - self.imgs[index] = im if success else self.imgs[index] * 0 - n = 0 - time.sleep(1 / self.fps) # wait time - - def __iter__(self): - self.count = -1 - return self - - def __next__(self): - self.count += 1 - img0 = self.imgs.copy() - if cv2.waitKey(1) == ord('q'): # q to quit - cv2.destroyAllWindows() - raise StopIteration - - # Letterbox - img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0] - - # Stack - img = np.stack(img, 0) - - # Convert - img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416 - img = np.ascontiguousarray(img) - - return self.sources, img, img0, None - - def __len__(self): - return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years - - -def img2label_paths(img_paths): - # Define label paths as a function of image paths - sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings - return ['txt'.join(x.replace(sa, sb, 1).rsplit(x.split('.')[-1], 1)) for x in img_paths] - - -class LoadImagesAndLabels(Dataset): # for training/testing - def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False, - cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''): - self.img_size = img_size - self.augment = augment - self.hyp = hyp - self.image_weights = image_weights - self.rect = False if image_weights else rect - self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training) - self.mosaic_border = [-img_size // 2, -img_size // 2] - self.stride = stride - self.path = path - #self.albumentations = Albumentations() if augment else None - - try: - f = [] # image files - for p in path if isinstance(path, list) else [path]: - p = Path(p) # os-agnostic - if p.is_dir(): # dir - f += glob.glob(str(p / '**' / '*.*'), recursive=True) - # f = list(p.rglob('**/*.*')) # pathlib - elif p.is_file(): # file - with open(p, 'r') as t: - t = t.read().strip().splitlines() - parent = str(p.parent) + os.sep - f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path - # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib) - else: - raise Exception(f'{prefix}{p} does not exist') - self.img_files = 
sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats]) - # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats]) # pathlib - assert self.img_files, f'{prefix}No images found' - except Exception as e: - raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}') - - # Check cache - self.label_files = img2label_paths(self.img_files) # labels - cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels - if cache_path.is_file(): - cache, exists = torch.load(cache_path), True # load - #if cache['hash'] != get_hash(self.label_files + self.img_files) or 'version' not in cache: # changed - # cache, exists = self.cache_labels(cache_path, prefix), False # re-cache - else: - cache, exists = self.cache_labels(cache_path, prefix), False # cache - - # Display cache - nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total - if exists: - d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted" - tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results - assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}' - - # Read cache - cache.pop('hash') # remove hash - cache.pop('version') # remove version - labels, shapes, self.segments = zip(*cache.values()) - self.labels = list(labels) - self.shapes = np.array(shapes, dtype=np.float64) - self.img_files = list(cache.keys()) # update - self.label_files = img2label_paths(cache.keys()) # update - if single_cls: - for x in self.labels: - x[:, 0] = 0 - - n = len(shapes) # number of images - bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index - nb = bi[-1] + 1 # number of batches - self.batch = bi # batch index of image - self.n = n - self.indices = range(n) - - # Rectangular Training - if self.rect: - # Sort by aspect ratio - s = self.shapes # wh - ar = s[:, 1] / s[:, 0] # aspect ratio - irect = ar.argsort() - self.img_files = [self.img_files[i] for i in irect] - self.label_files = [self.label_files[i] for i in irect] - self.labels = [self.labels[i] for i in irect] - self.shapes = s[irect] # wh - ar = ar[irect] - - # Set training image shapes - shapes = [[1, 1]] * nb - for i in range(nb): - ari = ar[bi == i] - mini, maxi = ari.min(), ari.max() - if maxi < 1: - shapes[i] = [maxi, 1] - elif mini > 1: - shapes[i] = [1, 1 / mini] - - self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride - - # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM) - self.imgs = [None] * n - if cache_images: - if cache_images == 'disk': - self.im_cache_dir = Path(Path(self.img_files[0]).parent.as_posix() + '_npy') - self.img_npy = [self.im_cache_dir / Path(f).with_suffix('.npy').name for f in self.img_files] - self.im_cache_dir.mkdir(parents=True, exist_ok=True) - gb = 0 # Gigabytes of cached images - self.img_hw0, self.img_hw = [None] * n, [None] * n - results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) - pbar = tqdm(enumerate(results), total=n) - for i, x in pbar: - if cache_images == 'disk': - if not self.img_npy[i].exists(): - np.save(self.img_npy[i].as_posix(), x[0]) - gb += self.img_npy[i].stat().st_size - else: - self.imgs[i], self.img_hw0[i], self.img_hw[i] = x - gb += self.imgs[i].nbytes - pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)' - pbar.close() - - def 
cache_labels(self, path=Path('./labels.cache'), prefix=''): - # Cache dataset labels, check images and read shapes - x = {} # dict - nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, duplicate - pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files)) - for i, (im_file, lb_file) in enumerate(pbar): - try: - # verify images - im = Image.open(im_file) - im.verify() # PIL verify - shape = exif_size(im) # image size - segments = [] # instance segments - assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' - assert im.format.lower() in img_formats, f'invalid image format {im.format}' - - # verify labels - if os.path.isfile(lb_file): - nf += 1 # label found - with open(lb_file, 'r') as f: - l = [x.split() for x in f.read().strip().splitlines()] - if any([len(x) > 8 for x in l]): # is segment - classes = np.array([x[0] for x in l], dtype=np.float32) - segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...) - l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) - l = np.array(l, dtype=np.float32) - if len(l): - assert l.shape[1] == 5, 'labels require 5 columns each' - assert (l >= 0).all(), 'negative labels' - assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels' - assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels' - else: - ne += 1 # label empty - l = np.zeros((0, 5), dtype=np.float32) - else: - nm += 1 # label missing - l = np.zeros((0, 5), dtype=np.float32) - x[im_file] = [l, shape, segments] - except Exception as e: - nc += 1 - print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}') - - pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \ - f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted" - pbar.close() - - if nf == 0: - print(f'{prefix}WARNING: No labels found in {path}. 
See {help_url}') - - x['hash'] = get_hash(self.label_files + self.img_files) - x['results'] = nf, nm, ne, nc, i + 1 - x['version'] = 0.1 # cache version - torch.save(x, path) # save for next time - logging.info(f'{prefix}New cache created: {path}') - return x - - def __len__(self): - return len(self.img_files) - - # def __iter__(self): - # self.count = -1 - # print('ran dataset iter') - # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF) - # return self - - def __getitem__(self, index): - index = self.indices[index] # linear, shuffled, or image_weights - - hyp = self.hyp - mosaic = self.mosaic and random.random() < hyp['mosaic'] - if mosaic: - # Load mosaic - if random.random() < 0.8: - img, labels = load_mosaic(self, index) - else: - img, labels = load_mosaic9(self, index) - shapes = None - - # MixUp https://arxiv.org/pdf/1710.09412.pdf - if random.random() < hyp['mixup']: - if random.random() < 0.8: - img2, labels2 = load_mosaic(self, random.randint(0, len(self.labels) - 1)) - else: - img2, labels2 = load_mosaic9(self, random.randint(0, len(self.labels) - 1)) - r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0 - img = (img * r + img2 * (1 - r)).astype(np.uint8) - labels = np.concatenate((labels, labels2), 0) - - else: - # Load image - img, (h0, w0), (h, w) = load_image(self, index) - - # Letterbox - shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape - img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) - shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling - - labels = self.labels[index].copy() - if labels.size: # normalized xywh to pixel xyxy format - labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) - - if self.augment: - # Augment imagespace - if not mosaic: - img, labels = random_perspective(img, labels, - degrees=hyp['degrees'], - translate=hyp['translate'], - scale=hyp['scale'], - shear=hyp['shear'], - perspective=hyp['perspective']) - - - #img, labels = self.albumentations(img, labels) - - # Augment colorspace - augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) - - # Apply cutouts - # if random.random() < 0.9: - # labels = cutout(img, labels) - - if random.random() < hyp['paste_in']: - sample_labels, sample_images, sample_masks = [], [], [] - while len(sample_labels) < 30: - sample_labels_, sample_images_, sample_masks_ = load_samples(self, random.randint(0, len(self.labels) - 1)) - sample_labels += sample_labels_ - sample_images += sample_images_ - sample_masks += sample_masks_ - #print(len(sample_labels)) - if len(sample_labels) == 0: - break - labels = pastein(img, labels, sample_labels, sample_images, sample_masks) - - nL = len(labels) # number of labels - if nL: - labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh - labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1 - labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1 - - if self.augment: - # flip up-down - if random.random() < hyp['flipud']: - img = np.flipud(img) - if nL: - labels[:, 2] = 1 - labels[:, 2] - - # flip left-right - if random.random() < hyp['fliplr']: - img = np.fliplr(img) - if nL: - labels[:, 1] = 1 - labels[:, 1] - - labels_out = torch.zeros((nL, 6)) - if nL: - labels_out[:, 1:] = torch.from_numpy(labels) - - # Convert - img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 - img = np.ascontiguousarray(img) - - return torch.from_numpy(img), labels_out, 
self.img_files[index], shapes - - @staticmethod - def collate_fn(batch): - img, label, path, shapes = zip(*batch) # transposed - for i, l in enumerate(label): - l[:, 0] = i # add target image index for build_targets() - return torch.stack(img, 0), torch.cat(label, 0), path, shapes - - @staticmethod - def collate_fn4(batch): - img, label, path, shapes = zip(*batch) # transposed - n = len(shapes) // 4 - img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n] - - ho = torch.tensor([[0., 0, 0, 1, 0, 0]]) - wo = torch.tensor([[0., 0, 1, 0, 0, 0]]) - s = torch.tensor([[1, 1, .5, .5, .5, .5]]) # scale - for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW - i *= 4 - if random.random() < 0.5: - im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[ - 0].type(img[i].type()) - l = label[i] - else: - im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2) - l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s - img4.append(im) - label4.append(l) - - for i, l in enumerate(label4): - l[:, 0] = i # add target image index for build_targets() - - return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4 - - -# Ancillary functions -------------------------------------------------------------------------------------------------- -def load_image(self, index): - # loads 1 image from dataset, returns img, original hw, resized hw - img = self.imgs[index] - if img is None: # not cached - path = self.img_files[index] - img = cv2.imread(path) # BGR - assert img is not None, 'Image Not Found ' + path - h0, w0 = img.shape[:2] # orig hw - r = self.img_size / max(h0, w0) # resize image to img_size - if r != 1: # always resize down, only resize up if training with augmentation - interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR - img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp) - return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized - else: - return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized - - -def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5): - r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains - hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV)) - dtype = img.dtype # uint8 - - x = np.arange(0, 256, dtype=np.int16) - lut_hue = ((x * r[0]) % 180).astype(dtype) - lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) - lut_val = np.clip(x * r[2], 0, 255).astype(dtype) - - img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype) - cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed - - -def hist_equalize(img, clahe=True, bgr=False): - # Equalize histogram on BGR image 'img' with img.shape(n,m,3) and range 0-255 - yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) - if clahe: - c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) - yuv[:, :, 0] = c.apply(yuv[:, :, 0]) - else: - yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram - return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB - - -def load_mosaic(self, index): - # loads images in a 4-mosaic - - labels4, segments4 = [], [] - s = self.img_size - yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y - indices = [index] + random.choices(self.indices, k=3) # 3 additional image 
indices - for i, index in enumerate(indices): - # Load image - img, _, (h, w) = load_image(self, index) - - # place img in img4 - if i == 0: # top left - img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles - x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) - x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) - elif i == 1: # top right - x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc - x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h - elif i == 2: # bottom left - x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) - x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) - elif i == 3: # bottom right - x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) - x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) - - img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] - padw = x1a - x1b - padh = y1a - y1b - - # Labels - labels, segments = self.labels[index].copy(), self.segments[index].copy() - if labels.size: - labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format - segments = [xyn2xy(x, w, h, padw, padh) for x in segments] - labels4.append(labels) - segments4.extend(segments) - - # Concat/clip labels - labels4 = np.concatenate(labels4, 0) - for x in (labels4[:, 1:], *segments4): - np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() - # img4, labels4 = replicate(img4, labels4) # replicate - - # Augment - #img4, labels4, segments4 = remove_background(img4, labels4, segments4) - #sample_segments(img4, labels4, segments4, probability=self.hyp['copy_paste']) - img4, labels4, segments4 = copy_paste(img4, labels4, segments4, probability=self.hyp['copy_paste']) - img4, labels4 = random_perspective(img4, labels4, segments4, - degrees=self.hyp['degrees'], - translate=self.hyp['translate'], - scale=self.hyp['scale'], - shear=self.hyp['shear'], - perspective=self.hyp['perspective'], - border=self.mosaic_border) # border to remove - - return img4, labels4 - - -def load_mosaic9(self, index): - # loads images in a 9-mosaic - - labels9, segments9 = [], [] - s = self.img_size - indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices - for i, index in enumerate(indices): - # Load image - img, _, (h, w) = load_image(self, index) - - # place img in img9 - if i == 0: # center - img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles - h0, w0 = h, w - c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates - elif i == 1: # top - c = s, s - h, s + w, s - elif i == 2: # top right - c = s + wp, s - h, s + wp + w, s - elif i == 3: # right - c = s + w0, s, s + w0 + w, s + h - elif i == 4: # bottom right - c = s + w0, s + hp, s + w0 + w, s + hp + h - elif i == 5: # bottom - c = s + w0 - w, s + h0, s + w0, s + h0 + h - elif i == 6: # bottom left - c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h - elif i == 7: # left - c = s - w, s + h0 - h, s, s + h0 - elif i == 8: # top left - c = s - w, s + h0 - hp - h, s, s + h0 - hp - - padx, pady = c[:2] - x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords - - # Labels - labels, segments = self.labels[index].copy(), self.segments[index].copy() - if labels.size: - labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format - segments = [xyn2xy(x, w, h, padx, pady) for x 
in segments] - labels9.append(labels) - segments9.extend(segments) - - # Image - img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax] - hp, wp = h, w # height, width previous - - # Offset - yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border] # mosaic center x, y - img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s] - - # Concat/clip labels - labels9 = np.concatenate(labels9, 0) - labels9[:, [1, 3]] -= xc - labels9[:, [2, 4]] -= yc - c = np.array([xc, yc]) # centers - segments9 = [x - c for x in segments9] - - for x in (labels9[:, 1:], *segments9): - np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() - # img9, labels9 = replicate(img9, labels9) # replicate - - # Augment - #img9, labels9, segments9 = remove_background(img9, labels9, segments9) - img9, labels9, segments9 = copy_paste(img9, labels9, segments9, probability=self.hyp['copy_paste']) - img9, labels9 = random_perspective(img9, labels9, segments9, - degrees=self.hyp['degrees'], - translate=self.hyp['translate'], - scale=self.hyp['scale'], - shear=self.hyp['shear'], - perspective=self.hyp['perspective'], - border=self.mosaic_border) # border to remove - - return img9, labels9 - - -def load_samples(self, index): - # loads images in a 4-mosaic - - labels4, segments4 = [], [] - s = self.img_size - yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y - indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices - for i, index in enumerate(indices): - # Load image - img, _, (h, w) = load_image(self, index) - - # place img in img4 - if i == 0: # top left - img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles - x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) - x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) - elif i == 1: # top right - x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc - x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h - elif i == 2: # bottom left - x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) - x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) - elif i == 3: # bottom right - x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) - x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) - - img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] - padw = x1a - x1b - padh = y1a - y1b - - # Labels - labels, segments = self.labels[index].copy(), self.segments[index].copy() - if labels.size: - labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format - segments = [xyn2xy(x, w, h, padw, padh) for x in segments] - labels4.append(labels) - segments4.extend(segments) - - # Concat/clip labels - labels4 = np.concatenate(labels4, 0) - for x in (labels4[:, 1:], *segments4): - np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() - # img4, labels4 = replicate(img4, labels4) # replicate - - # Augment - #img4, labels4, segments4 = remove_background(img4, labels4, segments4) - sample_labels, sample_images, sample_masks = sample_segments(img4, labels4, segments4, probability=0.5) - - return sample_labels, sample_images, sample_masks - - -def copy_paste(img, labels, segments, probability=0.5): - # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) - n = len(segments) - if probability and n: - h, w, 
c = img.shape # height, width, channels - im_new = np.zeros(img.shape, np.uint8) - for j in random.sample(range(n), k=round(probability * n)): - l, s = labels[j], segments[j] - box = w - l[3], l[2], w - l[1], l[4] - ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area - if (ioa < 0.30).all(): # allow 30% obscuration of existing labels - labels = np.concatenate((labels, [[l[0], *box]]), 0) - segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) - cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) - - result = cv2.bitwise_and(src1=img, src2=im_new) - result = cv2.flip(result, 1) # augment segments (flip left-right) - i = result > 0 # pixels to replace - # i[:, :] = result.max(2).reshape(h, w, 1) # act over ch - img[i] = result[i] # cv2.imwrite('debug.jpg', img) # debug - - return img, labels, segments - - -def remove_background(img, labels, segments): - # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) - n = len(segments) - h, w, c = img.shape # height, width, channels - im_new = np.zeros(img.shape, np.uint8) - img_new = np.ones(img.shape, np.uint8) * 114 - for j in range(n): - cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) - - result = cv2.bitwise_and(src1=img, src2=im_new) - - i = result > 0 # pixels to replace - img_new[i] = result[i] # cv2.imwrite('debug.jpg', img) # debug - - return img_new, labels, segments - - -def sample_segments(img, labels, segments, probability=0.5): - # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) - n = len(segments) - sample_labels = [] - sample_images = [] - sample_masks = [] - if probability and n: - h, w, c = img.shape # height, width, channels - for j in random.sample(range(n), k=round(probability * n)): - l, s = labels[j], segments[j] - box = l[1].astype(int).clip(0,w-1), l[2].astype(int).clip(0,h-1), l[3].astype(int).clip(0,w-1), l[4].astype(int).clip(0,h-1) - - #print(box) - if (box[2] <= box[0]) or (box[3] <= box[1]): - continue - - sample_labels.append(l[0]) - - mask = np.zeros(img.shape, np.uint8) - - cv2.drawContours(mask, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) - sample_masks.append(mask[box[1]:box[3],box[0]:box[2],:]) - - result = cv2.bitwise_and(src1=img, src2=mask) - i = result > 0 # pixels to replace - mask[i] = result[i] # cv2.imwrite('debug.jpg', img) # debug - #print(box) - sample_images.append(mask[box[1]:box[3],box[0]:box[2],:]) - - return sample_labels, sample_images, sample_masks - - -def replicate(img, labels): - # Replicate labels - h, w = img.shape[:2] - boxes = labels[:, 1:].astype(int) - x1, y1, x2, y2 = boxes.T - s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) - for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices - x1b, y1b, x2b, y2b = boxes[i] - bh, bw = y2b - y1b, x2b - x1b - yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y - x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] - img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] - labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) - - return img, labels - - -def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): - # Resize and pad image while meeting stride-multiple constraints - shape = img.shape[:2] # current shape [height, width] - if isinstance(new_shape, int): - new_shape = (new_shape, 
new_shape) - - # Scale ratio (new / old) - r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) - if not scaleup: # only scale down, do not scale up (for better test mAP) - r = min(r, 1.0) - - # Compute padding - ratio = r, r # width, height ratios - new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) - dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding - if auto: # minimum rectangle - dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding - elif scaleFill: # stretch - dw, dh = 0.0, 0.0 - new_unpad = (new_shape[1], new_shape[0]) - ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios - - dw /= 2 # divide padding into 2 sides - dh /= 2 - - if shape[::-1] != new_unpad: # resize - img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR) - top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) - left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) - img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border - return img, ratio, (dw, dh) - - -def random_perspective(img, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, - border=(0, 0)): - # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) - # targets = [cls, xyxy] - - height = img.shape[0] + border[0] * 2 # shape(h,w,c) - width = img.shape[1] + border[1] * 2 - - # Center - C = np.eye(3) - C[0, 2] = -img.shape[1] / 2 # x translation (pixels) - C[1, 2] = -img.shape[0] / 2 # y translation (pixels) - - # Perspective - P = np.eye(3) - P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) - P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) - - # Rotation and Scale - R = np.eye(3) - a = random.uniform(-degrees, degrees) - # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations - s = random.uniform(1 - scale, 1.1 + scale) - # s = 2 ** random.uniform(-scale, scale) - R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) - - # Shear - S = np.eye(3) - S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) - S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) - - # Translation - T = np.eye(3) - T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) - T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) - - # Combined rotation matrix - M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT - if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed - if perspective: - img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114)) - else: # affine - img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) - - # Visualize - # import matplotlib.pyplot as plt - # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() - # ax[0].imshow(img[:, :, ::-1]) # base - # ax[1].imshow(img2[:, :, ::-1]) # warped - - # Transform label coordinates - n = len(targets) - if n: - use_segments = any(x.any() for x in segments) - new = np.zeros((n, 4)) - if use_segments: # warp segments - segments = resample_segments(segments) # upsample - for i, segment in enumerate(segments): - xy = np.ones((len(segment), 3)) - xy[:, :2] = segment - xy = xy @ M.T # transform - xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or 
affine - - # clip - new[i] = segment2box(xy, width, height) - - else: # warp boxes - xy = np.ones((n * 4, 3)) - xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 - xy = xy @ M.T # transform - xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine - - # create new boxes - x = xy[:, [0, 2, 4, 6]] - y = xy[:, [1, 3, 5, 7]] - new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T - - # clip - new[:, [0, 2]] = new[:, [0, 2]].clip(0, width) - new[:, [1, 3]] = new[:, [1, 3]].clip(0, height) - - # filter candidates - i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10) - targets = targets[i] - targets[:, 1:5] = new[i] - - return img, targets - - -def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) - # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio - w1, h1 = box1[2] - box1[0], box1[3] - box1[1] - w2, h2 = box2[2] - box2[0], box2[3] - box2[1] - ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio - return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates - - -def bbox_ioa(box1, box2): - # Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2 - box2 = box2.transpose() - - # Get the coordinates of bounding boxes - b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] - b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] - - # Intersection area - inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \ - (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0) - - # box2 area - box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16 - - # Intersection over box2 area - return inter_area / box2_area - - -def cutout(image, labels): - # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 - h, w = image.shape[:2] - - # create random masks - scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction - for s in scales: - mask_h = random.randint(1, int(h * s)) - mask_w = random.randint(1, int(w * s)) - - # box - xmin = max(0, random.randint(0, w) - mask_w // 2) - ymin = max(0, random.randint(0, h) - mask_h // 2) - xmax = min(w, xmin + mask_w) - ymax = min(h, ymin + mask_h) - - # apply random color mask - image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] - - # return unobscured labels - if len(labels) and s > 0.03: - box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) - ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area - labels = labels[ioa < 0.60] # remove >60% obscured labels - - return labels - - -def pastein(image, labels, sample_labels, sample_images, sample_masks): - # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 - h, w = image.shape[:2] - - # create random masks - scales = [0.75] * 2 + [0.5] * 4 + [0.25] * 4 + [0.125] * 4 + [0.0625] * 6 # image size fraction - for s in scales: - if random.random() < 0.2: - continue - mask_h = random.randint(1, int(h * s)) - mask_w = random.randint(1, int(w * s)) - - # box - xmin = max(0, random.randint(0, w) - mask_w // 2) - ymin = max(0, random.randint(0, h) - mask_h // 2) - xmax = min(w, xmin + mask_w) - ymax = min(h, ymin + mask_h) - - box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) - if len(labels): - ioa = 
bbox_ioa(box, labels[:, 1:5]) # intersection over area - else: - ioa = np.zeros(1) - - if (ioa < 0.30).all() and len(sample_labels) and (xmax > xmin+20) and (ymax > ymin+20): # allow 30% obscuration of existing labels - sel_ind = random.randint(0, len(sample_labels)-1) - #print(len(sample_labels)) - #print(sel_ind) - #print((xmax-xmin, ymax-ymin)) - #print(image[ymin:ymax, xmin:xmax].shape) - #print([[sample_labels[sel_ind], *box]]) - #print(labels.shape) - hs, ws, cs = sample_images[sel_ind].shape - r_scale = min((ymax-ymin)/hs, (xmax-xmin)/ws) - r_w = int(ws*r_scale) - r_h = int(hs*r_scale) - - if (r_w > 10) and (r_h > 10): - r_mask = cv2.resize(sample_masks[sel_ind], (r_w, r_h)) - r_image = cv2.resize(sample_images[sel_ind], (r_w, r_h)) - temp_crop = image[ymin:ymin+r_h, xmin:xmin+r_w] - m_ind = r_mask > 0 - if m_ind.astype(np.int).sum() > 60: - temp_crop[m_ind] = r_image[m_ind] - #print(sample_labels[sel_ind]) - #print(sample_images[sel_ind].shape) - #print(temp_crop.shape) - box = np.array([xmin, ymin, xmin+r_w, ymin+r_h], dtype=np.float32) - if len(labels): - labels = np.concatenate((labels, [[sample_labels[sel_ind], *box]]), 0) - else: - labels = np.array([[sample_labels[sel_ind], *box]]) - - image[ymin:ymin+r_h, xmin:xmin+r_w] = temp_crop - - return labels - -class Albumentations: - # YOLOv5 Albumentations class (optional, only used if package is installed) - def __init__(self): - self.transform = None - import albumentations as A - - self.transform = A.Compose([ - A.CLAHE(p=0.01), - A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.01), - A.RandomGamma(gamma_limit=[80, 120], p=0.01), - A.Blur(p=0.01), - A.MedianBlur(p=0.01), - A.ToGray(p=0.01), - A.ImageCompression(quality_lower=75, p=0.01),], - bbox_params=A.BboxParams(format='pascal_voc', label_fields=['class_labels'])) - - #logging.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p)) - - def __call__(self, im, labels, p=1.0): - if self.transform and random.random() < p: - new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed - im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])]) - return im, labels - - -def create_folder(path='./new'): - # Create folder - if os.path.exists(path): - shutil.rmtree(path) # delete output folder - os.makedirs(path) # make new output folder - - -def flatten_recursive(path='../coco'): - # Flatten a recursive directory by bringing all files to top level - new_path = Path(path + '_flat') - create_folder(new_path) - for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)): - shutil.copyfile(file, new_path / Path(file).name) - - -def extract_boxes(path='../coco/'): # from utils.datasets import *; extract_boxes('../coco128') - # Convert detection dataset into classification dataset, with one directory per class - - path = Path(path) # images dir - shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing - files = list(path.rglob('*.*')) - n = len(files) # number of files - for im_file in tqdm(files, total=n): - if im_file.suffix[1:] in img_formats: - # image - im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB - h, w = im.shape[:2] - - # labels - lb_file = Path(img2label_paths([str(im_file)])[0]) - if Path(lb_file).exists(): - with open(lb_file, 'r') as f: - lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels - - for j, x in enumerate(lb): - c = int(x[0]) # 
class - f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename - if not f.parent.is_dir(): - f.parent.mkdir(parents=True) - - b = x[1:] * [w, h, w, h] # box - # b[2:] = b[2:].max() # rectangle to square - b[2:] = b[2:] * 1.2 + 3 # pad - b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int) - - b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image - b[[1, 3]] = np.clip(b[[1, 3]], 0, h) - assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}' - - -def autosplit(path='../coco', weights=(0.9, 0.1, 0.0), annotated_only=False): - """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files - Usage: from utils.datasets import *; autosplit('../coco') - Arguments - path: Path to images directory - weights: Train, val, test weights (list) - annotated_only: Only use images with an annotated txt file - """ - path = Path(path) # images dir - files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in img_formats], []) # image files only - n = len(files) # number of files - indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split - - txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files - [(path / x).unlink() for x in txt if (path / x).exists()] # remove existing - - print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only) - for i, img in tqdm(zip(indices, files), total=n): - if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label - with open(path / txt[i], 'a') as f: - f.write(str(img) + '\n') # add image to txt file - - -def load_segmentations(self, index): - key = '/work/handsomejw66/coco17/' + self.img_files[index] - #print(key) - # /work/handsomejw66/coco17/ - return self.segs[key] diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Bob.Ombs Modified Win10PE X64 V4.11 __FULL__.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Bob.Ombs Modified Win10PE X64 V4.11 __FULL__.md deleted file mode 100644 index e5a5a51f571fe06a41b97c58ee71c2e31cff1b60..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Bob.Ombs Modified Win10PE X64 V4.11 __FULL__.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Bob.Ombs Modified Win10PE x64 v4.11


    DOWNLOAD ••• https://urlgoal.com/2uCKNS



    -
    -Omb's Modified Win10PEx64' and ran Windows Login Unlocker as shown below. ... Ombs.Modified. ... Have just downloaded Bob Omb's Win10Pe from this site: Google Drive -- Page Not Found, prohibited. ... Win10PEx64.v4.6.7z. File size: 1.91GB RAR. What to do the next, please? ... Windows 10 Pro 64-bit. 1fdad05405
    -
    -
    -

    diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HD Online Player (Sakasama No Patema Movie [2021] Download).md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HD Online Player (Sakasama No Patema Movie [2021] Download).md deleted file mode 100644 index bda4c7458f0d40e54587e72f491120f2dee704e2..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HD Online Player (Sakasama No Patema Movie [2021] Download).md +++ /dev/null @@ -1,32 +0,0 @@ - -```html -

    How to Watch Sakasama no Patema Online in HD Quality

    -

    Sakasama no Patema is a 2013 Japanese animated film directed by Yasuhiro Yoshiura. It tells the story of Patema, a young girl who lives in an underground world, and Age, a boy who lives on the surface world where gravity is reversed. When Patema falls into a deep pit, she discovers a secret that connects their two worlds and changes their lives forever.

    -

    If you are a fan of anime and sci-fi, you might want to watch Sakasama no Patema online in HD quality. Here are some ways to do that:

    -

    HD Online Player (Sakasama No Patema Movie Download)


    Download ————— https://urlgoal.com/2uCL71



    -
      -
    • One option is to stream the movie on this website [^1^], which offers high-quality video and English subtitles. You can also download the movie from there if you want to watch it offline.
    • -
    • Another option is to download the movie from this link [^2^], which provides a PDF file with the movie and English subtitles embedded. You can use an online player or a PDF reader to watch the movie on your device.
    • -
    • A third option is to download the movie from this link [^3^], which also provides a PDF file with the movie and English subtitles embedded. You can use an online player or a PDF reader to watch the movie on your device.
    • -
    -

    Whichever option you choose, make sure you have a stable internet connection and enough storage space on your device. Enjoy watching Sakasama no Patema online in HD quality!

    -``` - -```html -

    If you want to learn more about Sakasama no Patema, you can also check out some of the following resources:

    -
      -
    • The official website of the movie, which has information about the characters, the story, the staff, and the soundtrack. You can also watch the trailer and some clips from the movie there.
    • -
    • The Wikipedia page of the movie, which has a detailed plot summary, a list of voice actors, a production history, and a reception section. You can also find some trivia and references there.
    • -
    • The IMDb page of the movie, which has a rating, a user review section, a trivia section, and a photo gallery. You can also see some related movies and recommendations there.
    • -
    -

    Sakasama no Patema is a captivating and imaginative film that explores the themes of perspective, identity, and friendship. It has received positive reviews from critics and audiences alike, and has won several awards and nominations. If you are looking for a unique and memorable anime experience, you should definitely watch Sakasama no Patema online in HD quality.

    -``` - -```html -

    One of the most striking features of Sakasama no Patema is its animation style, which uses a combination of 2D and 3D techniques to create a realistic and immersive world. The movie also uses different camera angles and orientations to show the contrast and connection between the two gravity directions. The result is a stunning and dynamic visual experience that will keep you on the edge of your seat.

    -

    Another aspect of Sakasama no Patema that deserves praise is its soundtrack, which was composed by Michiru Oshima. The music matches the mood and tone of the movie perfectly, and enhances the emotional impact of the scenes. The movie also features two songs by Estelle Micheau, who sings in both Japanese and French. The songs are "Patema Inverse" and "Try Again", and they express the themes and messages of the movie beautifully.

    -

    -

    Sakasama no Patema is not only a great anime film, but also a meaningful and inspiring one. It shows us that despite our differences, we can find common ground and understanding with others. It also encourages us to challenge our assumptions and prejudices, and to see things from different perspectives. It is a movie that will make you think, feel, and wonder.

    -```

    d5da3c52bf
    -
    -
    \ No newline at end of file diff --git a/spaces/richardzhangy26/yandian_flow_classification/configs/_base_/datasets/kitti2012_kitti2015_test.py b/spaces/richardzhangy26/yandian_flow_classification/configs/_base_/datasets/kitti2012_kitti2015_test.py deleted file mode 100644 index ced2a99d6e987cfcd3428277d7185a1bcef72316..0000000000000000000000000000000000000000 --- a/spaces/richardzhangy26/yandian_flow_classification/configs/_base_/datasets/kitti2012_kitti2015_test.py +++ /dev/null @@ -1,36 +0,0 @@ -img_norm_cfg = dict(mean=[0., 0., 0.], std=[255., 255., 255.], to_rgb=False) - -kitti_test_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', sparse=True), - dict(type='InputResize', exponent=6), - dict(type='Normalize', **img_norm_cfg), - dict(type='TestFormatBundle'), - dict( - type='Collect', - keys=['imgs'], - meta_keys=[ - 'flow_gt', 'valid', 'filename1', 'filename2', 'ori_filename1', - 'ori_filename2', 'ori_shape', 'img_shape', 'img_norm_cfg', - 'scale_factor', 'pad_shape' - ]) -] - -kitti2015_val_test = dict( - type='KITTI2015', - data_root='data/kitti2015', - pipeline=kitti_test_pipeline, - test_mode=True) - -kitti2012_val_test = dict( - type='KITTI2012', - data_root='data/kitti2012', - pipeline=kitti_test_pipeline, - test_mode=True) - -data = dict( - test_dataloader=dict(samples_per_gpu=1, workers_per_gpu=2, shuffle=False), - test=dict( - type='ConcatDataset', - datasets=[kitti2012_val_test, kitti2015_val_test], - separate_eval=True)) diff --git a/spaces/richardzhangy26/yandian_flow_classification/configs/_base_/models/liteflownet2/liteflownet2_pre_M5S5R5.py b/spaces/richardzhangy26/yandian_flow_classification/configs/_base_/models/liteflownet2/liteflownet2_pre_M5S5R5.py deleted file mode 100644 index bbfb19d412e6fbd6c402b11a073f3ac362a7b09a..0000000000000000000000000000000000000000 --- a/spaces/richardzhangy26/yandian_flow_classification/configs/_base_/models/liteflownet2/liteflownet2_pre_M5S5R5.py +++ /dev/null @@ -1,53 +0,0 @@ -model = dict( - type='LiteFlowNet', - encoder=dict( - type='NetC', - in_channels=3, - pyramid_levels=[ - 'level1', 'level2', 'level3', 'level4', 'level5', 'level6' - ], - out_channels=(32, 32, 64, 96, 128, 192), - strides=(1, 2, 2, 2, 2, 2), - num_convs=(1, 3, 2, 2, 1, 1), - conv_cfg=None, - norm_cfg=None, - act_cfg=dict(type='LeakyReLU', negative_slope=0.1), - init_cfg=None), - decoder=dict( - type='NetE', - in_channels=dict(level5=128, level6=192), - corr_channels=dict(level5=49, level6=49), - sin_channels=dict(level5=258, level6=386), - rin_channels=dict(level5=131, level6=195), - feat_channels=64, - mfeat_channels=(128, 128, 96, 64, 32), - sfeat_channels=(128, 128, 96, 64, 32), - rfeat_channels=(128, 128, 64, 64, 32, 32), - patch_size=dict(level5=3, level6=3), - corr_cfg=dict( - level5=dict(type='Correlation', max_displacement=3), - level6=dict(type='Correlation', max_displacement=3)), - warp_cfg=dict(type='Warp', align_corners=True, use_mask=True), - flow_div=20., - conv_cfg=None, - norm_cfg=None, - act_cfg=dict(type='LeakyReLU', negative_slope=0.1), - scaled_corr=False, - regularized_flow=True, - extra_training_loss=False, - flow_loss=dict( - type='MultiLevelEPE', - weights=dict(level6=0.32, level5=0.08), - p=2, - reduction='sum'), - init_cfg=None), - # model training and testing settings - init_cfg=dict( - type='Kaiming', - nonlinearity='leaky_relu', - layer=['Conv2d', 'ConvTranspose2d'], - mode='fan_in', - bias=0), - train_cfg=dict(), - test_cfg=dict(), -) diff --git 
a/spaces/richardzhangy26/yandian_flow_classification/configs/_base_/schedules/pwcnet_plus_750k_schedule.py b/spaces/richardzhangy26/yandian_flow_classification/configs/_base_/schedules/pwcnet_plus_750k_schedule.py deleted file mode 100644 index f27713ff47b06d323b9adfb4d96dfc5fed27b7ae..0000000000000000000000000000000000000000 --- a/spaces/richardzhangy26/yandian_flow_classification/configs/_base_/schedules/pwcnet_plus_750k_schedule.py +++ /dev/null @@ -1,34 +0,0 @@ -# optimizer -optimizer = dict(type='Adam', lr=5e-5, weight_decay=0.0004, betas=(0.9, 0.999)) -optimizer_config = dict(grad_clip=None) -# learning policy -lr_config = dict( - policy='MultiStage', - by_epoch=False, - gammas=[0.5, 0.5, 0.5, 0.5, 0.5], - milestone_lrs=[5e-5, 3e-5, 2e-5, 1e-5, 5e-6], - milestone_iters=[0, 150000, 300000, 450000, 600000], - steps=[[ - 45000, 65000, 85000, 95000, 97500, 100000, 110000, 120000, 130000, - 140000 - ], - [ - 195000, 215000, 235000, 245000, 247500, 250000, 260000, 270000, - 280000, 290000 - ], - [ - 345000, 365000, 385000, 395000, 397500, 400000, 410000, 420000, - 430000, 440000 - ], - [ - 495000, 515000, 535000, 545000, 547500, 550000, 560000, 570000, - 580000, 590000 - ], - [ - 645000, 665000, 685000, 695000, 697500, 700000, 710000, 720000, - 730000, 740000 - ]]) - -runner = dict(type='IterBasedRunner', max_iters=750000) -checkpoint_config = dict(by_epoch=False, interval=50000) -evaluation = dict(interval=50000, metric='EPE') diff --git a/spaces/robinhad/ukrainian-tts/tests/__init__.py b/spaces/robinhad/ukrainian-tts/tests/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/rorallitri/biomedical-language-models/logs/Idm Integration Module Free Download.md b/spaces/rorallitri/biomedical-language-models/logs/Idm Integration Module Free Download.md deleted file mode 100644 index 9b3fa3e0bdb5e1038e0724ca687eae84a84e1cfe..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Idm Integration Module Free Download.md +++ /dev/null @@ -1,12 +0,0 @@ - -

    The process of actually downloading and setting up IDM is a few steps too many. In Chrome, once you've downloaded the software you're prompted to enable the IDM Integration Module extension in order for the program to work, and a screenshot is provided for efficiency's sake. Before you're able to do any of this, you have to close out the browser you're using, which can be frustrating if you're in the middle of something and just wanted to be able to use the software immediately. Once the extension has been added, a window appears informing you of some changes that it is making to Chrome, including being able to read and change all data on websites that are visited. For those that are worried about privacy and data sharing, this could be a cause for concern. In IDM's privacy policy, it says that the browsers that use the software do not collect any data besides what is absolutely necessary to run the integration module. In this case, that includes things like internet addresses, cookies, and encrypted credentials. The data collected is stored on the user's computer until IDM is deleted. IDM does not send information to third party sellers.

    -

    IDM is an incredible download manager, but unfortunately, it only runs on Windows and after the 30-day trial it costs $11.95 monthly. As for alternatives, the first would be JDownloader. It's available for Windows, Mac, and Linux, is completely free and has some similar features to IDM like the ability to pause and resume downloads. Free Download Manager is another good option. It is available for Windows and Mac, can resume errored downloads, has accelerated download speeds and more. Lastly, there's uGet. Similar functions to IDM are pausing and resuming downloads, scheduling downloads, and multi-connection downloads. The layout is a bit more modern and it has more options to customize the aesthetic of the program than most others. Light and dark modes help with extended viewing time. uGet is available for Windows, Mac, Linux, and Android.

    -

    Idm Integration Module Free Download


    Download Ziphttps://tinurll.com/2uznfj



    -



    Internet Download Manager handles the whole download process for you in a very simple way, so you can pause and resume downloads whenever it suits you. It restarts broken downloads caused by lost internet connections, network issues, power failures, or system shutdowns, and it downloads files in parts and combines them at the end.
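
    The part-by-part download described above boils down to HTTP Range requests: the client asks the server for separate byte ranges of the same file and then stitches them back together. The Python sketch below is only an illustration of that idea, not IDM's actual implementation; it assumes the server reports a Content-Length and honors Range headers, and the URL and output filename are placeholders.

```python
# Illustrative sketch of segmented downloading with HTTP Range requests.
# Assumes the server reports Content-Length and supports Range requests;
# the URL and output filename below are placeholders.
import requests

def download_in_parts(url: str, output_path: str, parts: int = 4) -> None:
    size = int(requests.head(url, allow_redirects=True).headers["Content-Length"])
    chunk = size // parts
    with open(output_path, "wb") as out:
        for i in range(parts):
            start = i * chunk
            end = size - 1 if i == parts - 1 else start + chunk - 1
            resp = requests.get(url, headers={"Range": f"bytes={start}-{end}"}, timeout=60)
            resp.raise_for_status()
            out.write(resp.content)  # ranges are requested in order, so append directly

if __name__ == "__main__":
    download_in_parts("https://example.com/big-file.zip", "big-file.zip")
```

    A real download manager fetches the ranges concurrently and retries any range that fails, which is what makes resuming broken downloads possible.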

    -

    All skins can be downloaded from the IDM home site, and Windows 10 users can design their own. You can also choose the light or dark IDM theme and set up a new IDM toolbar. You can try Internet Download Manager free for 30 days, with no email or credit card required, so download and install IDM and stop waiting for your downloads. Internet Download Manager v6 improves the media grabber that saves media files from web players on web pages.

    -

    More than a free and simple video transcoder, Free HD Video Converter Factory is actually an all-around desktop program integrated with a video downloader module that allows you to download most types of streaming videos, including HLS videos, MPEG-DASH, HTTPS, RTMP, etc. Thanks to its intuitive UI, you can simply complete HLS streaming download and convert M3U8 to MP4 without knowing any technical stuff. Download and install it to your PC, and let's see how it works.
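
    For context, what such a downloader module does with an HLS stream is essentially what ffmpeg does in a single command: read the M3U8 playlist, fetch the listed segments, and remux them into an MP4 container. The snippet below is a minimal sketch of that idea, assuming ffmpeg is installed and on your PATH; the playlist URL and output name are placeholders.

```python
# Minimal sketch: remux an HLS (M3U8) playlist into an MP4 file via ffmpeg.
# Assumes ffmpeg is installed and on PATH; URL and filename are placeholders.
import subprocess

def hls_to_mp4(playlist_url: str, output_path: str) -> None:
    # "-c copy" copies the existing audio/video streams without re-encoding,
    # so the remux is fast and does not lose quality.
    subprocess.run(
        ["ffmpeg", "-i", playlist_url, "-c", "copy", output_path],
        check=True,
    )

if __name__ == "__main__":
    hls_to_mp4("https://example.com/stream/playlist.m3u8", "video.mp4")
```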

    -

    As you know, IDM is shareware and offers a 30-day free trial, so you can either pay for the full-featured IDM or turn to a free alternative like Neat Download Manager, which also supports HLS video downloading with similar steps. Give it a shot via

    -

    You know, there are still multiple solutions for HLS video download, but most are too esoteric for beginners. By contrast, the aforementioned four solutions are totally free, handy, and efficient. Also, if you know a better solution, feel free to drop me a line via Facebook Messenger. Thanks for reading!

    -

    aaccfb2cb3
    -
    -
    \ No newline at end of file diff --git a/spaces/rrighart/product-defects/README.md b/spaces/rrighart/product-defects/README.md deleted file mode 100644 index 52115778e1042e0385d5dec2bf8d1135b63f48be..0000000000000000000000000000000000000000 --- a/spaces/rrighart/product-defects/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Product Defects -emoji: 🥫 -colorFrom: purple -colorTo: gray -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/s3nh/totally-not-an-llm-AlpacaCielo2-7b-8k-GGML/app.py b/spaces/s3nh/totally-not-an-llm-AlpacaCielo2-7b-8k-GGML/app.py deleted file mode 100644 index 0809ae4b0332b7ec8b9f226262da8dfd645ca794..0000000000000000000000000000000000000000 --- a/spaces/s3nh/totally-not-an-llm-AlpacaCielo2-7b-8k-GGML/app.py +++ /dev/null @@ -1,400 +0,0 @@ -import os -import platform -import random -import time -from dataclasses import asdict, dataclass -from pathlib import Path - -import gradio as gr -import psutil -from about_time import about_time -from ctransformers import AutoModelForCausalLM -from dl_hf_model import dl_hf_model -from loguru import logger - - -URL = "https://huggingface.co/s3nh/totally-not-an-llm-AlpacaCielo2-7b-8k-GGML/resolve/main/totally-not-an-llm-AlpacaCielo2-7b-8k-GGML.ggmlv3.q5_1.bin" # 4.05G - -_ = ( - "golay" in platform.node() - or "okteto" in platform.node() - or Path("/kaggle").exists() - # or psutil.cpu_count(logical=False) < 4 - or 1 # run 7b in hf -) - -if _: - url = "https://huggingface.co/s3nh/totally-not-an-llm-AlpacaCielo2-7b-8k-GGML/resolve/main/totally-not-an-llm-AlpacaCielo2-7b-8k-GGML.ggmlv3.q5_1.bin" # 2.87G - - -prompt_template = """Below is an instruction that describes a task. Write a response that appropriately completes the request. -### Instruction: {user_prompt} -### Response: -""" - -prompt_template = """System: You are a helpful, -respectful and honest assistant. Always answer as -helpfully as possible, while being safe. Your answers -should not include any harmful, unethical, racist, -sexist, toxic, dangerous, or illegal content. Please -ensure that your responses are socially unbiased and -positive in nature. If a question does not make any -sense, or is not factually coherent, explain why instead -of answering something not correct. If you don't know -the answer to a question, please don't share false -information. -User: {prompt} -Assistant: """ - -prompt_template = """System: You are a helpful assistant. -User: {prompt} -Assistant: """ - -prompt_template = """Question: {question} -Answer: Let's work this out in a step by step way to be sure we have the right answer.""" - -prompt_template = """[INST] <> -You are a helpful, respectful and honest assistant. Always answer as helpfully as possible assistant. Think step by step. -<> -What NFL team won the Super Bowl in the year Justin Bieber was born? -[/INST]""" - -prompt_template = """[INST] <> -You are an unhelpful assistant. Always answer as helpfully as possible. Think step by step. <> -{question} [/INST] -""" - -prompt_template = """[INST] <> -You are a helpful assistant. 
-<> -{question} [/INST] -""" - -prompt_template = """### HUMAN: -{question} -### RESPONSE:""" - - -prompt_template = """<|prompt|>:{question} -<|answer|>:""" - - -prompt_template = """SYSTEM: -USER: {question} -ASSISTANT: """ - - -_ = [elm for elm in prompt_template.splitlines() if elm.strip()] -stop_string = [elm.split(":")[0] + ":" for elm in _][-2] - -logger.debug(f"{stop_string=} not used") - -_ = psutil.cpu_count(logical=False) - 1 -cpu_count: int = int(_) if _ else 1 -logger.debug(f"{cpu_count=}") - -LLM = None - -try: - model_loc, file_size = dl_hf_model(url) -except Exception as exc_: - logger.error(exc_) - raise SystemExit(1) from exc_ - -LLM = AutoModelForCausalLM.from_pretrained( - model_loc, - model_type="llama", -) - -logger.info(f"done load llm {model_loc=} {file_size=}G") - -os.environ["TZ"] = "Asia/Shanghai" -try: - time.tzset() - - logger.warning("Windows, cant run time.tzset()") -except Exception: - logger.warning("Windows, cant run time.tzset()") - - -@dataclass -class GenerationConfig: - temperature: float = 0.7 - top_k: int = 50 - top_p: float = 0.9 - repetition_penalty: float = 1.0 - max_new_tokens: int = 512 - seed: int = 42 - reset: bool = False - stream: bool = True - # threads: int = cpu_count - # stop: list[str] = field(default_factory=lambda: [stop_string]) - - -def generate( - question: str, - llm=LLM, - config: GenerationConfig = GenerationConfig(), -): - """Run model inference, will return a Generator if streaming is true.""" - - - prompt = prompt_template.format(question=question) - - return llm( - prompt, - **asdict(config), - ) - - -logger.debug(f"{asdict(GenerationConfig())=}") - - -def user(user_message, history): - history.append([user_message, None]) - return user_message, history - - -def user1(user_message, history): - history.append([user_message, None]) - return "", history - -def bot_(history): - user_message = history[-1][0] - resp = random.choice(["How are you?", "I love you", "I'm very hungry"]) - bot_message = user_message + ": " + resp - history[-1][1] = "" - for character in bot_message: - history[-1][1] += character - time.sleep(0.02) - yield history - - history[-1][1] = resp - yield history - - -def bot(history): - user_message = history[-1][0] - response = [] - - logger.debug(f"{user_message=}") - - with about_time() as atime: - flag = 1 - prefix = "" - then = time.time() - - logger.debug("about to generate") - - config = GenerationConfig(reset=True) - for elm in generate(user_message, config=config): - if flag == 1: - logger.debug("in the loop") - prefix = f"({time.time() - then:.2f}s) " - flag = 0 - print(prefix, end="", flush=True) - logger.debug(f"{prefix=}") - print(elm, end="", flush=True) - - response.append(elm) - history[-1][1] = prefix + "".join(response) - yield history - - _ = ( - f"(time elapsed: {atime.duration_human}, " - f"{atime.duration/len(''.join(response)):.2f}s/char)" - ) - - history[-1][1] = "".join(response) + f"\n{_}" - yield history - - -def predict_api(prompt): - logger.debug(f"{prompt=}") - try: - # user_prompt = prompt - config = GenerationConfig( - temperature=0.2, - top_k=10, - top_p=0.9, - repetition_penalty=1.0, - max_new_tokens=512, # adjust as needed - seed=42, - reset=True, - stream=False, - ) - - response = generate( - prompt, - config=config, - ) - - logger.debug(f"api: {response=}") - except Exception as exc: - logger.error(exc) - response = f"{exc=}" - return response - - -css = """ - .importantButton { - background: linear-gradient(45deg, #7e0570,#5d1c99, #6e00ff) !important; - border: none 
!important; - } - .importantButton:hover { - background: linear-gradient(45deg, #ff00e0,#8500ff, #6e00ff) !important; - border: none !important; - } - .disclaimer {font-variant-caps: all-small-caps; font-size: xx-small;} - .xsmall {font-size: x-small;} -""" -etext = """In America, where cars are an important part of the national psyche, a decade ago people had suddenly started to drive less, which had not happened since the oil shocks of the 1970s. """ -examples_list = [ - ["Send an email requesting that people use language models responsibly."], - ["Write a shouting match between Julius Caesar and Napoleon"], - ["Write a theory to explain why cat never existed"], - ["write a story about a grain of sand as it watches millions of years go by"], - ["What are 3 popular chess openings?"], - ["write a conversation between the sun and pluto"], - ["Did you know that Yann LeCun dropped a rap album last year? We listened to it andhere’s what we thought:"], -] - -logger.info("start block") - -with gr.Blocks( - title=f"{Path(model_loc).name}", - theme=gr.themes.Soft(text_size="sm", spacing_size="sm"), - css=css, -) as block: - # buff_var = gr.State("") - with gr.Accordion("🎈 Info", open=False): - # gr.HTML( - # """
    Duplicate and spin a CPU UPGRADE to avoid the queue
    """ - # ) - gr.Markdown( - f"""
    {Path(model_loc).name}
    - Most examples are meant for another model. - You probably should try to test - some related prompts.""", - elem_classes="xsmall", - ) - - # chatbot = gr.Chatbot().style(height=700) # 500 - chatbot = gr.Chatbot(height=500) - - # buff = gr.Textbox(show_label=False, visible=True) - - with gr.Row(): - with gr.Column(scale=5): - msg = gr.Textbox( - label="Chat Message Box", - placeholder="Ask me anything (press Shift+Enter or click Submit to send)", - show_label=False, - # container=False, - lines=6, - max_lines=30, - show_copy_button=True, - # ).style(container=False) - ) - with gr.Column(scale=1, min_width=50): - with gr.Row(): - submit = gr.Button("Submit", elem_classes="xsmall") - stop = gr.Button("Stop", visible=True) - clear = gr.Button("Clear History", visible=True) - with gr.Row(visible=False): - with gr.Accordion("Advanced Options:", open=False): - with gr.Row(): - with gr.Column(scale=2): - system = gr.Textbox( - label="System Prompt", - value=prompt_template, - show_label=False, - container=False, - # ).style(container=False) - ) - with gr.Column(): - with gr.Row(): - change = gr.Button("Change System Prompt") - reset = gr.Button("Reset System Prompt") - - with gr.Accordion("Example Inputs", open=True): - examples = gr.Examples( - examples=examples_list, - inputs=[msg], - examples_per_page=40, - ) - - # with gr.Row(): - with gr.Accordion("Disclaimer", open=True): - _ = Path(model_loc).name - gr.Markdown( - "Disclaimer: I AM NOT RESPONSIBLE FOR ANY PROMPT PROVIDED BY USER AND PROMPT RETURNED FROM THE MODEL. THIS APP SHOULD BE USED FOR EDUCATIONAL PURPOSE" - "WITHOUT ANY OFFENSIVE, AGGRESIVE INTENTS. {_} can produce factually incorrect output, and should not be relied on to produce " - f"factually accurate information. {_} was trained on various public datasets; while great efforts " - "have been taken to clean the pretraining data, it is possible that this model could generate lewd, " - "biased, or otherwise offensive outputs.", - elem_classes=["disclaimer"], - ) - - msg_submit_event = msg.submit( - # fn=conversation.user_turn, - fn=user, - inputs=[msg, chatbot], - outputs=[msg, chatbot], - queue=True, - show_progress="full", - # api_name=None, - ).then(bot, chatbot, chatbot, queue=True) - submit_click_event = submit.click( - # fn=lambda x, y: ("",) + user(x, y)[1:], # clear msg - fn=user1, # clear msg - inputs=[msg, chatbot], - outputs=[msg, chatbot], - queue=True, - # queue=False, - show_progress="full", - # api_name=None, - ).then(bot, chatbot, chatbot, queue=True) - stop.click( - fn=None, - inputs=None, - outputs=None, - cancels=[msg_submit_event, submit_click_event], - queue=False, - ) - clear.click(lambda: None, None, chatbot, queue=False) - - with gr.Accordion("For Chat/Translation API", open=False, visible=False): - input_text = gr.Text() - api_btn = gr.Button("Go", variant="primary") - out_text = gr.Text() - - api_btn.click( - predict_api, - input_text, - out_text, - api_name="api", - ) - - # block.load(update_buff, [], buff, every=1) - # block.load(update_buff, [buff_var], [buff_var, buff], every=1) - -# concurrency_count=5, max_size=20 -# max_size=36, concurrency_count=14 -# CPU cpu_count=2 16G, model 7G -# CPU UPGRADE cpu_count=8 32G, model 7G - -# does not work -_ = """ -# _ = int(psutil.virtual_memory().total / 10**9 // file_size - 1) -# concurrency_count = max(_, 1) -if psutil.cpu_count(logical=False) >= 8: - # concurrency_count = max(int(32 / file_size) - 1, 1) -else: - # concurrency_count = max(int(16 / file_size) - 1, 1) -# """ - -concurrency_count = 1 
-logger.info(f"{concurrency_count=}") - -block.queue(concurrency_count=concurrency_count, max_size=5).launch(debug=True) \ No newline at end of file diff --git a/spaces/sachit-menon/classification_via_description/descriptor_strings.py b/spaces/sachit-menon/classification_via_description/descriptor_strings.py deleted file mode 100644 index ef35feb28f2c43bf2444505e507bae26fc49138f..0000000000000000000000000000000000000000 --- a/spaces/sachit-menon/classification_via_description/descriptor_strings.py +++ /dev/null @@ -1,72 +0,0 @@ -def stringtolist(description): - return [descriptor[2:] for descriptor in description.split('\n') if (descriptor != '') and (descriptor.startswith('- '))] - -def mod_stringtolist(description): - output_list = [] - for descriptor in description.split('\n'): - if descriptor == '': - continue - if descriptor.startswith('- '): - output_list.append(descriptor[2:]) - elif descriptor.startswith('-'): - output_list.append(descriptor[1:]) - return output_list - -def stringtolist_opt(description, prompt_to_trim=None): - if prompt_to_trim is not None: - description = description[len(prompt_to_trim):] - descriptorlist = [] - description = description.split('Q:')[0] - linesplit = description.split('\n') - for i, descriptor in enumerate(linesplit): - if (descriptor != ''): - if (descriptor.startswith('- ')): - descriptor = descriptor[2:] - if descriptor.startswith('- '): - descriptor = descriptor[2:] - elif descriptor == '-': - continue - descriptorlist.append(descriptor) - elif (linesplit[i-1] == '-'): - if descriptor.startswith('- '): - descriptor = descriptor[2:] - elif descriptor == '-': - continue - descriptorlist.append(descriptor) - return descriptorlist - - -def wordify(string): - word = string.replace('_', ' ') - return word - -def make_descriptor_sentence(descriptor): - if descriptor.startswith('a') or descriptor.startswith('an'): - return f"which is {descriptor}" - elif descriptor.startswith('has') or descriptor.startswith('often') or descriptor.startswith('typically') or descriptor.startswith('may') or descriptor.startswith('can'): - return f"which {descriptor}" - elif descriptor.startswith('used'): - return f"which is {descriptor}" - else: - return f"which has {descriptor}" - - -def modify_descriptor(descriptor, apply_changes): - if apply_changes: - return make_descriptor_sentence(descriptor) - return descriptor - - - -openai_imagenet_classes = ["tench", "goldfish", "great white shark", "tiger shark", "hammerhead shark", "electric ray", "stingray", "rooster", "hen", "ostrich", "brambling", "goldfinch", "house finch", "junco", "indigo bunting", "American robin", "bulbul", "jay", "magpie", "chickadee", "American dipper", "kite (bird of prey)", "bald eagle", "vulture", "great grey owl", "fire salamander", "smooth newt", "eft", - "spotted salamander", "axolotl", "American bullfrog", "tree frog", "tailed frog", "loggerhead sea turtle", "leatherback sea turtle", "mud turtle", "terrapin", "box turtle", "banded gecko", "green iguana", "Carolina anole", "desert grassland whiptail lizard", "agama", "frilled-necked lizard", "alligator lizard", "Gila monster", "European green lizard", "chameleon", "Komodo dragon", "Nile crocodile", "American alligator", "triceratops", "worm snake", "ring-necked snake", "eastern hog-nosed snake", "smooth green snake", "kingsnake", "garter snake", "water snake", "vine snake", "night snake", "boa constrictor", "African rock python", "Indian cobra", "green mamba", "sea snake", "Saharan horned viper", "eastern diamondback rattlesnake", "sidewinder 
rattlesnake", "trilobite", "harvestman", "scorpion", "yellow garden spider", "barn spider", "European garden spider", "southern black widow", "tarantula", "wolf spider", "tick", "centipede", "black grouse", "ptarmigan", "ruffed grouse", "prairie grouse", "peafowl", "quail", "partridge", "african grey parrot", "macaw", "sulphur-crested cockatoo", "lorikeet", "coucal", "bee eater", "hornbill", "hummingbird", "jacamar", "toucan", "duck", "red-breasted merganser", "goose", "black swan", "tusker", "echidna", "platypus", "wallaby", "koala", "wombat", "jellyfish", "sea anemone", "brain coral", "flatworm", "nematode", "conch", "snail", "slug", "sea slug", "chiton", "chambered nautilus", "Dungeness crab", "rock crab", "fiddler crab", "red king crab", "American lobster", "spiny lobster", "crayfish", "hermit crab", "isopod", "white stork", "black stork", "spoonbill", "flamingo", "little blue heron", "great egret", "bittern bird", "crane bird", "limpkin", "common gallinule", "American coot", "bustard", "ruddy turnstone", "dunlin", "common redshank", "dowitcher", "oystercatcher", "pelican", "king penguin", "albatross", "grey whale", "killer whale", "dugong", "sea lion", "Chihuahua", "Japanese Chin", "Maltese", "Pekingese", "Shih Tzu", "King Charles Spaniel", "Papillon", "toy terrier", "Rhodesian Ridgeback", "Afghan Hound", "Basset Hound", "Beagle", "Bloodhound", "Bluetick Coonhound", "Black and Tan Coonhound", "Treeing Walker Coonhound", "English foxhound", "Redbone Coonhound", "borzoi", "Irish Wolfhound", "Italian Greyhound", "Whippet", "Ibizan Hound", "Norwegian Elkhound", "Otterhound", "Saluki", "Scottish Deerhound", "Weimaraner", "Staffordshire Bull Terrier", "American Staffordshire Terrier", "Bedlington Terrier", "Border Terrier", "Kerry Blue Terrier", "Irish Terrier", "Norfolk Terrier", "Norwich Terrier", "Yorkshire Terrier", "Wire Fox Terrier", "Lakeland Terrier", "Sealyham Terrier", "Airedale Terrier", "Cairn Terrier", "Australian Terrier", "Dandie Dinmont Terrier", - "Boston Terrier", "Miniature Schnauzer", "Giant Schnauzer", "Standard Schnauzer", "Scottish Terrier", "Tibetan Terrier", "Australian Silky Terrier", "Soft-coated Wheaten Terrier", "West Highland White Terrier", "Lhasa Apso", "Flat-Coated Retriever", "Curly-coated Retriever", "Golden Retriever", "Labrador Retriever", "Chesapeake Bay Retriever", "German Shorthaired Pointer", "Vizsla", "English Setter", "Irish Setter", "Gordon Setter", "Brittany dog", "Clumber Spaniel", "English Springer Spaniel", "Welsh Springer Spaniel", "Cocker Spaniel", "Sussex Spaniel", "Irish Water Spaniel", "Kuvasz", "Schipperke", "Groenendael dog", "Malinois", "Briard", "Australian Kelpie", "Komondor", "Old English Sheepdog", "Shetland Sheepdog", "collie", "Border Collie", "Bouvier des Flandres dog", "Rottweiler", "German Shepherd Dog", "Dobermann", "Miniature Pinscher", "Greater Swiss Mountain Dog", "Bernese Mountain Dog", "Appenzeller Sennenhund", "Entlebucher Sennenhund", "Boxer", "Bullmastiff", "Tibetan Mastiff", "French Bulldog", "Great Dane", "St. 
Bernard", "husky", "Alaskan Malamute", "Siberian Husky", "Dalmatian", "Affenpinscher", "Basenji", "pug", "Leonberger", "Newfoundland dog", "Great Pyrenees dog", "Samoyed", "Pomeranian", "Chow Chow", "Keeshond", "brussels griffon", "Pembroke Welsh Corgi", "Cardigan Welsh Corgi", "Toy Poodle", "Miniature Poodle", "Standard Poodle", "Mexican hairless dog (xoloitzcuintli)", "grey wolf", "Alaskan tundra wolf", "red wolf or maned wolf", "coyote", "dingo", "dhole", "African wild dog", "hyena", "red fox", "kit fox", "Arctic fox", "grey fox", "tabby cat", "tiger cat", "Persian cat", "Siamese cat", "Egyptian Mau", "cougar", "lynx", "leopard", "snow leopard", "jaguar", "lion", "tiger", "cheetah", "brown bear", "American black bear", "polar bear", "sloth bear", "mongoose", "meerkat", "tiger beetle", "ladybug", "ground beetle", "longhorn beetle", "leaf beetle", "dung beetle", "rhinoceros beetle", "weevil", "fly", "bee", "ant", "grasshopper", "cricket insect", "stick insect", "cockroach", "praying mantis", "cicada", "leafhopper", "lacewing", "dragonfly", "damselfly", "red admiral butterfly", "ringlet butterfly", "monarch butterfly", "small white butterfly", "sulphur butterfly", "gossamer-winged butterfly", "starfish", "sea urchin", "sea cucumber", "cottontail rabbit", "hare", "Angora rabbit", "hamster", "porcupine", "fox squirrel", "marmot", "beaver", "guinea pig", "common sorrel horse", "zebra", "pig", "wild boar", "warthog", "hippopotamus", "ox", "water buffalo", "bison", "ram (adult male sheep)", "bighorn sheep", "Alpine ibex", "hartebeest", "impala (antelope)", "gazelle", "arabian camel", "llama", "weasel", "mink", "European polecat", "black-footed ferret", "otter", "skunk", "badger", "armadillo", - "three-toed sloth", "orangutan", "gorilla", "chimpanzee", "gibbon", "siamang", "guenon", "patas monkey", "baboon", "macaque", "langur", "black-and-white colobus", "proboscis monkey", "marmoset", "white-headed capuchin", "howler monkey", "titi monkey", "Geoffroy's spider monkey", "common squirrel monkey", "ring-tailed lemur", "indri", "Asian elephant", "African bush elephant", "red panda", "giant panda", "snoek fish", "eel", "silver salmon", "rock beauty fish", "clownfish", "sturgeon", "gar fish", "lionfish", "pufferfish", "abacus", "abaya", "academic gown", "accordion", "acoustic guitar", "aircraft carrier", "airliner", "airship", "altar", "ambulance", "amphibious vehicle", "analog clock", "apiary", "apron", "trash can", "assault rifle", "backpack", "bakery", "balance beam", "balloon", "ballpoint pen", "Band-Aid", "banjo", "baluster / handrail", "barbell", "barber chair", "barbershop", "barn", "barometer", "barrel", "wheelbarrow", "baseball", "basketball", "bassinet", "bassoon", "swimming cap", "bath towel", "bathtub", "station wagon", "lighthouse", "beaker", "military hat (bearskin or shako)", "beer bottle", "beer glass", "bell tower", "baby bib", "tandem bicycle", "bikini", "ring binder", "binoculars", "birdhouse", "boathouse", "bobsleigh", "bolo tie", "poke bonnet", "bookcase", "bookstore", "bottle cap", "hunting bow", "bow tie", "brass memorial plaque", "bra", "breakwater", "breastplate", "broom", "bucket", "buckle", "bulletproof vest", "high-speed train", "butcher shop", "taxicab", "cauldron", "candle", "cannon", "canoe", "can opener", "cardigan", "car mirror", "carousel", "tool kit", "cardboard box / carton", "car wheel", "automated teller machine", "cassette", "cassette player", "castle", "catamaran", "CD player", "cello", "mobile phone", "chain", "chain-link fence", "chain mail", "chainsaw", 
"storage chest", "chiffonier", "bell or wind chime", "china cabinet", "Christmas stocking", "church", "movie theater", "cleaver", "cliff dwelling", "cloak", "clogs", "cocktail shaker", "coffee mug", "coffeemaker", "spiral or coil", "combination lock", "computer keyboard", "candy store", "container ship", "convertible", "corkscrew", "cornet", "cowboy boot", "cowboy hat", "cradle", "construction crane", "crash helmet", "crate", "infant bed", "Crock Pot", "croquet ball", "crutch", "cuirass", "dam", "desk", "desktop computer", "rotary dial telephone", "diaper", "digital clock", "digital watch", "dining table", "dishcloth", "dishwasher", "disc brake", "dock", "dog sled", "dome", "doormat", "drilling rig", "drum", "drumstick", "dumbbell", "Dutch oven", "electric fan", "electric guitar", "electric locomotive", "entertainment center", "envelope", "espresso machine", "face powder", "feather boa", "filing cabinet", "fireboat", - "fire truck", "fire screen", "flagpole", "flute", "folding chair", "football helmet", "forklift", "fountain", "fountain pen", "four-poster bed", "freight car", "French horn", "frying pan", "fur coat", "garbage truck", "gas mask or respirator", "gas pump", "goblet", "go-kart", "golf ball", "golf cart", "gondola", "gong", "gown", "grand piano", "greenhouse", "radiator grille", "grocery store", "guillotine", "hair clip", "hair spray", "half-track", "hammer", "hamper", "hair dryer", "hand-held computer", "handkerchief", "hard disk drive", "harmonica", "harp", "combine harvester", "hatchet", "holster", "home theater", "honeycomb", "hook", "hoop skirt", "gymnastic horizontal bar", "horse-drawn vehicle", "hourglass", "iPod", "clothes iron", "carved pumpkin", "jeans", "jeep", "T-shirt", "jigsaw puzzle", "rickshaw", "joystick", "kimono", "knee pad", "knot", "lab coat", "ladle", "lampshade", "laptop computer", "lawn mower", "lens cap", "letter opener", "library", "lifeboat", "lighter", "limousine", "ocean liner", "lipstick", "slip-on shoe", "lotion", "music speaker", "loupe magnifying glass", "sawmill", "magnetic compass", "messenger bag", "mailbox","maillot", - "one-piece bathing suit", "manhole cover", "maraca", "marimba", "mask", "matchstick", "maypole", "maze", "measuring cup", "medicine cabinet", "megalith", "microphone", "microwave oven", "military uniform", "milk can", "minibus", "miniskirt", "minivan", "missile", "mitten", "mixing bowl", "mobile home", "ford model t", "modem", "monastery", "monitor", "moped", "mortar and pestle", "graduation cap", "mosque", "mosquito net", "vespa", "mountain bike", "tent", "computer mouse", "mousetrap", "moving van", "muzzle", "metal nail", "neck brace", "necklace", "baby pacifier", "notebook computer", "obelisk", "oboe", "ocarina", "odometer", "oil filter", "pipe organ", "oscilloscope", "overskirt", "bullock cart", "oxygen mask", "product packet / packaging", "paddle", "paddle wheel", "padlock", "paintbrush", "pajamas", "palace", "pan flute", "paper towel", "parachute", "parallel bars", "park bench", "parking meter", "railroad car", "patio", "payphone", "pedestal", "pencil case", "pencil sharpener", "perfume", "Petri dish", "photocopier", "plectrum", "Pickelhaube", "picket fence", "pickup truck", "pier", "piggy bank", "pill bottle", "pillow", "ping-pong ball", "pinwheel", "pirate ship", "drink pitcher", "block plane", "planetarium", "plastic bag", "plate rack", "farm plow", "plunger", "Polaroid camera", "pole", "police van", "poncho", "pool table", "soda bottle", "plant pot", "potter's wheel", "power drill", "prayer rug", "printer", "prison", 
"projectile", "projector", "hockey puck", "punching bag", "purse", "quill", "quilt", "race car", "racket", "radiator", "radio", "radio telescope", "rain barrel", "recreational vehicle", "fishing casting reel", "reflex camera", "refrigerator", "remote control", "restaurant", "revolver", "rifle", "rocking chair", "rotisserie", "eraser", "rugby ball", "ruler measuring stick", "sneaker", "safe", "safety pin", "salt shaker", "sandal", "sarong", "saxophone", "scabbard", "weighing scale", "school bus", "schooner", "scoreboard", "CRT monitor", "screw", "screwdriver", "seat belt", "sewing machine", "shield", "shoe store", "shoji screen / room divider", "shopping basket", "shopping cart", "shovel", "shower cap", "shower curtain", "ski", "balaclava ski mask", "sleeping bag", "slide rule", "sliding door", "slot machine", "snorkel", "snowmobile", "snowplow", "soap dispenser", "soccer ball", "sock", "solar thermal collector", "sombrero", "soup bowl", "keyboard space bar", "space heater", "space shuttle", "spatula", "motorboat", "spider web", "spindle", "sports car", "spotlight", "stage", "steam locomotive", "through arch bridge", "steel drum", "stethoscope", "scarf", "stone wall", "stopwatch", "stove", "strainer", "tram", "stretcher", "couch", "stupa", "submarine", "suit", "sundial", - "sunglass", "sunglasses", "sunscreen", "suspension bridge", "mop", "sweatshirt", "swim trunks / shorts", "swing", "electrical switch", "syringe", "table lamp", "tank", "tape player", "teapot", "teddy bear", "television", "tennis ball", "thatched roof", "front curtain", "thimble", "threshing machine", "throne", "tile roof", "toaster", "tobacco shop", "toilet seat", "torch", "totem pole", "tow truck", "toy store", "tractor", "semi-trailer truck", "tray", "trench coat", "tricycle", "trimaran", "tripod", "triumphal arch", "trolleybus", "trombone", "hot tub", "turnstile", "typewriter keyboard", "umbrella", "unicycle", "upright piano", "vacuum cleaner", "vase", "vaulted or arched ceiling", "velvet fabric", "vending machine", "vestment", "viaduct", "violin", "volleyball", "waffle iron", "wall clock", "wallet", "wardrobe", "military aircraft", "sink", "washing machine", "water bottle", "water jug", "water tower", "whiskey jug", "whistle", "hair wig", "window screen", "window shade", "Windsor tie", "wine bottle", "airplane wing", "wok", "wooden spoon", "wool", "split-rail fence", "shipwreck", "sailboat", "yurt", "website", "comic book", "crossword", "traffic or street sign", "traffic light", "dust jacket", "menu", "plate", "guacamole", "consomme", "hot pot", "trifle", "ice cream", "popsicle", "baguette", "bagel", "pretzel", "cheeseburger", "hot dog", "mashed potatoes", "cabbage", "broccoli", "cauliflower", "zucchini", "spaghetti squash", "acorn squash", "butternut squash", "cucumber", "artichoke", "bell pepper", "cardoon", "mushroom", "Granny Smith apple", "strawberry", "orange", "lemon", "fig", "pineapple", "banana", "jackfruit", "cherimoya (custard apple)", "pomegranate", "hay", "carbonara", "chocolate syrup", "dough", "meatloaf", "pizza", "pot pie", "burrito", "red wine", "espresso", "tea cup", "eggnog", "mountain", "bubble", "cliff", "coral reef", "geyser", "lakeshore", "promontory", "sandbar", "beach", "valley", "volcano", "baseball player", "bridegroom", "scuba diver", "rapeseed", "daisy", "yellow lady's slipper", "corn", "acorn", "rose hip", "horse chestnut seed", "coral fungus", "agaric", "gyromitra", "stinkhorn mushroom", "earth star fungus", "hen of the woods mushroom", "bolete", "corn cob", "toilet paper"] - 
-label_to_classname = openai_imagenet_classes - -make_category_filesafe = lambda string: string.replace("/", "AKA") -reverse_category_filesafe = lambda string: string.replace("AKA", "/") \ No newline at end of file diff --git a/spaces/sarinam/speaker-anonymization-gan/IMSToucan/Layers/MultiLayeredConv1d.py b/spaces/sarinam/speaker-anonymization-gan/IMSToucan/Layers/MultiLayeredConv1d.py deleted file mode 100644 index f2de4a06a06d891fbaca726959b0f0d34d93d7cc..0000000000000000000000000000000000000000 --- a/spaces/sarinam/speaker-anonymization-gan/IMSToucan/Layers/MultiLayeredConv1d.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright 2019 Tomoki Hayashi -# MIT License (https://opensource.org/licenses/MIT) -# Adapted by Florian Lux 2021 - -""" -Layer modules for FFT block in FastSpeech (Feed-forward Transformer). -""" - -import torch - - -class MultiLayeredConv1d(torch.nn.Module): - """ - Multi-layered conv1d for Transformer block. - - This is a module of multi-layered conv1d designed - to replace positionwise feed-forward network - in Transformer block, which is introduced in - `FastSpeech: Fast, Robust and Controllable Text to Speech`_. - - .. _`FastSpeech: Fast, Robust and Controllable Text to Speech`: - https://arxiv.org/pdf/1905.09263.pdf - """ - - def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate): - """ - Initialize MultiLayeredConv1d module. - - Args: - in_chans (int): Number of input channels. - hidden_chans (int): Number of hidden channels. - kernel_size (int): Kernel size of conv1d. - dropout_rate (float): Dropout rate. - """ - super(MultiLayeredConv1d, self).__init__() - self.w_1 = torch.nn.Conv1d(in_chans, hidden_chans, kernel_size, stride=1, padding=(kernel_size - 1) // 2, ) - self.w_2 = torch.nn.Conv1d(hidden_chans, in_chans, kernel_size, stride=1, padding=(kernel_size - 1) // 2, ) - self.dropout = torch.nn.Dropout(dropout_rate) - - def forward(self, x): - """ - Calculate forward propagation. - - Args: - x (torch.Tensor): Batch of input tensors (B, T, in_chans). - - Returns: - torch.Tensor: Batch of output tensors (B, T, hidden_chans). - """ - x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1) - return self.w_2(self.dropout(x).transpose(-1, 1)).transpose(-1, 1) - - -class Conv1dLinear(torch.nn.Module): - """ - Conv1D + Linear for Transformer block. - - A variant of MultiLayeredConv1d, which replaces second conv-layer to linear. - """ - - def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate): - """ - Initialize Conv1dLinear module. - - Args: - in_chans (int): Number of input channels. - hidden_chans (int): Number of hidden channels. - kernel_size (int): Kernel size of conv1d. - dropout_rate (float): Dropout rate. - """ - super(Conv1dLinear, self).__init__() - self.w_1 = torch.nn.Conv1d(in_chans, hidden_chans, kernel_size, stride=1, padding=(kernel_size - 1) // 2, ) - self.w_2 = torch.nn.Linear(hidden_chans, in_chans) - self.dropout = torch.nn.Dropout(dropout_rate) - - def forward(self, x): - """ - Calculate forward propagation. - - Args: - x (torch.Tensor): Batch of input tensors (B, T, in_chans). - - Returns: - torch.Tensor: Batch of output tensors (B, T, hidden_chans). 
- """ - x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1) - return self.w_2(self.dropout(x)) diff --git a/spaces/sasha/WinoBiasCheck/winobias.py b/spaces/sasha/WinoBiasCheck/winobias.py deleted file mode 100644 index ad40b33f895aed977d6c4c62106c73be7acb9ba4..0000000000000000000000000000000000000000 --- a/spaces/sasha/WinoBiasCheck/winobias.py +++ /dev/null @@ -1,94 +0,0 @@ -from pathlib import Path -import math -from datasets import load_dataset -import pandas as pd -from transformers import pipeline -from evaluate import load - - -def generate_sentences(cloze_phrase, bias_pronoun, anti_bias_pronoun): - biased_phrase = cloze_phrase.replace('[MASK]', bias_pronoun) - antibiased_phrase = cloze_phrase.replace('[MASK]', anti_bias_pronoun) - return (biased_phrase, antibiased_phrase) - -def calculate_perplexity(inputlist, mname): - resultsdict={} - perplexity = load("perplexity", module_type="metric") - ppl = perplexity.compute(input_texts=inputlist, model_id=mname, add_start_token=False) - return(ppl['perplexities']) - -def calculate_biases(cloze_phrase, bias_pronoun, anti_bias_pronoun, biased_ppl, anti_biased_ppl): - p_bias = math.pow(1 / biased_ppl, len(cloze_phrase.split())) - p_anti_bias = math.pow(1 / anti_biased_ppl, len(cloze_phrase.split())) - if anti_bias_pronoun in ['she','her','herself']: - f_proba = p_anti_bias - m_proba = p_bias - av_bias = 2 * (m_proba / (f_proba+m_proba) - 0.5) - else: - m_proba = p_anti_bias - f_proba = p_bias - av_bias = 2 * (f_proba / (f_proba+m_proba) - 0.5) - m_bias = 2 * (m_proba / (f_proba+m_proba) - 0.5) - f_bias = 2 * (f_proba / (f_proba+m_proba) - 0.5) - av_bias = max(0, av_bias) - return(p_bias, p_anti_bias, m_bias, f_bias, av_bias) - -def calculate_mlm_bias(cloze_phrase, bias_p, anti_bias_p, mname): - f_bias = 0.0 - m_bias = 0.0 - if 'roberta' in mname.model.name_or_path: - preds = mname(cloze_phrase.replace('[MASK]', '')) - else: - preds = mname(cloze_phrase) - pred_toks = [i['token_str'].strip() for i in preds] - if anti_bias_p in pred_toks: - logit_anti_bias = [i['score'] for i in preds if i['token_str'].strip() == anti_bias_p][0] - else: - logit_anti_bias = 0.0 - if bias_p in pred_toks: - logit_bias = [i['score'] for i in preds if i['token_str'].strip() == bias_p][0] - else: - logit_bias = 0.0 - if anti_bias_p in ['she','her','herself']: - f_proba = 1 / (1 + math.exp(-logit_anti_bias)) - m_proba = 1 / (1 + math.exp(-logit_bias)) - av_bias = 2 * (m_proba / (f_proba+m_proba) - 0.5) - else: - m_proba = 1 / (1 + math.exp(-logit_anti_bias)) - f_proba = 1 / (1 + math.exp(-logit_bias)) - av_bias = 2 * (f_proba / (f_proba+m_proba) - 0.5) - m_bias = 2 * (m_proba / (f_proba+m_proba) - 0.5) - f_bias = 2 * (f_proba / (f_proba+m_proba) - 0.5) - av_bias = max(0, av_bias) - return(m_bias, f_bias, av_bias) - -def calculate_clm_bias(winodset, mname): - winodset[['biased_phrase','anti_biased_phrase']] = winodset.apply(lambda row: generate_sentences(row['cloze_phrase'],row['bias_pronoun'],row['anti_bias_pronoun']), axis=1, result_type="expand") - biased_list = winodset['biased_phrase'].tolist() - unbiased_list = winodset['anti_biased_phrase'].tolist() - winodset['biased_ppl'] = calculate_perplexity(biased_list, mname) - winodset['anti_biased_ppl'] = calculate_perplexity(unbiased_list, mname) - winodset[['p_bias','p_anti_bias', 'm_bias','f_bias', 'av_bias']] = winodset.apply(lambda row: calculate_biases(row['cloze_phrase'],row['bias_pronoun'],row['anti_bias_pronoun'], row['biased_ppl'], row['anti_biased_ppl']), axis=1, result_type="expand") - 
return(winodset) - -def calculate_wino_bias(modelname, modeltype): - winopath = modelname.replace('/','')+'_winobias.csv' - if Path(winopath).is_file(): - print("loading local data") - results_df = pd.read_csv(winopath) - else: - winobias1 = load_dataset("sasha/wino_bias_cloze1", split="test") - winobias2 = load_dataset("sasha/wino_bias_cloze2", split= "test") - wino1_df = pd.DataFrame(winobias1) - wino2_df = pd.DataFrame(winobias2) - results_df= pd.concat([wino1_df, wino2_df], axis=0) - if modeltype == "MLM": - print("Loading MLM!") - unmasker = pipeline('fill-mask', model=modelname, top_k=10) - results_df[['m_bias','f_bias', 'av_bias']] = results_df.apply(lambda x: calculate_mlm_bias(x.cloze_phrase, x.bias_pronoun, x.anti_bias_pronoun, unmasker), axis=1, result_type="expand") - results_df.to_csv(winopath) - elif modeltype == "CLM": - print("Loading CLM!") - results_df= calculate_clm_bias(results_df,modelname) - results_df.to_csv(winopath) - return(results_df) diff --git a/spaces/scedlatioru/img-to-music/example/Gemcraft 2 Chasing Shadows Cracked 12 !!LINK!!.md b/spaces/scedlatioru/img-to-music/example/Gemcraft 2 Chasing Shadows Cracked 12 !!LINK!!.md deleted file mode 100644 index 04329fc24dd113e81cef8e5a2e860643ebf5aed3..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Gemcraft 2 Chasing Shadows Cracked 12 !!LINK!!.md +++ /dev/null @@ -1,24 +0,0 @@ -
    -

    Gemcraft 2 Chasing Shadows: How to Unlock All Gems and Levels

    -

    Gemcraft 2 Chasing Shadows is a popular tower defense game that challenges you to create and combine powerful gems to fend off waves of enemies. The game has hundreds of levels, each with different gem types, enemies, and objectives. But how can you unlock all the gems and levels in the game?

    -

    Gemcraft 2 Chasing Shadows Cracked 12


    Download File » https://gohhs.com/2uEA21



    -

    One way to do that is to use a cracked version of the game that gives you access to all the features without having to pay or play through the game. However, this is not recommended, as it may harm your computer, violate the game's terms of service, and ruin the fun of the game. Instead, you should try to unlock all the gems and levels legitimately by following these tips:

    -
      -
    • Use your skill points wisely. You can earn skill points by completing levels and gaining experience. You can spend skill points on various skills that affect your gems, spells, towers, traps, and more. Some skills also unlock new gem types that you can use in any regular level. For example, if you have a particular color or gem in your skill screen, it should be available on all regular fields[^3^].
    • -
    • Complete special levels. Some levels have special features that make them more challenging or rewarding. For example, some levels have tome chambers that contain powerful spells or traits that you can unlock by breaking them open. Some levels have wizard towers that grant you new skills or abilities if you destroy them. Some levels have apparitions that drop shadow cores if you kill them. Shadow cores can be used to buy talismans or battle traits.
    • -
    • Follow the mysterious compass. There are seven compass fields in the game that have a mysterious compass that points to a direction. If you set all the compasses correctly, you can unlock a secret level that contains a powerful gem and a hidden story[^2^]. The correct directions for the compasses are: E3 - SW, G7 - NE, J4 - SE, K1 - NW, M1 - E, O2 - W, P6 - S.
    • -
    • Play vision fields. Vision fields are special challenges that show you glimpses of the past or the future. They have fixed gem types and conditions that you have to follow. They are usually harder than regular levels, but they also reward you with more experience and shadow cores. You can unlock vision fields by completing certain levels or finding vision stones[^2^].
    • -
    -

    By following these tips, you can unlock all the gems and levels in Gemcraft 2 Chasing Shadows without using a cracked version of the game. This way, you can enjoy the game more and support the developers who made it.

    - -

    Now that you know how to unlock all the gems and levels in Gemcraft 2 Chasing Shadows, you may wonder how to beat them effectively. Here are some tips and tricks that can help you improve your gameplay and score:

    -

    -
      -
    • Combine different gem types. Gems have different effects depending on their color and grade. You can combine gems of different colors to create dual or triple gems that have multiple effects. For example, you can combine a mana leech gem (orange) with a critical hit gem (yellow) to create a gem that drains mana and deals extra damage. You can also combine gems of the same color and grade to create higher grade gems that have stronger effects.
    • -
• Use different structures. You can place your gems in different structures to enhance their performance. Towers are the basic structure that allow your gems to shoot at enemies. Traps are structures that damage enemies when they step on them. Amplifiers are structures that boost the power of nearby gems. Lanterns are structures that increase the range of nearby gems. Pylons are structures that store mana and unleash powerful beams when full.
    • -
    • Cast spells. Spells are abilities that you can use to affect the battlefield. You can cast spells by spending mana or using gem bombs. Some spells are offensive, such as bolt, beam, or barrage, which deal damage to enemies. Some spells are defensive, such as freeze, curse, or enrage, which slow down, weaken, or anger enemies. Some spells are supportive, such as gem enhancement, gem duplication, or summoning, which improve your gems or create more enemies.
    • -
    • Adjust battle traits. Battle traits are modifiers that you can apply to levels to make them harder or easier. You can increase or decrease the level of each trait using shadow cores or skill points. Increasing the level of a trait makes the level harder but also increases the experience and shadow core rewards. Decreasing the level of a trait makes the level easier but also decreases the rewards.
    • -
    -

    By following these tips and tricks, you can beat any level in Gemcraft 2 Chasing Shadows with ease and style. Have fun playing this amazing game!

    d5da3c52bf
    -
    -
    \ No newline at end of file diff --git a/spaces/segments-tobias/conex/espnet/bin/vc_train.py b/spaces/segments-tobias/conex/espnet/bin/vc_train.py deleted file mode 100644 index 30ecfb6ac2f7adde70f28bbdd5a43c248e1c2101..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet/bin/vc_train.py +++ /dev/null @@ -1,368 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright 2020 Nagoya University (Wen-Chin Huang) -# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) - -"""Voice conversion model training script.""" - -import logging -import os -import random -import subprocess -import sys - -import configargparse -import numpy as np - -from espnet import __version__ -from espnet.nets.tts_interface import TTSInterface -from espnet.utils.cli_utils import strtobool -from espnet.utils.training.batchfy import BATCH_COUNT_CHOICES - - -# NOTE: you need this func to generate our sphinx doc -def get_parser(): - """Get parser of training arguments.""" - parser = configargparse.ArgumentParser( - description="Train a new voice conversion (VC) model on one CPU, " - "one or multiple GPUs", - config_file_parser_class=configargparse.YAMLConfigFileParser, - formatter_class=configargparse.ArgumentDefaultsHelpFormatter, - ) - - # general configuration - parser.add("--config", is_config_file=True, help="config file path") - parser.add( - "--config2", - is_config_file=True, - help="second config file path that overwrites the settings in `--config`.", - ) - parser.add( - "--config3", - is_config_file=True, - help="third config file path that overwrites the settings " - "in `--config` and `--config2`.", - ) - - parser.add_argument( - "--ngpu", - default=None, - type=int, - help="Number of GPUs. If not given, use all visible devices", - ) - parser.add_argument( - "--backend", - default="pytorch", - type=str, - choices=["chainer", "pytorch"], - help="Backend library", - ) - parser.add_argument("--outdir", type=str, required=True, help="Output directory") - parser.add_argument("--debugmode", default=1, type=int, help="Debugmode") - parser.add_argument("--seed", default=1, type=int, help="Random seed") - parser.add_argument( - "--resume", - "-r", - default="", - type=str, - nargs="?", - help="Resume the training from snapshot", - ) - parser.add_argument( - "--minibatches", - "-N", - type=int, - default="-1", - help="Process only N minibatches (for debug)", - ) - parser.add_argument("--verbose", "-V", default=0, type=int, help="Verbose option") - parser.add_argument( - "--tensorboard-dir", - default=None, - type=str, - nargs="?", - help="Tensorboard log directory path", - ) - parser.add_argument( - "--eval-interval-epochs", - default=100, - type=int, - help="Evaluation interval epochs", - ) - parser.add_argument( - "--save-interval-epochs", default=1, type=int, help="Save interval epochs" - ) - parser.add_argument( - "--report-interval-iters", - default=10, - type=int, - help="Report interval iterations", - ) - # task related - parser.add_argument("--srcspk", type=str, help="Source speaker") - parser.add_argument("--trgspk", type=str, help="Target speaker") - parser.add_argument( - "--train-json", type=str, required=True, help="Filename of training json" - ) - parser.add_argument( - "--valid-json", type=str, required=True, help="Filename of validation json" - ) - - # network architecture - parser.add_argument( - "--model-module", - type=str, - default="espnet.nets.pytorch_backend.e2e_tts_tacotron2:Tacotron2", - help="model defined module", - ) - # minibatch related - parser.add_argument( - 
"--sortagrad", - default=0, - type=int, - nargs="?", - help="How many epochs to use sortagrad for. 0 = deactivated, -1 = all epochs", - ) - parser.add_argument( - "--batch-sort-key", - default="shuffle", - type=str, - choices=["shuffle", "output", "input"], - nargs="?", - help='Batch sorting key. "shuffle" only work with --batch-count "seq".', - ) - parser.add_argument( - "--batch-count", - default="auto", - choices=BATCH_COUNT_CHOICES, - help="How to count batch_size. " - "The default (auto) will find how to count by args.", - ) - parser.add_argument( - "--batch-size", - "--batch-seqs", - "-b", - default=0, - type=int, - help="Maximum seqs in a minibatch (0 to disable)", - ) - parser.add_argument( - "--batch-bins", - default=0, - type=int, - help="Maximum bins in a minibatch (0 to disable)", - ) - parser.add_argument( - "--batch-frames-in", - default=0, - type=int, - help="Maximum input frames in a minibatch (0 to disable)", - ) - parser.add_argument( - "--batch-frames-out", - default=0, - type=int, - help="Maximum output frames in a minibatch (0 to disable)", - ) - parser.add_argument( - "--batch-frames-inout", - default=0, - type=int, - help="Maximum input+output frames in a minibatch (0 to disable)", - ) - parser.add_argument( - "--maxlen-in", - "--batch-seq-maxlen-in", - default=100, - type=int, - metavar="ML", - help="When --batch-count=seq, " - "batch size is reduced if the input sequence length > ML.", - ) - parser.add_argument( - "--maxlen-out", - "--batch-seq-maxlen-out", - default=200, - type=int, - metavar="ML", - help="When --batch-count=seq, " - "batch size is reduced if the output sequence length > ML", - ) - parser.add_argument( - "--num-iter-processes", - default=0, - type=int, - help="Number of processes of iterator", - ) - parser.add_argument( - "--preprocess-conf", - type=str, - default=None, - help="The configuration file for the pre-processing", - ) - parser.add_argument( - "--use-speaker-embedding", - default=False, - type=strtobool, - help="Whether to use speaker embedding", - ) - parser.add_argument( - "--use-second-target", - default=False, - type=strtobool, - help="Whether to use second target", - ) - # optimization related - parser.add_argument( - "--opt", - default="adam", - type=str, - choices=["adam", "noam", "lamb"], - help="Optimizer", - ) - parser.add_argument( - "--accum-grad", default=1, type=int, help="Number of gradient accumuration" - ) - parser.add_argument( - "--lr", default=1e-3, type=float, help="Learning rate for optimizer" - ) - parser.add_argument("--eps", default=1e-6, type=float, help="Epsilon for optimizer") - parser.add_argument( - "--weight-decay", - default=1e-6, - type=float, - help="Weight decay coefficient for optimizer", - ) - parser.add_argument( - "--epochs", "-e", default=30, type=int, help="Number of maximum epochs" - ) - parser.add_argument( - "--early-stop-criterion", - default="validation/main/loss", - type=str, - nargs="?", - help="Value to monitor to trigger an early stopping of the training", - ) - parser.add_argument( - "--patience", - default=3, - type=int, - nargs="?", - help="Number of epochs to wait without improvement " - "before stopping the training", - ) - parser.add_argument( - "--grad-clip", default=1, type=float, help="Gradient norm threshold to clip" - ) - parser.add_argument( - "--num-save-attention", - default=5, - type=int, - help="Number of samples of attention to be saved", - ) - parser.add_argument( - "--keep-all-data-on-mem", - default=False, - type=strtobool, - help="Whether to keep all data on memory", 
- ) - - parser.add_argument( - "--enc-init", - default=None, - type=str, - help="Pre-trained model path to initialize encoder.", - ) - parser.add_argument( - "--enc-init-mods", - default="enc.", - type=lambda s: [str(mod) for mod in s.split(",") if s != ""], - help="List of encoder modules to initialize, separated by a comma.", - ) - parser.add_argument( - "--dec-init", - default=None, - type=str, - help="Pre-trained model path to initialize decoder.", - ) - parser.add_argument( - "--dec-init-mods", - default="dec.", - type=lambda s: [str(mod) for mod in s.split(",") if s != ""], - help="List of decoder modules to initialize, separated by a comma.", - ) - parser.add_argument( - "--freeze-mods", - default=None, - type=lambda s: [str(mod) for mod in s.split(",") if s != ""], - help="List of modules to freeze (not to train), separated by a comma.", - ) - - return parser - - -def main(cmd_args): - """Run training.""" - parser = get_parser() - args, _ = parser.parse_known_args(cmd_args) - - from espnet.utils.dynamic_import import dynamic_import - - model_class = dynamic_import(args.model_module) - assert issubclass(model_class, TTSInterface) - model_class.add_arguments(parser) - args = parser.parse_args(cmd_args) - - # add version info in args - args.version = __version__ - - # logging info - if args.verbose > 0: - logging.basicConfig( - level=logging.INFO, - format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", - ) - else: - logging.basicConfig( - level=logging.WARN, - format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", - ) - logging.warning("Skip DEBUG/INFO messages") - - # If --ngpu is not given, - # 1. if CUDA_VISIBLE_DEVICES is set, all visible devices - # 2. if nvidia-smi exists, use all devices - # 3. else ngpu=0 - if args.ngpu is None: - cvd = os.environ.get("CUDA_VISIBLE_DEVICES") - if cvd is not None: - ngpu = len(cvd.split(",")) - else: - logging.warning("CUDA_VISIBLE_DEVICES is not set.") - try: - p = subprocess.run( - ["nvidia-smi", "-L"], stdout=subprocess.PIPE, stderr=subprocess.PIPE - ) - except (subprocess.CalledProcessError, FileNotFoundError): - ngpu = 0 - else: - ngpu = len(p.stderr.decode().split("\n")) - 1 - else: - ngpu = args.ngpu - logging.info(f"ngpu: {ngpu}") - - # set random seed - logging.info("random seed = %d" % args.seed) - random.seed(args.seed) - np.random.seed(args.seed) - - if args.backend == "pytorch": - from espnet.vc.pytorch_backend.vc import train - - train(args) - else: - raise NotImplementedError("Only pytorch is supported.") - - -if __name__ == "__main__": - main(sys.argv[1:]) diff --git a/spaces/segments-tobias/conex/espnet2/tts/abs_tts.py b/spaces/segments-tobias/conex/espnet2/tts/abs_tts.py deleted file mode 100644 index d226b678069327fc5b1581e073140a08ad5e1c04..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet2/tts/abs_tts.py +++ /dev/null @@ -1,30 +0,0 @@ -from abc import ABC -from abc import abstractmethod -from typing import Dict -from typing import Tuple - -import torch - - -class AbsTTS(torch.nn.Module, ABC): - @abstractmethod - def forward( - self, - text: torch.Tensor, - text_lengths: torch.Tensor, - speech: torch.Tensor, - speech_lengths: torch.Tensor, - spembs: torch.Tensor = None, - spcs: torch.Tensor = None, - spcs_lengths: torch.Tensor = None, - ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], torch.Tensor]: - raise NotImplementedError - - @abstractmethod - def inference( - self, - text: torch.Tensor, - spembs: torch.Tensor = None, - **kwargs, - ) -> 
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - raise NotImplementedError diff --git a/spaces/segments/panoptic-segment-anything-api/GroundingDINO/groundingdino/models/GroundingDINO/utils.py b/spaces/segments/panoptic-segment-anything-api/GroundingDINO/groundingdino/models/GroundingDINO/utils.py deleted file mode 100644 index 5bd18f70225e12b2e27fdb4eabcde91d959f8e31..0000000000000000000000000000000000000000 --- a/spaces/segments/panoptic-segment-anything-api/GroundingDINO/groundingdino/models/GroundingDINO/utils.py +++ /dev/null @@ -1,268 +0,0 @@ -# ------------------------------------------------------------------------ -# Grounding DINO -# url: https://github.com/IDEA-Research/GroundingDINO -# Copyright (c) 2023 IDEA. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ - -import copy -import math - -import torch -import torch.nn.functional as F -from torch import Tensor, nn - - -def _get_clones(module, N, layer_share=False): - # import ipdb; ipdb.set_trace() - if layer_share: - return nn.ModuleList([module for i in range(N)]) - else: - return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) - - -def get_sine_pos_embed( - pos_tensor: torch.Tensor, - num_pos_feats: int = 128, - temperature: int = 10000, - exchange_xy: bool = True, -): - """generate sine position embedding from a position tensor - Args: - pos_tensor (torch.Tensor): shape: [..., n]. - num_pos_feats (int): projected shape for each float in the tensor. - temperature (int): temperature in the sine/cosine function. - exchange_xy (bool, optional): exchange pos x and pos y. \ - For example, input tensor is [x,y], the results will be [pos(y), pos(x)]. Defaults to True. - Returns: - pos_embed (torch.Tensor): shape: [..., n*num_pos_feats]. 
- """ - scale = 2 * math.pi - dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos_tensor.device) - dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / num_pos_feats) - - def sine_func(x: torch.Tensor): - sin_x = x * scale / dim_t - sin_x = torch.stack((sin_x[..., 0::2].sin(), sin_x[..., 1::2].cos()), dim=3).flatten(2) - return sin_x - - pos_res = [sine_func(x) for x in pos_tensor.split([1] * pos_tensor.shape[-1], dim=-1)] - if exchange_xy: - pos_res[0], pos_res[1] = pos_res[1], pos_res[0] - pos_res = torch.cat(pos_res, dim=-1) - return pos_res - - -def gen_encoder_output_proposals( - memory: Tensor, memory_padding_mask: Tensor, spatial_shapes: Tensor, learnedwh=None -): - """ - Input: - - memory: bs, \sum{hw}, d_model - - memory_padding_mask: bs, \sum{hw} - - spatial_shapes: nlevel, 2 - - learnedwh: 2 - Output: - - output_memory: bs, \sum{hw}, d_model - - output_proposals: bs, \sum{hw}, 4 - """ - N_, S_, C_ = memory.shape - proposals = [] - _cur = 0 - for lvl, (H_, W_) in enumerate(spatial_shapes): - mask_flatten_ = memory_padding_mask[:, _cur : (_cur + H_ * W_)].view(N_, H_, W_, 1) - valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1) - valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1) - - # import ipdb; ipdb.set_trace() - - grid_y, grid_x = torch.meshgrid( - torch.linspace(0, H_ - 1, H_, dtype=torch.float32, device=memory.device), - torch.linspace(0, W_ - 1, W_, dtype=torch.float32, device=memory.device), - ) - grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) # H_, W_, 2 - - scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N_, 1, 1, 2) - grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale - - if learnedwh is not None: - # import ipdb; ipdb.set_trace() - wh = torch.ones_like(grid) * learnedwh.sigmoid() * (2.0**lvl) - else: - wh = torch.ones_like(grid) * 0.05 * (2.0**lvl) - - # scale = torch.cat([W_[None].unsqueeze(-1), H_[None].unsqueeze(-1)], 1).view(1, 1, 1, 2).repeat(N_, 1, 1, 1) - # grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale - # wh = torch.ones_like(grid) / scale - proposal = torch.cat((grid, wh), -1).view(N_, -1, 4) - proposals.append(proposal) - _cur += H_ * W_ - # import ipdb; ipdb.set_trace() - output_proposals = torch.cat(proposals, 1) - output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all( - -1, keepdim=True - ) - output_proposals = torch.log(output_proposals / (1 - output_proposals)) # unsigmoid - output_proposals = output_proposals.masked_fill(memory_padding_mask.unsqueeze(-1), float("inf")) - output_proposals = output_proposals.masked_fill(~output_proposals_valid, float("inf")) - - output_memory = memory - output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float(0)) - output_memory = output_memory.masked_fill(~output_proposals_valid, float(0)) - - # output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float('inf')) - # output_memory = output_memory.masked_fill(~output_proposals_valid, float('inf')) - - return output_memory, output_proposals - - -class RandomBoxPerturber: - def __init__( - self, x_noise_scale=0.2, y_noise_scale=0.2, w_noise_scale=0.2, h_noise_scale=0.2 - ) -> None: - self.noise_scale = torch.Tensor( - [x_noise_scale, y_noise_scale, w_noise_scale, h_noise_scale] - ) - - def __call__(self, refanchors: Tensor) -> Tensor: - nq, bs, query_dim = refanchors.shape - device = refanchors.device - - noise_raw = torch.rand_like(refanchors) - noise_scale = 
self.noise_scale.to(device)[:query_dim] - - new_refanchors = refanchors * (1 + (noise_raw - 0.5) * noise_scale) - return new_refanchors.clamp_(0, 1) - - -def sigmoid_focal_loss( - inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2, no_reduction=False -): - """ - Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. - Args: - inputs: A float tensor of arbitrary shape. - The predictions for each example. - targets: A float tensor with the same shape as inputs. Stores the binary - classification label for each element in inputs - (0 for the negative class and 1 for the positive class). - alpha: (optional) Weighting factor in range (0,1) to balance - positive vs negative examples. Default = -1 (no weighting). - gamma: Exponent of the modulating factor (1 - p_t) to - balance easy vs hard examples. - Returns: - Loss tensor - """ - prob = inputs.sigmoid() - ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none") - p_t = prob * targets + (1 - prob) * (1 - targets) - loss = ce_loss * ((1 - p_t) ** gamma) - - if alpha >= 0: - alpha_t = alpha * targets + (1 - alpha) * (1 - targets) - loss = alpha_t * loss - - if no_reduction: - return loss - - return loss.mean(1).sum() / num_boxes - - -class MLP(nn.Module): - """Very simple multi-layer perceptron (also called FFN)""" - - def __init__(self, input_dim, hidden_dim, output_dim, num_layers): - super().__init__() - self.num_layers = num_layers - h = [hidden_dim] * (num_layers - 1) - self.layers = nn.ModuleList( - nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]) - ) - - def forward(self, x): - for i, layer in enumerate(self.layers): - x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) - return x - - -def _get_activation_fn(activation, d_model=256, batch_dim=0): - """Return an activation function given a string""" - if activation == "relu": - return F.relu - if activation == "gelu": - return F.gelu - if activation == "glu": - return F.glu - if activation == "prelu": - return nn.PReLU() - if activation == "selu": - return F.selu - - raise RuntimeError(f"activation should be relu/gelu, not {activation}.") - - -def gen_sineembed_for_position(pos_tensor): - # n_query, bs, _ = pos_tensor.size() - # sineembed_tensor = torch.zeros(n_query, bs, 256) - scale = 2 * math.pi - dim_t = torch.arange(128, dtype=torch.float32, device=pos_tensor.device) - dim_t = 10000 ** (2 * (torch.div(dim_t, 2, rounding_mode='floor')) / 128) - x_embed = pos_tensor[:, :, 0] * scale - y_embed = pos_tensor[:, :, 1] * scale - pos_x = x_embed[:, :, None] / dim_t - pos_y = y_embed[:, :, None] / dim_t - pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3).flatten(2) - pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3).flatten(2) - if pos_tensor.size(-1) == 2: - pos = torch.cat((pos_y, pos_x), dim=2) - elif pos_tensor.size(-1) == 4: - w_embed = pos_tensor[:, :, 2] * scale - pos_w = w_embed[:, :, None] / dim_t - pos_w = torch.stack((pos_w[:, :, 0::2].sin(), pos_w[:, :, 1::2].cos()), dim=3).flatten(2) - - h_embed = pos_tensor[:, :, 3] * scale - pos_h = h_embed[:, :, None] / dim_t - pos_h = torch.stack((pos_h[:, :, 0::2].sin(), pos_h[:, :, 1::2].cos()), dim=3).flatten(2) - - pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=2) - else: - raise ValueError("Unknown pos_tensor shape(-1):{}".format(pos_tensor.size(-1))) - return pos - - -class ContrastiveEmbed(nn.Module): - def __init__(self, max_text_len=256): - """ - Args: - max_text_len: max length of text. 
- """ - super().__init__() - self.max_text_len = max_text_len - - def forward(self, x, text_dict): - """_summary_ - - Args: - x (_type_): _description_ - text_dict (_type_): _description_ - { - 'encoded_text': encoded_text, # bs, 195, d_model - 'text_token_mask': text_token_mask, # bs, 195 - # True for used tokens. False for padding tokens - } - Returns: - _type_: _description_ - """ - assert isinstance(text_dict, dict) - - y = text_dict["encoded_text"] - text_token_mask = text_dict["text_token_mask"] - - res = x @ y.transpose(-1, -2) - res.masked_fill_(~text_token_mask[:, None, :], float("-inf")) - - # padding to max_text_len - new_res = torch.full((*res.shape[:-1], self.max_text_len), float("-inf"), device=res.device) - new_res[..., : res.shape[-1]] = res - - return new_res diff --git a/spaces/segments/panoptic-segment-anything-api/GroundingDINO/groundingdino/util/visualizer.py b/spaces/segments/panoptic-segment-anything-api/GroundingDINO/groundingdino/util/visualizer.py deleted file mode 100644 index 7a1b7b101e9b73f75f9136bc67f2063c7c1cf1c1..0000000000000000000000000000000000000000 --- a/spaces/segments/panoptic-segment-anything-api/GroundingDINO/groundingdino/util/visualizer.py +++ /dev/null @@ -1,318 +0,0 @@ -# -*- coding: utf-8 -*- -""" -@File : visualizer.py -@Time : 2022/04/05 11:39:33 -@Author : Shilong Liu -@Contact : slongliu86@gmail.com -""" - -import datetime -import os - -import cv2 -import matplotlib.pyplot as plt -import numpy as np -import torch -from matplotlib import transforms -from matplotlib.collections import PatchCollection -from matplotlib.patches import Polygon -from pycocotools import mask as maskUtils - - -def renorm( - img: torch.FloatTensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] -) -> torch.FloatTensor: - # img: tensor(3,H,W) or tensor(B,3,H,W) - # return: same as img - assert img.dim() == 3 or img.dim() == 4, "img.dim() should be 3 or 4 but %d" % img.dim() - if img.dim() == 3: - assert img.size(0) == 3, 'img.size(0) shoule be 3 but "%d". (%s)' % ( - img.size(0), - str(img.size()), - ) - img_perm = img.permute(1, 2, 0) - mean = torch.Tensor(mean) - std = torch.Tensor(std) - img_res = img_perm * std + mean - return img_res.permute(2, 0, 1) - else: # img.dim() == 4 - assert img.size(1) == 3, 'img.size(1) shoule be 3 but "%d". (%s)' % ( - img.size(1), - str(img.size()), - ) - img_perm = img.permute(0, 2, 3, 1) - mean = torch.Tensor(mean) - std = torch.Tensor(std) - img_res = img_perm * std + mean - return img_res.permute(0, 3, 1, 2) - - -class ColorMap: - def __init__(self, basergb=[255, 255, 0]): - self.basergb = np.array(basergb) - - def __call__(self, attnmap): - # attnmap: h, w. np.uint8. - # return: h, w, 4. np.uint8. - assert attnmap.dtype == np.uint8 - h, w = attnmap.shape - res = self.basergb.copy() - res = res[None][None].repeat(h, 0).repeat(w, 1) # h, w, 3 - attn1 = attnmap.copy()[..., None] # h, w, 1 - res = np.concatenate((res, attn1), axis=-1).astype(np.uint8) - return res - - -def rainbow_text(x, y, ls, lc, **kw): - """ - Take a list of strings ``ls`` and colors ``lc`` and place them next to each - other, with text ls[i] being shown in color lc[i]. - - This example shows how to do both vertical and horizontal text, and will - pass all keyword arguments to plt.text, so you can set the font size, - family, etc. 
- """ - t = plt.gca().transData - fig = plt.gcf() - plt.show() - - # horizontal version - for s, c in zip(ls, lc): - text = plt.text(x, y, " " + s + " ", color=c, transform=t, **kw) - text.draw(fig.canvas.get_renderer()) - ex = text.get_window_extent() - t = transforms.offset_copy(text._transform, x=ex.width, units="dots") - - # #vertical version - # for s,c in zip(ls,lc): - # text = plt.text(x,y," "+s+" ",color=c, transform=t, - # rotation=90,va='bottom',ha='center',**kw) - # text.draw(fig.canvas.get_renderer()) - # ex = text.get_window_extent() - # t = transforms.offset_copy(text._transform, y=ex.height, units='dots') - - -class COCOVisualizer: - def __init__(self, coco=None, tokenlizer=None) -> None: - self.coco = coco - - def visualize(self, img, tgt, caption=None, dpi=180, savedir="vis"): - """ - img: tensor(3, H, W) - tgt: make sure they are all on cpu. - must have items: 'image_id', 'boxes', 'size' - """ - plt.figure(dpi=dpi) - plt.rcParams["font.size"] = "5" - ax = plt.gca() - img = renorm(img).permute(1, 2, 0) - # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO': - # import ipdb; ipdb.set_trace() - ax.imshow(img) - - self.addtgt(tgt) - - if tgt is None: - image_id = 0 - elif "image_id" not in tgt: - image_id = 0 - else: - image_id = tgt["image_id"] - - if caption is None: - savename = "{}/{}-{}.png".format( - savedir, int(image_id), str(datetime.datetime.now()).replace(" ", "-") - ) - else: - savename = "{}/{}-{}-{}.png".format( - savedir, caption, int(image_id), str(datetime.datetime.now()).replace(" ", "-") - ) - print("savename: {}".format(savename)) - os.makedirs(os.path.dirname(savename), exist_ok=True) - plt.savefig(savename) - plt.close() - - def addtgt(self, tgt): - """ """ - if tgt is None or not "boxes" in tgt: - ax = plt.gca() - - if "caption" in tgt: - ax.set_title(tgt["caption"], wrap=True) - - ax.set_axis_off() - return - - ax = plt.gca() - H, W = tgt["size"] - numbox = tgt["boxes"].shape[0] - - color = [] - polygons = [] - boxes = [] - for box in tgt["boxes"].cpu(): - unnormbbox = box * torch.Tensor([W, H, W, H]) - unnormbbox[:2] -= unnormbbox[2:] / 2 - [bbox_x, bbox_y, bbox_w, bbox_h] = unnormbbox.tolist() - boxes.append([bbox_x, bbox_y, bbox_w, bbox_h]) - poly = [ - [bbox_x, bbox_y], - [bbox_x, bbox_y + bbox_h], - [bbox_x + bbox_w, bbox_y + bbox_h], - [bbox_x + bbox_w, bbox_y], - ] - np_poly = np.array(poly).reshape((4, 2)) - polygons.append(Polygon(np_poly)) - c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0] - color.append(c) - - p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.1) - ax.add_collection(p) - p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2) - ax.add_collection(p) - - if "strings_positive" in tgt and len(tgt["strings_positive"]) > 0: - assert ( - len(tgt["strings_positive"]) == numbox - ), f"{len(tgt['strings_positive'])} = {numbox}, " - for idx, strlist in enumerate(tgt["strings_positive"]): - cate_id = int(tgt["labels"][idx]) - _string = str(cate_id) + ":" + " ".join(strlist) - bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx] - # ax.text(bbox_x, bbox_y, _string, color='black', bbox={'facecolor': 'yellow', 'alpha': 1.0, 'pad': 1}) - ax.text( - bbox_x, - bbox_y, - _string, - color="black", - bbox={"facecolor": color[idx], "alpha": 0.6, "pad": 1}, - ) - - if "box_label" in tgt: - assert len(tgt["box_label"]) == numbox, f"{len(tgt['box_label'])} = {numbox}, " - for idx, bl in enumerate(tgt["box_label"]): - _string = str(bl) - bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx] - # ax.text(bbox_x, 
bbox_y, _string, color='black', bbox={'facecolor': 'yellow', 'alpha': 1.0, 'pad': 1}) - ax.text( - bbox_x, - bbox_y, - _string, - color="black", - bbox={"facecolor": color[idx], "alpha": 0.6, "pad": 1}, - ) - - if "caption" in tgt: - ax.set_title(tgt["caption"], wrap=True) - # plt.figure() - # rainbow_text(0.0,0.0,"all unicorns poop rainbows ! ! !".split(), - # ['red', 'orange', 'brown', 'green', 'blue', 'purple', 'black']) - - if "attn" in tgt: - # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO': - # import ipdb; ipdb.set_trace() - if isinstance(tgt["attn"], tuple): - tgt["attn"] = [tgt["attn"]] - for item in tgt["attn"]: - attn_map, basergb = item - attn_map = (attn_map - attn_map.min()) / (attn_map.max() - attn_map.min() + 1e-3) - attn_map = (attn_map * 255).astype(np.uint8) - cm = ColorMap(basergb) - heatmap = cm(attn_map) - ax.imshow(heatmap) - ax.set_axis_off() - - def showAnns(self, anns, draw_bbox=False): - """ - Display the specified annotations. - :param anns (array of object): annotations to display - :return: None - """ - if len(anns) == 0: - return 0 - if "segmentation" in anns[0] or "keypoints" in anns[0]: - datasetType = "instances" - elif "caption" in anns[0]: - datasetType = "captions" - else: - raise Exception("datasetType not supported") - if datasetType == "instances": - ax = plt.gca() - ax.set_autoscale_on(False) - polygons = [] - color = [] - for ann in anns: - c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0] - if "segmentation" in ann: - if type(ann["segmentation"]) == list: - # polygon - for seg in ann["segmentation"]: - poly = np.array(seg).reshape((int(len(seg) / 2), 2)) - polygons.append(Polygon(poly)) - color.append(c) - else: - # mask - t = self.imgs[ann["image_id"]] - if type(ann["segmentation"]["counts"]) == list: - rle = maskUtils.frPyObjects( - [ann["segmentation"]], t["height"], t["width"] - ) - else: - rle = [ann["segmentation"]] - m = maskUtils.decode(rle) - img = np.ones((m.shape[0], m.shape[1], 3)) - if ann["iscrowd"] == 1: - color_mask = np.array([2.0, 166.0, 101.0]) / 255 - if ann["iscrowd"] == 0: - color_mask = np.random.random((1, 3)).tolist()[0] - for i in range(3): - img[:, :, i] = color_mask[i] - ax.imshow(np.dstack((img, m * 0.5))) - if "keypoints" in ann and type(ann["keypoints"]) == list: - # turn skeleton into zero-based index - sks = np.array(self.loadCats(ann["category_id"])[0]["skeleton"]) - 1 - kp = np.array(ann["keypoints"]) - x = kp[0::3] - y = kp[1::3] - v = kp[2::3] - for sk in sks: - if np.all(v[sk] > 0): - plt.plot(x[sk], y[sk], linewidth=3, color=c) - plt.plot( - x[v > 0], - y[v > 0], - "o", - markersize=8, - markerfacecolor=c, - markeredgecolor="k", - markeredgewidth=2, - ) - plt.plot( - x[v > 1], - y[v > 1], - "o", - markersize=8, - markerfacecolor=c, - markeredgecolor=c, - markeredgewidth=2, - ) - - if draw_bbox: - [bbox_x, bbox_y, bbox_w, bbox_h] = ann["bbox"] - poly = [ - [bbox_x, bbox_y], - [bbox_x, bbox_y + bbox_h], - [bbox_x + bbox_w, bbox_y + bbox_h], - [bbox_x + bbox_w, bbox_y], - ] - np_poly = np.array(poly).reshape((4, 2)) - polygons.append(Polygon(np_poly)) - color.append(c) - - # p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4) - # ax.add_collection(p) - p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2) - ax.add_collection(p) - elif datasetType == "captions": - for ann in anns: - print(ann["caption"]) diff --git a/spaces/shariqfarooq/ZoeDepth/gradio_pano_to_3d.py b/spaces/shariqfarooq/ZoeDepth/gradio_pano_to_3d.py deleted file mode 100644 index 
fff147876c3d87a625134c5f0739bbcb039e69a7..0000000000000000000000000000000000000000 --- a/spaces/shariqfarooq/ZoeDepth/gradio_pano_to_3d.py +++ /dev/null @@ -1,96 +0,0 @@ -import gradio as gr -import numpy as np -import trimesh -from geometry import create_triangles -from functools import partial -import tempfile - -def depth_edges_mask(depth): - """Returns a mask of edges in the depth map. - Args: - depth: 2D numpy array of shape (H, W) with dtype float32. - Returns: - mask: 2D numpy array of shape (H, W) with dtype bool. - """ - # Compute the x and y gradients of the depth map. - depth_dx, depth_dy = np.gradient(depth) - # Compute the gradient magnitude. - depth_grad = np.sqrt(depth_dx ** 2 + depth_dy ** 2) - # Compute the edge mask. - mask = depth_grad > 0.05 - return mask - - -def pano_depth_to_world_points(depth): - """ - 360 depth to world points - given 2D depth is an equirectangular projection of a spherical image - Treat depth as radius - - longitude : -pi to pi - latitude : -pi/2 to pi/2 - """ - - # Convert depth to radius - radius = depth.flatten() - - lon = np.linspace(-np.pi, np.pi, depth.shape[1]) - lat = np.linspace(-np.pi/2, np.pi/2, depth.shape[0]) - - lon, lat = np.meshgrid(lon, lat) - lon = lon.flatten() - lat = lat.flatten() - - # Convert to cartesian coordinates - x = radius * np.cos(lat) * np.cos(lon) - y = radius * np.cos(lat) * np.sin(lon) - z = radius * np.sin(lat) - - pts3d = np.stack([x, y, z], axis=1) - - return pts3d - - -def predict_depth(model, image): - depth = model.infer_pil(image) - return depth - -def get_mesh(model, image, keep_edges=False): - image.thumbnail((1024,1024)) # limit the size of the image - depth = predict_depth(model, image) - pts3d = pano_depth_to_world_points(depth) - - # Create a trimesh mesh from the points - # Each pixel is connected to its 4 neighbors - # colors are the RGB values of the image - - verts = pts3d.reshape(-1, 3) - image = np.array(image) - if keep_edges: - triangles = create_triangles(image.shape[0], image.shape[1]) - else: - triangles = create_triangles(image.shape[0], image.shape[1], mask=~depth_edges_mask(depth)) - colors = image.reshape(-1, 3) - mesh = trimesh.Trimesh(vertices=verts, faces=triangles, vertex_colors=colors) - - # Save as glb - glb_file = tempfile.NamedTemporaryFile(suffix='.glb', delete=False) - glb_path = glb_file.name - mesh.export(glb_path) - return glb_path - -def create_demo(model): - gr.Markdown("### Panorama to 3D mesh") - gr.Markdown("Convert a 360 spherical panorama to a 3D mesh") - gr.Markdown("ZoeDepth was not trained on panoramic images. It doesn't know anything about panoramas or spherical projection. Here, we just treat the estimated depth as radius and some projection errors are expected. 
Nonetheless, ZoeDepth still works surprisingly well on 360 reconstruction.") - - with gr.Row(): - input_image = gr.Image(label="Input Image", type='pil') - result = gr.Model3D(label="3d mesh reconstruction", clear_color=[ - 1.0, 1.0, 1.0, 1.0]) - - checkbox = gr.Checkbox(label="Keep occlusion edges", value=True) - submit = gr.Button("Submit") - submit.click(partial(get_mesh, model), inputs=[input_image, checkbox], outputs=[result]) - examples = gr.Examples(examples=["examples/pano_1.jpeg", "examples/pano_2.jpeg", "examples/pano_3.jpeg"], - inputs=[input_image]) \ No newline at end of file diff --git a/spaces/shikunl/prismer/prismer/experts/obj_detection/unidet/data/datasets/viper.py b/spaces/shikunl/prismer/prismer/experts/obj_detection/unidet/data/datasets/viper.py deleted file mode 100644 index 02df61ddd2daf54d0a81c7ccdeb918e3c0f0a0d4..0000000000000000000000000000000000000000 --- a/spaces/shikunl/prismer/prismer/experts/obj_detection/unidet/data/datasets/viper.py +++ /dev/null @@ -1,38 +0,0 @@ -from detectron2.data.datasets.register_coco import register_coco_instances -import os - -categories = [ - {'id': 13, 'name': 'trafficlight', 'supercategory': ''}, - {'id': 16, 'name': 'firehydrant', 'supercategory': ''}, - {'id': 17, 'name': 'chair', 'supercategory': ''}, - {'id': 19, 'name': 'trashcan', 'supercategory': ''}, - {'id': 20, 'name': 'person', 'supercategory': ''}, - {'id': 23, 'name': 'motorcycle', 'supercategory': ''}, - {'id': 24, 'name': 'car', 'supercategory': ''}, - {'id': 25, 'name': 'van', 'supercategory': ''}, - {'id': 26, 'name': 'bus', 'supercategory': ''}, - {'id': 27, 'name': 'truck', 'supercategory': ''}, -] - - -def _get_builtin_metadata(): - thing_dataset_id_to_contiguous_id = { - x['id']: i for i, x in enumerate(sorted(categories, key=lambda x: x['id']))} - thing_classes = [x['name'] for x in sorted(categories, key=lambda x: x['id'])] - return { - "thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id, - "thing_classes": thing_classes} - -_PREDEFINED_SPLITS_VIPER = { - "viper_train": ("viper/train/img", "viper/train/viper_instances_train.json"), - "viper_val": ("viper/val/img", "viper/val/viper_instances_val.json"), - "viper_test": ("viper/test/img", "viper/test/viper_instances_test_image_info.json"), -} - -for key, (image_root, json_file) in _PREDEFINED_SPLITS_VIPER.items(): - register_coco_instances( - key, - _get_builtin_metadata(), - os.path.join("datasets", json_file) if "://" not in json_file else json_file, - os.path.join("datasets", image_root), - ) diff --git a/spaces/sil-ai/model-license/SIL-AI-RAIL-M.html b/spaces/sil-ai/model-license/SIL-AI-RAIL-M.html deleted file mode 100644 index aeb03ed62f5dda3aa1817cfcc721a99a474c1a41..0000000000000000000000000000000000000000 --- a/spaces/sil-ai/model-license/SIL-AI-RAIL-M.html +++ /dev/null @@ -1 +0,0 @@ -

    This is a license (the “License”) between you (“You”) and SIL International (“Licensor”). This Responsible AI License (RAIL) aims at SIL’s goal of working openly and permissively while striving for responsible use of the Model.

    Section I: PREAMBLE

The lack of freely-available, pre-trained Natural Language Processing (“NLP”) models in the majority of the world’s languages is concerning, and there is a need to greatly expand the linguistic diversity represented in mainstream NLP research. However, the release of open access models and datasets representing Indigenous People can also backfire into exploitation of or discrimination against these same communities.

    This RAIL-M License, created by SIL International, strives to facilitate the public release of NLP models representing indigenous languages while safeguarding against downstream usage that might harm those in indigenous communities.

    As it relates to access, distribution, and general responsible use, we took inspiration from Open RAIL-M licenses, such as the CreativeML Open RAIL-M license. The notable difference here (which shifts the current license into a RAIL-M category rather than an Open RAIL-M category) is a restriction on downstream commercial use of the Model. This use restriction is meant to prevent exploitative downstream use without any clear benefits flowing back to indigenous communities.

    Further, we introduce use-based restrictions not permitting the use of the Model in scenarios that would harm or enable discrimination against Indigenous People as outlined in Articles 2; 13-16; and 31 of the United Nations Declaration on the Rights of Indigenous People.

This License governs the use of the Model (and its derivatives) and is informed by the Model card associated with the Model. Publicly released derivative versions of the Model will always have to include - at minimum - the same use-based restrictions as the ones in the original license (this license).

    NOW THEREFORE, You and Licensor agree as follows:

    1.  Definitions

    (a) “License” means the terms and conditions for use, reproduction, and Distribution as defined in this document.

    (b) “Data” means a collection of information and/or content extracted from the dataset used with the Model, including to train, pretrain, or otherwise evaluate the Model. The Data is not licensed under this License.

    (c) “Output” means the results of operating a Model as embodied in informational content resulting therefrom.

    (d) “Model” means any accompanying machine-learning based assemblies (including checkpoints), consisting of learnt weights, parameters (including optimizer states), corresponding to the model architecture as embodied in the Complementary Material, that have been trained or tuned, in whole or in part on the Data, using the Complementary Material.

    (e) “Derivatives of the Model” means all modifications to the Model, works based on the Model, or any other model which is created or initialized by transfer of patterns of the weights, parameters, activations or output of the Model, to the other model, in order to cause the other model to perform similarly to the Model, including - but not limited to - distillation methods entailing the use of intermediate data representations or methods based on the generation of synthetic data by the Model for training the other model.

(f) “Complementary Material” means the accompanying source code and scripts used to define, run, load, benchmark or evaluate the Model, and used to prepare data for training or evaluation, if any. This includes any accompanying documentation, tutorials, examples, etc, if any.

    (g) “Distribution” means any transmission, reproduction, publication or other sharing of the Model or Derivatives of the Model to a third party, including providing the Model as a hosted service made available by electronic or other remote means - e.g. API-based or web access.

(h) “Licensor” means the copyright owner or entity authorized by the copyright owner that is granting the License, including the persons or entities that may have rights in the Model and/or distributing the Model.

(i) “You” (or “Your”) means an individual or Legal Entity exercising permissions granted by this License and/or making use of the Model for whichever purpose and in any field of use, including usage of the Model in an end-use application - e.g. chatbot, translator, image generator.

(j) “Third Parties” means individuals or legal entities that are not under common control with Licensor or You.

(k) “Contribution” means any work of authorship, including the original version of the Model and any modifications or additions to that Model or Derivatives of the Model thereof, that is intentionally submitted to Licensor for inclusion in the Model by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, “submitted” means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Model, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."

(l) “Contributor” means Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Model.

    (m) “Indigenous People” means, in accordance with The UN Declaration on the Rights of Indigenous Peoples, any group of people that self-identifies as indigenous in accordance with their customs and traditions. By way of example and without limitation, Indigenous People may include: (i) tribal peoples in independent countries whose social, cultural and economic conditions distinguish them from other sections of the national community and whose status is regulated wholly or partially by their own customs or traditions or by special laws or regulations; (ii) peoples in independent countries who regard themselves and are regarded by others as indigenous on account of their descent from the populations which inhabited the country, or a geographical region to which the country belongs, at the time of conquest or colonization or the establishment of present state boundaries and who irrespective of their legal status, retain some or all of their own social, economic, cultural and political institutions; (iii) peoples otherwise described by i and/or ii who have been forcefully displaced from their country of origin or those who traditionally have maintained and continue to maintain a nomadic or itinerant lifestyle.

    (n) “Commercial Use” means any use of the Model, Derivatives of the Model and Complementary Material primarily intended for or directed toward commercial advantage or monetary compensation.

    (o) “Natural Language” means spoken, signed, or written communication as used by a community of people in their daily life and for their own purposes.

(p) “Natural Language Processing” (or “NLP”) means machine or computer methods (often a Model) that allow computers to understand and communicate using Natural Language.

    Section II: INTELLECTUAL PROPERTY RIGHTS

    Both copyright and patent grants apply to the Model, Derivatives of the Model and Complementary Material. The Model and Derivatives of the Model are subject to additional terms as described in Section III.

    2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare, publicly display, publicly perform, sublicense, and distribute the Complementary Material, the Model, and Derivatives of the Model.

    3. Grant of Patent License. Subject to the terms and conditions of this License and where and as applicable, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this paragraph) patent license to make, have made, use, import, and otherwise transfer the Model and the Complementary Material, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Model to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Model and/or Complementary Material or a Contribution incorporated within the Model and/or Complementary Material constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for the Model and/or Work shall terminate as of the date such litigation is asserted or filed.

    Section III: CONDITIONS OF USAGE, DISTRIBUTION AND REDISTRIBUTION

4. Distribution and Redistribution. You may host for Third Party remote access purposes (e.g. software-as-a-service), reproduce and distribute copies of the Model or Derivatives of the Model thereof in any medium, with or without modifications, provided that You meet the following conditions:

    a. Use-based restrictions as referenced in paragraph 5 MUST be included as an enforceable provision by You in any type of legal agreement (e.g. a license) governing the use and/or distribution of the Model or Derivatives of the Model, and You shall give notice to subsequent users You Distribute to, that the Model or Derivatives of the Model are subject to paragraph 5. This provision does not apply to the use of Complementary Material.

    b. You must give any Third Party recipients of the Model or Derivatives of the Model a copy of this License;

    c. You must cause any modified files to carry prominent notices stating that You changed the files;

    d. You must retain all copyright, patent, trademark, and attribution notices excluding those notices that do not pertain to any part of the Model, Derivatives of the Model. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions - respecting paragraph 4.a. - for use, reproduction, or Distribution of Your modifications, or for any such Derivatives of the Model as a whole, provided Your use, reproduction, and Distribution of the Model otherwise complies with the conditions stated in this License.

    5. Use-based restrictions. The restrictions set forth in Attachment A are considered Use-based restrictions. Therefore You cannot use the Model and the Derivatives of the Model for the specified restricted uses. You may use the Model subject to this License, including only for lawful purposes and in accordance with the License. Use may include creating any content with, finetuning, updating, running, training, evaluating and/or reparametrizing the Model. You shall require all of Your users who use the Model or a Derivative of the Model to comply with the terms of this paragraph (paragraph 5).

    6. The Output You Generate. Except as set forth herein, Licensor claims no rights in the Output You generate using the Model. You are accountable for the Output you generate and its subsequent uses. No use of the output can contravene any provision as stated in the License.

    Section IV: OTHER PROVISIONS

    7. Updates and Runtime Restrictions. To the maximum extent permitted by law, Licensor reserves the right to restrict (remotely or otherwise) usage of the Model in violation of this License, update the Model through electronic means, or modify the Output of the Model based on updates. You shall undertake reasonable efforts to use the latest version of the Model.

    8. Trademarks and related. Nothing in this License permits You to make use of Licensors’ trademarks, trade names, logos or to otherwise suggest endorsement or misrepresent the relationship between the parties; and any rights not expressly granted herein are reserved by the Licensors.

    9. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Model and the Complementary Material (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Model, Derivatives of the Model, and the Complementary Material and assume any risks associated with Your exercise of permissions under this License.

    10. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Model and the Complementary Material (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.

11. Accepting Warranty or Additional Liability. While redistributing the Model, Derivatives of the Model and the Complementary Material thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.

12. If any provision of this License is held to be invalid, illegal or unenforceable, the remaining provisions shall be unaffected thereby and remain valid as if such provision had not been set forth herein.

    END OF TERMS AND CONDITIONS

    Attachment A

    Use Restrictions

    You agree not to use the Model or Derivatives of the Model:

    (a) For any commercial purposes primarily intended for or directed toward commercial advantage or monetary compensation;

    (b) In any way that violates any applicable national, federal, state, local or international law or regulation;

    (c) For the purpose of exploiting, harming or attempting to exploit or harm minors in any way;

    (d) To generate or disseminate verifiably false information and/or content with the purpose of harming others;

    (e) To generate or disseminate personal identifiable information that can be used to harm an individual;

    (f) To generate or disseminate information and/or content (e.g. images, code, posts, articles), and place the information and/or content in any context (e.g. bot generating tweets) without expressly and intelligibly disclaiming that the information and/or content is machine generated;

    (g) To defame, disparage or otherwise harass others;

    (h) To impersonate or attempt to impersonate (e.g. deepfakes) others without their consent;

    (i) For fully automated decision making that adversely impacts an individual’s legal rights or otherwise creates or modifies a binding, enforceable obligation;

    (j) For any use intended to or which has the effect of discriminating against or harming individuals or groups based on online or offline social behavior or known or predicted personal or personality characteristics;

    (k) To exploit any of the vulnerabilities of a specific group of persons based on their age, social, physical or mental characteristics, in order to materially distort the behavior of a person pertaining to that group in a manner that causes or is likely to cause that person or another person physical or psychological harm;

    (l) For any use intended to or which has the effect of harming or enabling discrimination against individuals or groups based on legally protected characteristics or categories, including but not limited to discrimination against Indigenous People as outlined in Articles 2; 13-16; and 31 of the United Nations Declaration on the Rights of Indigenous People, 13 September 2007 and as subsequently amended and revised;

    (m) To provide medical advice and medical results interpretation;

    (n) To generate or disseminate information for the purpose to be used for administration of justice, law enforcement, immigration or asylum processes, such as predicting an individual will commit fraud/crime commitment (e.g. by text profiling, drawing causal relationships between assertions made in documents, indiscriminate and arbitrarily-targeted use).

    \ No newline at end of file diff --git a/spaces/silencewing/server/youyou/.history/math_20230613232455.html b/spaces/silencewing/server/youyou/.history/math_20230613232455.html deleted file mode 100644 index 86563d122bd05560731f3c625bc10f7b7cb710c8..0000000000000000000000000000000000000000 --- a/spaces/silencewing/server/youyou/.history/math_20230613232455.html +++ /dev/null @@ -1,234 +0,0 @@ - - - - - - - - - - Document - - - - -
    题目 (Question)
    答案 (Answer)
    正误 (Right/Wrong)
    得分 (Score)
    - - - - diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download 32 bit Java for Windows - Offline and Online Installers.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download 32 bit Java for Windows - Offline and Online Installers.md deleted file mode 100644 index 78b825f8f0f8f25d874a6667594f8115cfe67b9d..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download 32 bit Java for Windows - Offline and Online Installers.md +++ /dev/null @@ -1,166 +0,0 @@ - -

    How to Download 32 Bit Java for Windows

    -

    Java is one of the most popular and widely used programming languages in the world. It enables you to run applications and games on your computer, as well as on various devices such as smartphones, tablets, smart TVs, and more. In this article, we will show you how to download 32 bit Java for Windows, as well as how to check if you have Java installed, how to download 64 bit Java, and how to update Java on your computer.

    -

    What is Java and Why Do You Need It?

    -

    Java is a programming language and a platform

    -

    Java is not only a programming language, but also a platform that consists of a set of tools, libraries, and frameworks that allow developers to create applications and games for different purposes. Java is known for its portability, meaning that it can run on different operating systems and devices without requiring any modifications. This is possible because of the Java Virtual Machine (JVM), which is a software component that interprets and executes Java code.

    -

    download 32 bit java


    Download File 🔗 https://ssurll.com/2uNShn



    -

    Java enables you to run applications and games on your computer

    -

    Many applications and games that you use on your computer are written in Java or require Java to run. Some desktop browsers have historically relied on the Java plug-in to display interactive web content, and popular online games such as Minecraft and RuneScape still run on Java. Without Java installed, you would not be able to use these applications and games.

    -

    Java is compatible with different operating systems and browsers

    -

    One of the advantages of Java is that it is compatible with different operating systems, such as Windows, Mac OS X, Linux, Solaris, etc. This means that you can run the same application or game on different computers without any problems. Moreover, Java is also compatible with different browsers, such as Internet Explorer, Edge, Chrome, Firefox, Safari, Opera, etc. This means that you can access web content that requires Java on different browsers without any issues.

    -

    How to Check If You Have Java Installed on Your Computer

    -

    Open the Command Prompt and type java -version

    -

    The easiest way to check if you have Java installed on your computer is to use the Command Prompt. To do this, follow these steps:

    -
    1. Press the Windows key + R to open the Run dialog box.
    2. Type cmd and press Enter to open the Command Prompt.
    3. Type java -version and press Enter to check the Java version.

    If you see a message with the Java version number, such as java version "1.8.0_301", you have Java installed on your computer. If you see an error message, such as 'java' is not recognized as an internal or external command, you need to download and install Java.
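
    The java -version check above is usually all you need, but if you prefer to confirm the installation from code, the short Java program below prints the same details using standard system properties. This is only an illustrative sketch (the class name VersionCheck is arbitrary), and it can obviously run only once a JDK or JRE is already installed:

    ```java
    // VersionCheck.java: illustrative sketch, not part of the official Java tooling.
    public class VersionCheck {
        public static void main(String[] args) {
            // "java.version" is the standard property holding the runtime version, e.g. "1.8.0_301" or "17.0.2"
            System.out.println("Java version: " + System.getProperty("java.version"));
            // "java.vendor" and "java.home" help confirm which installation is actually being used
            System.out.println("Vendor:       " + System.getProperty("java.vendor"));
            System.out.println("Install path: " + System.getProperty("java.home"));
        }
    }
    ```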

    -

    If you see a message with the Java version number, you have Java installed

    -

    If you have Java installed on your computer, you can skip the next section and go to the section on how to download 64 bit Java if you need it. However, you should also check if your Java is up to date and update it if necessary. You can do this by following the steps in the section on how to update Java on your computer.

    -

    If you see an error message, you need to download and install Java

    -

    If you do not have Java installed on your computer, or if your Java is outdated, you need to download and install the latest version of Java. You can do this by following the steps in the next section on how to download 32 bit Java for Windows.

    -

    How to Download 32 Bit Java for Windows

    -

    Visit the official Java website and choose the Windows option

    -

    The first step to download 32 bit Java for Windows is to visit the official Java website at https://www.java.com/en/. On the homepage, you will see a button that says "Java Download". Click on it and you will be taken to a page where you can choose the Windows option. Alternatively, you can directly go to https://www.java.com/en/download/manual.jsp and choose the Windows option from there.

    -

    download 32 bit java for windows 10
    -download 32 bit java offline installer
    -download 32 bit java jdk
    -download 32 bit java runtime environment
    -download 32 bit java for windows 7
    -download 32 bit java for mac
    -download 32 bit java for linux
    -download 32 bit java latest version
    -download 32 bit java update
    -download 32 bit java se
    -download 32 bit java development kit
    -download 32 bit java virtual machine
    -download 32 bit java oracle
    -download 32 bit java web start
    -download 32 bit java eclipse
    -download 32 bit java netbeans
    -download 32 bit java minecraft
    -download 32 bit java plugin for chrome
    -download 32 bit java plugin for firefox
    -download 32 bit java plugin for internet explorer
    -download 32 bit java software free
    -download 32 bit java old version
    -download 32 bit java jre8
    -download 32 bit java jre9
    -download 32 bit java jre10
    -download 32 bit java jre11
    -download 32 bit java jre12
    -download 32 bit java jre13
    -download 32 bit java jre14
    -download 32 bit java jre15
    -download 32 bit java jre16
    -download 32 bit java jre17
    -download 32 bit java jdk8
    -download 32 bit java jdk9
    -download 32 bit java jdk10
    -download 32 bit java jdk11
    -download 32 bit java jdk12
    -download 32 bit java jdk13
    -download 32 bit java jdk14
    -download 32 bit java jdk15
    -download 32 bit java jdk16
    -download 32 bit java jdk17
    -how to download and install 32-bit Java on Windows PC?

    -

    Select the Windows Online or Windows Offline file depending on your preference

    -

    On the Windows download page, you will see two options: Windows Online and Windows Offline. The Windows Online file is smaller and requires an internet connection during the installation process. The Windows Offline file is larger and does not require an internet connection during the installation process. You can choose either option depending on your preference and internet speed. Click on the file name and save it to your computer.

    -

    Save the file and run it to start the installation process

    -

    Once you have saved the file, locate it on your computer and double-click on it to run it. This will start the installation process of 32 bit Java for Windows. You will see a window that asks you to confirm that you want to run the file. Click on Run and proceed to the next step.

    -

    Follow the instructions on the screen and agree to the terms and conditions

    -

    The installation process of 32 bit Java for Windows is simple and straightforward. You just need to follow the instructions on the screen and agree to the terms and conditions. You can also change the installation directory if you want, but it is recommended to leave it as default. Click on Install and wait for the installation to complete.

    -

    Restart your browser to enable Java in your browser

    -

    After the installation is complete, you will see a message that says "You have successfully installed Java". You will also see a button that says "Close". Click on it and restart your browser to enable Java in your browser. You can also verify that Java is working by visiting https://www.java.com/en/download/installed.jsp and clicking on "Verify Java version". If you see a message that says "Congratulations! You have the recommended Java installed", you have successfully downloaded 32 bit Java for Windows.

    -

    How to Download 64 Bit Java for Windows

    -

    If you use 32-bit and 64-bit browsers interchangeably, you need to install both versions of Java

    -

    If you use both 32-bit and 64-bit browsers interchangeably on your computer, such as Internet Explorer 11 (32-bit) and Chrome (64-bit), you need to install both versions of Java: 32 bit and 64 bit. This is because each browser requires a different version of Java to run properly. If you only install one version of Java, some browsers may not be able to access web content that requires Java.

    -

    Visit the official Java website and choose the Windows option

    -

    The first step to download 64 bit Java for Windows is similar to the first step for downloading 32 bit Java for Windows. You need to visit the official Java website at https://www.java.com/en/ or https://www.java.com/en/download/manual.jsp and choose the Windows option from there.

    -

    Select the Windows Offline (64-bit) file and save it

    -

    On the Windows download page, you will see an option for Windows Offline (64-bit). This is the file that you need to download 64 bit Java for Windows. Click on the file name and save it to your computer.

    -

    Run the file and follow the instructions on the screen

    -

    Once you have saved the file, locate it on your computer and double-click on it to run it. This will start the installation process of 64 bit Java for Windows. You will see a window that asks you to confirm that you want to run the file. Click on Run and proceed to the next step.

    -

    Follow the instructions on the screen and agree to the terms and conditions

    -

    The installation process of 64 bit Java for Windows is similar to the installation process of 32 bit Java for Windows. You just need to follow the instructions on the screen and agree to the terms and conditions. You can also change the installation directory if you want, but it is recommended to leave it as default. Click on Install and wait for the installation to complete.

    -

    Restart your browser to enable Java in your browser

    -

    After the installation is complete, you will see a message that says "You have successfully installed Java". You will also see a button that says "Close". Click on it and restart your browser to enable Java in your browser. You can also verify that Java is working by visiting https://www.java.com/en/download/installed.jsp and clicking on "Verify Java version". If you see a message that says "Congratulations! You have the recommended Java installed", you have successfully downloaded 64 bit Java for Windows.

    -

    How to Update Java on Your Computer

    -

    Java updates are important for security and performance reasons

    -

    Java updates are important for security and performance reasons. They fix bugs, improve compatibility, enhance features, and protect your computer from malicious attacks. Therefore, you should always keep your Java up to date and install the latest version as soon as possible.

    -

    You can check for updates manually or automatically

    -

    You can check for updates manually or automatically depending on your preference. To check for updates manually, you need to open the Java Control Panel and click on Update. To check for updates automatically, you need to enable the automatic update feature in the Java Control Panel. You can also download the latest version of Java from the official website.

    -

    To check for updates manually, open the Java Control Panel and click on Update

    -

    To check for updates manually, you need to open the Java Control Panel and click on Update. To do this, follow these steps:

    -
    1. Press the Windows key + R to open the Run dialog box.
    2. Type control panel and press Enter to open the Control Panel.
    3. Click on Programs and then click on Java. This will open the Java Control Panel.
    4. Click on Update. This will open a window that shows whether any updates are available.
    5. If there are, click on Update Now. This will start the update process. Follow the instructions on the screen and agree to the terms and conditions.
    6. Restart your browser to enable the updated version of Java in your browser.

    To check for updates automatically, enable the automatic update feature in the Java Control Panel

    -

    To check for updates automatically, you need to enable the automatic update feature in the Java Control Panel. To do this, follow these steps:

    -
    1. Press the Windows key + R to open the Run dialog box.
    2. Type control panel and press Enter to open the Control Panel.
    3. Click on Programs and then click on Java. This will open the Java Control Panel.
    4. Click on Update. This will open a window that shows the current update settings.
    5. Click on Change Settings. This will open a window that allows you to change the update settings.
    6. Check the box that says "Check for Updates Automatically". You can also choose how often you want to check for updates and when you want to be notified.
    7. Click on OK to save the changes.

    Once you have enabled the automatic update feature, Java will check for updates periodically and notify you if there are any available. You can then choose to install them or not.

    -

    You can also download the latest version of Java from the official website

    -

    Another way to update Java on your computer is to download the latest version of Java from the official website. To do this, follow these steps:

    -
    1. Visit the official Java website at https://www.java.com/en/.
    2. Click on "Java Download" and choose the Windows option.
    3. Select the file that matches your system requirements and save it to your computer.
    4. Run the file and follow the instructions on the screen.
    5. Restart your browser to enable the updated version of Java in your browser.

    Conclusion

    -

    In this article, we have shown you how to download 32 bit Java for Windows, as well as how to check if you have Java installed, how to download 64 bit Java, and how to update Java on your computer. We hope that this article has been helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!

    -

    FAQs

    -

    What is the difference between 32 bit and 64 bit Java?

    -

    The difference between 32 bit and 64 bit Java is mainly related to the amount of memory that they can use. 32 bit Java can use up to 4 GB of memory, while 64 bit Java can use more than that. This means that 64 bit Java can run faster and more efficiently than 32 bit Java, especially for applications and games that require a lot of memory. However, 64 bit Java also requires a 64 bit operating system and a 64 bit browser to run properly.
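
    If you are unsure which flavor of Java is currently running on your machine, a few system properties reveal it. The sketch below is illustrative only: "os.arch" is a standard property, while "sun.arch.data.model" is an assumption that holds on HotSpot-based JVMs (Oracle, OpenJDK) but may be missing on other runtimes.

    ```java
    // BitnessCheck.java: a minimal sketch for telling a 32-bit JVM from a 64-bit one.
    public class BitnessCheck {
        public static void main(String[] args) {
            String dataModel = System.getProperty("sun.arch.data.model", "unknown"); // "32" or "64" on HotSpot JVMs
            String arch = System.getProperty("os.arch");                             // e.g. "x86" or "amd64"
            System.out.println("JVM data model:   " + dataModel + "-bit");
            System.out.println("JVM architecture: " + arch);
        }
    }
    ```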

    -

    How do I know if I have a 32 bit or a 64 bit operating system?

    -

    To know if you have a 32 bit or a 64 bit operating system, you can follow these steps:

    -
    1. Press the Windows key + R to open the Run dialog box.
    2. Type msinfo32 and press Enter to open the System Information window.
    3. Look for the System Type field under System Summary.
    4. If it says x86-based PC, you have a 32 bit operating system. If it says x64-based PC, you have a 64 bit operating system.

    How do I know if I have a 32 bit or a 64 bit browser?

    -

    To know if you have a 32 bit or a 64 bit browser, you can follow these steps:

    -
    1. Open your browser and go to https://www.java.com/en/download/help/browser_java_version.xml.
    2. This page will show you the Java version and the browser version that you are using.
    3. If it says 32-bit, you have a 32 bit browser. If it says 64-bit, you have a 64 bit browser.

    How do I uninstall Java from my computer?

    -

    To uninstall Java from your computer, you can follow these steps:

    -
    1. Press the Windows key + R to open the Run dialog box.
    2. Type appwiz.cpl and press Enter to open the Programs and Features window.
    3. Look for Java in the list of programs and click on it.
    4. Click on Uninstall and follow the instructions on the screen.
    5. Restart your computer to complete the uninstallation process.

    How do I disable Java in my browser?

    -

    To disable Java in your browser, you can follow these steps:

    -
    1. Open your browser and go to the settings or options menu.
    2. Look for the advanced settings or privacy and security settings.
    3. Look for the content settings or site settings.
    4. Look for the JavaScript settings or Java settings.
    5. Toggle the switch or check the box to disable Java in your browser.

    -
    -
    \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Fast like a Fox for Free and Enjoy the Fastest Platformer Ever.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Fast like a Fox for Free and Enjoy the Fastest Platformer Ever.md deleted file mode 100644 index 7920d416f78b7a69d2f65c72e4453b41d35b86cb..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Fast like a Fox for Free and Enjoy the Fastest Platformer Ever.md +++ /dev/null @@ -1,95 +0,0 @@ -
    -

    Fast like a Fox Download: How to Play One of the Most Fun and Fastest Platformers Ever

    -

    If you are looking for a fun and fast platformer game that will test your skills and reflexes, you should try Fast like a Fox. This game is one of the most innovative and exciting games ever created with unique tap control. In this article, we will tell you what Fast like a Fox is, how to download it, and how to play it.

    -

    What is Fast like a Fox?

    -

    Fast like a Fox is a game developed by WayBefore Ltd. It was released in 2015 and has received over 5 million downloads and 4.4 stars on Google Play Store. It is also available on GameLoop and CNET Download.

    -

    fast like a fox download


    Download File > https://ssurll.com/2uNU0E



    -

    The story and the gameplay

    -

    The game is about a fox who has to return the stolen treasures of the Golden Fox tribe to their rightful owners. The fox is controlled by tapping the back of your device, which uses your device's internal sensors to detect movement. You have to dash through various worlds, such as cloud temple, snow-covered valleys, mountain tops, fiery caves, subways and city highways. You have to avoid obstacles, enemies, and traps along the way.

    -

    The graphics and the music

    -

    The game has a cool geometry and vector-based graphics inspired by retro console games. The colors are vibrant and the animations are smooth. The game also has an upbeat soundtrack that matches the fast-paced action. The music changes with each world and does not reset or cut off between levels or level resets, which creates a seamless experience.

    -

    The features and the challenges

    -

    The game has many features and challenges that make it fun and replayable. You can unlock everything by playing, without any in-app purchases or ads. You can collect all coins, emeralds and diamonds in each level, which are used to unlock new worlds and characters. You can also beat the time records to get the best score and rank on the leaderboard. The game has a challenging Final Run that will test your skills and reflexes. If you manage to beat it, you will unlock a secret character with special power.

    -

    How to download Fast like a Fox?

    -

    There are different ways to download Fast like a Fox, depending on your device and preference. Here are some of them:

    -

    Download from Google Play Store

    -

    If you have an Android device, you can download Fast like a Fox from Google Play Store for free. Just search for "Fast like a Fox" on the store or click on this link: [Fast like a Fox - Apps on Google Play](^1^). Then, tap on "Install" and wait for the game to download and install on your device.

    -

    Download from GameLoop

    -

    If you want to play Fast like a Fox on your PC, you can download it from GameLoop, which is an emulator that allows you to play Android games on your computer. To do this, you need to download GameLoop from this link: [Download Fast like a Fox android on PC - GameLoop](^2^). Then, install GameLoop on your PC and launch it. Search for "Fast like a Fox" on GameLoop or click on this link: [Fast like a Fox - GameLoop](^2^). Then, click on "Download" and wait for the game to download and install on GameLoop.

    -

    Download from CNET Download

    -


    If you have an iOS device, you can download Fast like a Fox from CNET Download, which is a website that offers free software downloads for various platforms. To do this, you need to visit this link: [Fast like a Fox for iOS - Free download and software reviews - CNET Download]. Then, tap on "Download Now" and wait for the game to download on your device. Then, open the game and enjoy.

    -

    fast like a fox game download
    -download fast like a fox apk
    -fast like a fox free download for pc
    -how to download fast like a fox on android
    -fast like a fox mod apk download
    -download fast like a fox for windows 10
    -fast like a fox online game no download
    -fast like a fox app download
    -fast like a fox full version download
    -fast like a fox download ios
    -fast like a fox android game download
    -download fast like a fox for mac
    -fast like a fox offline game download
    -fast like a fox hack apk download
    -fast like a fox play store download
    -fast like a fox pc game download
    -fast like a fox apk free download latest version
    -fast like a fox game download for laptop
    -fast like a fox unlimited gems apk download
    -fast like a fox apk pure download
    -fast like a fox game free download for android
    -fast like a fox apk mirror download
    -fast like a fox old version apk download
    -fast like a fox game online play without download
    -fast like a fox apk mod unlimited money download
    -fast like a fox game download apkpure
    -how to play fast like a fox without downloading it
    -fast like a fox apk file download
    -fast like a fox game free download for pc windows 7
    -fast like a fox revdl apk download
    -fast like a fox uptodown apk download
    -how to install fast like a fox on pc
    -fast like a fox rexdl apk download
    -where can i download fast like a fox for free
    -how to update fast like a fox game
    -how to uninstall fast like a fox from android device
    -how to get more gems in fast like a fox game
    -how to unlock all levels in fast like a fox game
    -how to play fast like a fox with keyboard on pc
    -how to fix fast like a fox not working issue

    -

    How to play Fast like a Fox?

    -

    Playing Fast like a Fox is easy and fun, but it also requires skill and practice. Here are some tips on how to play the game:

    -

    Learn the tapping technique

    -

    The most important thing to learn in Fast like a Fox is how to tap the back of your device to control the fox. You need to tap lightly and quickly to make the fox run fast, and tap harder and longer to make the fox jump high. You can also tap the screen to jump, but it is less responsive and accurate. You can adjust the sensitivity of the tapping in the settings menu. You can also choose between two modes: normal mode and hard mode. In normal mode, you can tap anywhere on the back of your device, while in hard mode, you have to tap on a specific area marked by a fox paw.

    -

    Collect all coins, emeralds and diamonds

    -

    In each level, there are coins, emeralds and diamonds that you can collect. Coins are the most common and easy to find, while emeralds and diamonds are more rare and hidden. Collecting them will increase your score and help you unlock new worlds and characters. You can see how many coins, emeralds and diamonds you have collected in each level on the map screen. You can also see how many you have missed and replay the level to find them.

    -

    Beat the time records and unlock a secret character

    -

    Another challenge in Fast like a Fox is to beat the time records in each level. You can see the time record for each level on the map screen, as well as your own best time. If you manage to beat the time record, you will get a gold medal and a higher rank on the leaderboard. You can also unlock a secret character if you beat all the time records in all levels. The secret character is faster and more agile than the fox, but also harder to control.

    -

    Conclusion

    -

    Fast like a Fox is a game that will keep you entertained and challenged for hours. It has a unique tap control that makes it different from other platformers. It has a cool graphics and music that create a fun atmosphere. It has many features and challenges that make it replayable and rewarding. If you want to play one of the most fun and fastest platformers ever, you should download Fast like a Fox today.

    -

    Why you should try Fast like a Fox?

    -

    Here are some reasons why you should try Fast like a Fox:

    -
    • It is free and easy to download.
    • It is fun and fast-paced.
    • It is innovative and original.
    • It is challenging and skill-based.
    • It is rewarding and satisfying.

    FAQs

    -

    Here are some frequently asked questions about Fast like a Fox:

    -
    1. How many levels are there in Fast like a Fox?
       There are 12 worlds in Fast like a Fox, each with 4 levels. That makes 48 levels in total.
    2. How many characters are there in Fast like a Fox?
       There are 6 characters in Fast like a Fox: the fox, the panda, the tiger, the bear, the rabbit, and the secret character. You can unlock them by collecting coins, emeralds and diamonds.
    3. What is the Final Run in Fast like a Fox?
       The Final Run is the last challenge in Fast like a Fox. It is a long and hard level that combines all the worlds in one. You have to complete it without dying or restarting. If you do, you will unlock the secret character.
    4. How can I play Fast like a Fox with my friends?
       You can play Fast like a Fox with your friends by competing on the leaderboard. You can see your rank and your friends' ranks on the map screen. You can also share your achievements on social media.
    5. How can I contact the developers of Fast like a Fox?
       You can contact the developers of Fast like a Fox by visiting their website: [WayBefore Ltd](http://www.waybefore.com/). You can also follow them on Facebook: [WayBefore Ltd](https://www.facebook.com/WayBeforeLtd/). You can also send them an email: [info@waybefore.com](mailto:info@waybefore.com).

      I hope you enjoyed this article and learned something new about Fast like a Fox. If you have any questions or feedback, feel free to leave a comment below. Thank you for reading and happy gaming!

      -
      -
      \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Eren Yeager Wallpapers The Best Anime Artwork and Illustrations.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Eren Yeager Wallpapers The Best Anime Artwork and Illustrations.md deleted file mode 100644 index c2e208b159ea31ef1ebcedf5dfb57359a473fde8..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Eren Yeager Wallpapers The Best Anime Artwork and Illustrations.md +++ /dev/null @@ -1,92 +0,0 @@ - -

      Eren Yeager Images Download: How to Find and Enjoy the Best Wallpapers of the Attack on Titan Protagonist

      -

      If you are a fan of Attack on Titan, you probably know who Eren Yeager is. He is the main character of the popular anime and manga series, and one of the most iconic and influential figures in the story. He is a young man who vows to exterminate the Titans, the giant humanoid creatures that have brought humanity to the brink of extinction. He also possesses the power of transforming into a Titan himself, and holds three of the nine special Titan abilities: the Attack Titan, the Founding Titan, and the War Hammer Titan.

      -

      eren yeager images download


      Download ————— https://ssurll.com/2uNVB5



      -

      In this article, we will show you how to download and enjoy the best images of Eren Yeager, whether you want to admire his heroic deeds, his fierce expressions, or his handsome looks. We will also give you some tips on how to choose the right resolution and format for your device, and how to set Eren Yeager images as your wallpaper.

      -

      Introduction

      -

      Who is Eren Yeager?

      -

      Eren Yeager is a fictional character created by Hajime Isayama, the author and illustrator of Attack on Titan. He was born and raised in Shiganshina District, a town located on the southern edge of Wall Maria, one of the three concentric walls that protect humanity from the Titans. He is the only son of Grisha and Carla Yeager, and the younger half-brother of Zeke Yeager, who is also known as the Beast Titan.

      -

      When he was 10 years old, he witnessed the destruction of his hometown and the death of his mother by the Colossal Titan and the Armored Titan, two of the nine special Titans that have unique abilities. This traumatic event ignited his hatred for the Titans and his desire to join the Survey Corps, an elite military branch that fights against the Titans outside the walls.

      -

      During his training in the 104th Training Corps, he befriended Mikasa Ackerman, his adoptive sister who is also a skilled fighter, and Armin Arlert, his childhood friend who is a brilliant strategist. He also met other cadets who would later become his comrades in arms, such as Jean Kirstein, Connie Springer, Sasha Braus, and Historia Reiss.

      -

      After graduating as one of the top cadets in his class, he joined the Survey Corps and discovered that he had the ability to transform into a Titan. He later learned that he inherited this power from his father, who was also a Titan shifter and a member of a persecuted ethnic group called Eldians. He also realized that he possessed two other Titan powers: the Founding Titan, which can control all other Titans and alter the memories of Eldians; and the War Hammer Titan, which can create weapons and structures out of hardened flesh.

      -

      eren yeager wallpapers hd free
      -4k ultra hd eren yeager backgrounds
      -eren jeager 1080p 2k 5k anime wallpapers
      -attack on titan eren yeager digital art
      -shingeki no kyojin eren jeager illustration
      -eren yeager artwork anime boys wallpaper
      -attack on titan eren jaeger wallpaper abyss
      -eren jeager multicolored digital wallpaper
      -eren yeager character anime illustration
      -shingeki no kyojin eren yeager fanart
      -eren jaeger wallpapers for desktop pc
      -4k eren yeager wallpapers for phone
      -eren jeager hd wallpapers for laptop
      -attack on titan eren yeager artwork
      -shingeki no kyojin eren jeager anime
      -eren yeager anime boys digital art
      -attack on titan eren jaeger backgrounds
      -eren jeager multicolored anime wallpaper
      -eren yeager character shingeki no kyojin
      -shingeki no kyojin eren yeager illustration
      -eren jaeger wallpapers for tablet
      -4k eren yeager backgrounds for computer
      -eren jeager hd backgrounds for device
      -attack on titan eren yeager fanart
      -shingeki no kyojin eren jeager wallpaper
      -eren yeager anime boys artwork
      -attack on titan eren jaeger digital wallpaper
      -eren jeager multicolored attack on titan
      -eren yeager character illustration wallpaper
      -shingeki no kyojin eren yeager anime art
      -eren jaeger wallpapers free download
      -4k eren yeager backgrounds download
      -eren jeager hd backgrounds download
      -attack on titan eren yeager download
      -shingeki no kyojin eren jeager download
      -eren yeager anime boys download
      -attack on titan eren jaeger free download
      -eren jeager multicolored free download
      -eren yeager character free download
      -shingeki no kyojin eren yeager free download

      -

      Throughout his journey, he faced many enemies and challenges, both human and Titan. He also uncovered many secrets and mysteries about the origin and history of the Titans, as well as his own destiny. He became a key figure in the conflict between Paradis Island, where he lived, and Marley, a nation that oppressed Eldians and used them as weapons of war.

      -

      Why is he popular among anime fans?

      -

    Eren Yeager is one of the most popular and beloved characters in the anime world. He has a strong fan base that admires his courage, determination, and growth. Here are some of the reasons why he is so popular among anime fans:

    - He is a complex and dynamic character. He is not a typical hero who is always righteous and noble. He has flaws, doubts, and conflicts. He makes mistakes and learns from them. He changes and evolves throughout the story, sometimes in unexpected and controversial ways. He is a realistic and relatable character who faces difficult choices and consequences.
    - He is a passionate and driven character. He has a clear goal and a strong motivation. He is willing to do whatever it takes to achieve his dream of freedom and justice. He does not give up easily and always fights back against his enemies. He inspires others with his spirit and charisma.
    - He is a powerful and badass character. He has amazing abilities that make him stand out among his peers. He can transform into a Titan that can rival any other Titan in strength and speed. He can also use the Founding Titan's power to command all other Titans and alter the memories of Eldians, and wield the War Hammer Titan's power to create weapons and structures out of hardened flesh. He is a formidable foe and a valuable ally in any battle.
    - He is a handsome and attractive character. He has a distinctive appearance that makes him easy to recognize: spiky brown hair, bright green eyes, and a muscular physique. He also has various outfits and hairstyles that suit his personality and mood. He can look cute, cool, or hot depending on the situation.

      What are some of his best moments in the anime?

      -

    Eren Yeager has many memorable moments in the anime that showcase his character development, his skills, and his emotions. Here are some of his best moments in the anime:

    - His first transformation into a Titan. This happened in episode 8 of season 1, when he was swallowed by a Titan after saving Armin from being eaten. He then emerged from the Titan's body as a Titan himself, shocking everyone who witnessed it, and proceeded to fight against the other Titans, saving Mikasa and the other soldiers from being killed.
    - His fight with Annie Leonhart, the Female Titan. This happened in episodes 24 and 25 of season 1, when he confronted Annie, who was revealed to be the Female Titan who killed many of his comrades in the Survey Corps. He transformed into a Titan and engaged in a brutal battle with her, eventually defeating her by ripping off her limbs and exposing her human form.
    - His confrontation with Reiner Braun and Bertolt Hoover, the Armored Titan and the Colossal Titan. This happened in episodes 6 and 7 of season 2, when he learned that Reiner and Bertolt, who were his friends and fellow cadets, were actually the Titans who destroyed his hometown and killed his mother. He transformed into a Titan and fought with Reiner, while Bertolt dropped from the sky as the Colossal Titan, creating a huge explosion.
    - His rescue from Reiner and Bertolt by Levi Ackerman, Mikasa, Armin, Erwin Smith, and Hange Zoe. This happened in episodes 11 and 12 of season 2, when he was captured by Reiner and Bertolt and taken to a forest. He was then rescued by a team of Survey Corps members, led by Levi, who managed to injure Reiner and Bertolt and retrieve Eren. However, many lives were lost in the process, and Commander Erwin lost an arm leading the charge.
    - His fight with Zeke Yeager, the Beast Titan. This happened in episodes 14 and 15 of season 3, when he faced his half-brother, who was also the leader of the Marleyan warriors and the mastermind behind the attack on Paradis Island. He transformed into a Titan and clashed with Zeke, who had superior strength and intelligence. He also learned that Zeke had a secret plan to use the Founding Titan's power to sterilize all Eldians and end their suffering.
    - His declaration of war on Marley. This happened in episode 6 of season 4, when he infiltrated Marley as a spy and attended a festival where Willy Tybur, the leader of the Tybur family and the holder of the War Hammer Titan, was giving a speech. He then revealed himself as Eren Yeager and transformed into a Titan, killing Willy and many other civilians and soldiers, and declared war on Marley and the rest of the world, stating that he would fight for his freedom and his people's survival.

      How to download Eren Yeager images

      -

      What are the best sources for Eren Yeager wallpapers?

      -

      If you want to download Eren Yeager images for your device, you have many options to choose from. There are many websites that offer high-quality wallpapers of Eren Yeager in various styles and themes. Here are some of the best sources for Eren Yeager wallpapers:

      -

      Anime websites

      -

      Anime websites are the most obvious choice for finding Eren Yeager images. They usually have a large collection of official and fan-made wallpapers of Attack on Titan characters, including Eren Yeager. Some of the most popular anime websites that have Eren Yeager wallpapers are:

    - [Anime Planet]: This website has over 300 Eren Yeager wallpapers that you can download for free. You can filter them by resolution, aspect ratio, popularity, or date added. You can also browse other Attack on Titan wallpapers or wallpapers of other anime series.
    - [Zerochan]: This website has over 2000 Eren Yeager wallpapers that you can download for free. You can filter them by tags, such as Eren Yeager (Cosplay), Eren Yeager (Female), or Eren Yeager (Titan). You can also browse other Attack on Titan wallpapers or wallpapers of other anime characters.
    - [Wallpaper Abyss]: This website has over 400 Eren Yeager wallpapers that you can download for free. You can filter them by resolution, category, rating, or views. You can also browse other Attack on Titan wallpapers or wallpapers of other anime genres.

      Wallpaper websites

      -

      Wallpaper websites are another good option for finding Eren Yeager images. They usually have a wide variety of wallpapers for different devices and preferences. Some of the most popular wallpaper websites that have Eren Yeager wallpapers are:

    - [Wallpaper Cave]: This website has over 1000 Eren Yeager wallpapers that you can download for free. You can filter them by resolution, type, or color. You can also browse other Attack on Titan wallpapers or wallpapers of other categories.
    - [Wallpapersden]: This website has over 200 Eren Yeager wallpapers that you can download for free. You can filter them by resolution, device, or orientation. You can also browse other Attack on Titan wallpapers or wallpapers of other topics.
    - [HD Wallpapers]: This website has over 100 Eren Yeager wallpapers that you can download for free. You can filter them by resolution, quality, or category. You can also browse other Attack on Titan wallpapers or wallpapers of other themes.

      Fan art websites

      -

      Fan art websites are another great option for finding Eren Yeager images. They usually have a lot of creative and original wallpapers of Eren Yeager made by talented artists and fans. Some of the most popular fan art websites that have Eren Yeager wallpapers are:

    - [DeviantArt]: This website has over 5000 Eren Yeager wallpapers that you can download for free or purchase with points. You can filter them by media type, style, genre, or popularity. You can also browse other Attack on Titan wallpapers or wallpapers of other fandoms.
    - [Pixiv]: This website has over 10000 Eren Yeager wallpapers that you can download for free or purchase with coins. You can filter them by tags, ratings, views, or bookmarks. You can also browse other Attack on Titan wallpapers or wallpapers of other illustrations.
    - [Pinterest]: This website has over 1000 Eren Yeager wallpapers that you can download for free or pin to your boards. You can filter them by keywords, categories, or boards. You can also browse other Attack on Titan wallpapers or wallpapers of other interests.

      How to choose the right resolution and format for your device

      -

      Before you download Eren Yeager images, you need to consider the resolution and format of the wallpapers. The resolution is the number of pixels that make up the image, and the format is the type of file that stores the image. The resolution and format affect the quality and size of the image, as well as the compatibility and performance of your device. Here are some tips on how to choose the right resolution and format for your device:

      -

      HD vs 4K vs 5K

      -

      HD stands for high definition, and it refers to a resolution of 1920 x 1080 pixels, which is the standard for most monitors and TVs. 4K stands for ultra high definition, and it refers to a resolution of 3840 x 2160 pixels, which is four times more than HD. 5K stands for super ultra high definition, and it refers to a resolution of 5120 x 2880 pixels, which is five times more than HD.

      -

      The higher the resolution, the sharper and clearer the image. However, the higher the resolution, the larger the file size and the more resources your device needs to display the image. Therefore, you need to balance between quality and performance when choosing the resolution.

      -

      The best way to choose the resolution is to match it with the native resolution of your device's screen. The native resolution is the optimal resolution that your device's screen can support without scaling or stretching the image. You can find out your device's native resolution by checking its specifications or settings.

      -

      For example, if your device's screen has a native resolution of 1920 x 1080 pixels, you should choose HD wallpapers. If your device's screen has a native resolution of 3840 x 2160 pixels, you should choose 4K wallpapers. If your device's screen has a native resolution of 5120 x 2880 pixels, you should choose 5K wallpapers.
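
    If you are on a desktop computer and want to check whether a downloaded wallpaper is large enough for your screen before setting it, a small Java sketch like the one below can compare the two sizes. The file name eren-yeager-wallpaper.jpg is a placeholder, and the comparison is only a rough guide (it ignores aspect ratio and display scaling, and it needs a non-headless desktop environment):

    ```java
    // WallpaperFit.java: rough desktop-only sketch comparing an image's pixel size
    // with the primary screen's resolution. The file path below is a placeholder.
    import java.awt.Dimension;
    import java.awt.Toolkit;
    import java.awt.image.BufferedImage;
    import java.io.File;
    import javax.imageio.ImageIO;

    public class WallpaperFit {
        public static void main(String[] args) throws Exception {
            BufferedImage img = ImageIO.read(new File("eren-yeager-wallpaper.jpg")); // placeholder path
            if (img == null) {
                System.out.println("Could not decode the image file.");
                return;
            }
            // Reports the primary display's resolution; throws HeadlessException without a display
            Dimension screen = Toolkit.getDefaultToolkit().getScreenSize();
            System.out.printf("Wallpaper: %dx%d, screen: %dx%d%n",
                    img.getWidth(), img.getHeight(), screen.width, screen.height);
            if (img.getWidth() >= screen.width && img.getHeight() >= screen.height) {
                System.out.println("The image covers the screen, so it should not need to be scaled up.");
            } else {
                System.out.println("The image is smaller than the screen and may look blurry when stretched.");
            }
        }
    }
    ```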

      -

      JPG vs PNG vs GIF

      -

      JPG, PNG, and GIF are three common formats for storing images. They have different advantages and disadvantages depending on the type and purpose of the image. Here are some differences between them:

    - JPG: This format is good for compressing images with many colors and details, such as photos or realistic paintings. It reduces the file size by discarding some information that is not noticeable to the human eye. However, it also lowers the quality of the image and creates artifacts or distortions in some areas. It does not support transparency or animation.
    - PNG: This format is good for preserving images with sharp edges and contrast, such as logos or icons. It maintains the quality of the image by using lossless compression, which means that no information is lost in the process. However, it also increases the file size and takes more time to load. It supports transparency but not animation.
    - GIF: This format is good for creating images with simple colors and shapes, such as cartoons or diagrams. It uses a limited color palette of up to 256 colors, which reduces the file size and makes it easy to animate. However, it also lowers the quality of the image and makes it look pixelated or dithered. It supports animation but not transparency.

      The best way to choose the format is to match it with the type and purpose of the image. The type of the image refers to the style and content of the image, such as photo, painting, logo, or cartoon. The purpose of the image refers to the intended use and effect of the image, such as background, decoration, or illustration.

      -

      For example, if you want to download Eren Yeager images that are photos or realistic paintings, you should choose JPG format. If you want to download Eren Yeager images that are logos or icons, you should choose PNG format. If you want to download Eren Yeager images that are cartoons or animations, you should choose GIF format.

      -

      How to set Eren Yeager images as your wallpaper

      -

      After you download Eren Yeager images, you can set them as your wallpaper for your device. The wallpaper is the image that appears on your device's screen when it is idle or locked. The wallpaper can enhance the appearance and mood of your device, as well as express your personality and preferences. Here are some steps on how to set Eren Yeager images as your wallpaper for different devices:

      -

      For Windows PC

      -

      If you have a Windows PC, you can follow these steps to set Eren Yeager images as your wallpaper:

    - Locate the Eren Yeager image that you downloaded on your PC. You can use the File Explorer or the Search function to find it.
    - Right-click on the Eren Yeager image and select "Set as desktop background". This will automatically apply the image as your wallpaper.
    - Alternatively, you can also right-click on an empty space on your desktop and select "Personalize". This will open the Settings app and show you the Personalization options.
    - Click on "Background" and select "Picture" from the drop-down menu. Then click on "Browse" and locate the Eren Yeager image that you downloaded. Select the image and click on "Choose picture". This will apply the image as your wallpaper.
    - You can also adjust the fit of the image by selecting "Fill", "Fit", "Stretch", "Tile", or "Center" from the drop-down menu below the image preview.

      For Mac OS

      -

      If you have a Mac OS, you can follow these steps to set Eren Yeager images as your wallpaper:

    - Locate the Eren Yeager image that you downloaded on your Mac. You can use the Finder or the Spotlight function to find it.
    - Right-click on the Eren Yeager image and select "Set Desktop Picture". This will automatically apply the image as your wallpaper.
    - Alternatively, you can also click on the Apple logo on the top left corner of your screen and select "System Preferences". This will open the System Preferences app and show you the various settings for your Mac.
    - Click on "Desktop & Screen Saver" and select "Desktop" from the tabs at the top. Then click on "+" and locate the Eren Yeager image that you downloaded. Select the image and click on "Choose". This will apply the image as your wallpaper.
    - You can also adjust the fit of the image by selecting "Fill Screen", "Fit to Screen", "Stretch to Fill Screen", "Center", or "Tile" from the drop-down menu below the image preview.

      For Android

      -

      If you have an Android device, you can follow these steps to set Eren Yeager images as your wallpaper:

    - Locate the Eren Yeager image that you downloaded on your device. You can use the Files app or the Gallery app to find it.
    - Tap and hold on the Eren Yeager image and select "Set as wallpaper". This will open the Wallpaper app and show you the preview of the image as your wallpaper.
    - Alternatively, you can also open the Settings app and tap on "Wallpaper". This will open the Wallpaper app and show you the various options for your wallpaper.
    - Tap on "My photos" and locate the Eren Yeager image that you downloaded. Select the image and tap on "Set wallpaper". This will apply the image as your wallpaper.
    - You can also adjust the crop and position of the image by pinching, zooming, or dragging the image on the screen.

      For iOS

      -

      If you have an iOS device, you can follow these steps to set Eren Yeager images as your wallpaper:

    - Locate the Eren Yeager image that you downloaded on your device. You can use the Files app or the Photos app to find it.
    - Tap on the Eren Yeager image and select the share icon on the bottom left corner of the screen. This will open the Share menu and show you various options for sharing or using the image.
    - Tap on "Use as Wallpaper". This will open the Wallpaper settings and show you the preview of the image as your wallpaper.
    - Alternatively, you can also open the Settings app and tap on "Wallpaper". This will open the Wallpaper settings and show you various options for your wallpaper.
    - Tap on "Choose a New Wallpaper" and locate the Eren Yeager image that you downloaded. Select the image and tap on "Set". This will apply the image as your wallpaper.
    - You can also adjust the perspective and position of the image by pinching, zooming, or dragging the image on the screen.

      Conclusion

      -

      Eren Yeager is a fascinating and popular character from Attack on Titan, who has many fans around the world. If you are one of them, you might want to download and enjoy some of the best images of Eren Yeager for your device. In this article, we have shown you how to find and download Eren Yeager images from various sources, how to choose the right resolution and format for your device, and how to set Eren Yeager images as your wallpaper. We hope you have found this article helpful and informative, and that you have enjoyed browsing through some of the amazing wallpapers of Eren Yeager.

      -

      Now that you have learned how to download and enjoy Eren Yeager images, why not share them with your friends and fellow fans? You can also leave a comment below and tell us which Eren Yeager image is your favorite, or if you have any questions or suggestions for us. We would love to hear from you!

      -

      FAQs

      -

      Here are some of the frequently asked questions about Eren Yeager images:

    - Q: Where can I watch Attack on Titan?
      A: You can watch Attack on Titan on various streaming platforms, such as Crunchyroll, Funimation, Netflix, Hulu, or Amazon Prime Video. You can also buy or rent the DVDs or Blu-rays of the anime series.
    - Q: How many seasons and episodes are there in Attack on Titan?
      A: There are four seasons and 75 episodes in Attack on Titan. The first season has 25 episodes, the second season has 12 episodes, the third season has 22 episodes, and the fourth season has 16 episodes. The fourth season is also the final season of the anime series.
    - Q: How many chapters and volumes are there in Attack on Titan manga?
      A: There are 139 chapters and 34 volumes in Attack on Titan manga. The manga series started in 2009 and ended in 2021. The manga series is the original source material for the anime series.
    - Q: Who is the voice actor of Eren Yeager?
      A: The voice actor of Eren Yeager is Yuki Kaji in Japanese and Bryce Papenbrook in English. They are both famous and talented voice actors who have voiced many other anime characters.
    - Q: What is the meaning of Eren Yeager's name?
      A: The meaning of Eren Yeager's name is not officially confirmed, but there are some possible interpretations based on the origin and spelling of his name. Eren is a Turkish name that means "saint" or "holy person". Yeager is a German surname that means "hunter" or "fighter". Therefore, Eren Yeager's name could mean "holy hunter" or "saintly fighter".

      197e85843d
      -
      -
      \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Free Download MobCup APK for Android - Change Your Phones Look and Sound with MobCup.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Free Download MobCup APK for Android - Change Your Phones Look and Sound with MobCup.md deleted file mode 100644 index 1b6aa9cee98665ce58f0f8c1cc68416cee1a2d43..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Free Download MobCup APK for Android - Change Your Phones Look and Sound with MobCup.md +++ /dev/null @@ -1,122 +0,0 @@ -
      -

      MobCup APK Download: How to Personalize Your Android Device with Ringtones and Wallpapers

      -

      Do you want to make your Android device more exciting and unique? Do you want to change your ringtones and wallpapers according to your mood and preferences? If yes, then you should try MobCup APK, a free personalization app that offers a collection of ringtones and wallpapers for your mobile device. In this article, we will tell you what MobCup APK is, what features it has, how to download and install it on your Android device, how to use it to customize your ringtones and wallpapers, and what are the pros and cons of using it. By the end of this article, you will be able to personalize your Android device with ringtones and wallpapers that suit your style and personality.

      -

      Features of MobCup APK

      -

      MobCup APK is a personalization app that lets you access thousands of high-quality ringtones, notification sounds, and alert tones for your Android device. You can also find a selection of wallpapers to change the look of your home screen. Here are some of the features of MobCup APK that make it a great app for personalizing your device:

      -

      mobcup apk download


Download File: https://ssurll.com/2uO0VQ



      -
        -
      • Popular ringtones and notification sounds: You can find some of the most popular songs and sounds today in the popular ringtones section. You can choose from various genres, such as pop, rock, hip hop, Bollywood, etc. You can also find some funny ringtones and sounds that will make you laugh.
      • -
      • Collections of ringtones and wallpapers by category: You can browse through different categories of ringtones and wallpapers, such as animals, nature, games, movies, etc. You can find something that matches your interest and taste.
      • -
      • Search function and favorites section: You can use the search bar to find specific ringtones or wallpapers by keywords. You can also heart the media that you like and save them to your favorites section for easy access.
      • -
      • Simple and easy to use interface: MobCup APK has a clean and simple user interface that is well laid out. You can easily navigate through the app and find what you are looking for. You can also preview and download the media with just a few taps.
      • -
      -

      How to Download and Install MobCup APK on Your Android Device

      -

      If you want to try MobCup APK on your Android device, you need to download and install it manually from an external source. Here are the steps you need to follow:

      -
        -
      1. Step 1: Go to the official website of MobCup or a trusted source that offers the APK file. You can use this link to download the APK file: MobCup APK Download.
      2. -
      3. Step 2: Download the APK file to your device. You may need to allow the download from your browser or file manager.
      4. -
      5. Step 3: Enable unknown sources in your settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.
      6. -
      7. Step 4: Install the APK file and launch the app. You may need to grant some permissions to the app, such as access to your storage, contacts, etc.
      8. -
      -
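If you prefer to sideload the downloaded APK from a computer instead of tapping through the installer on the phone, a rough sketch like the one below also works. This is an alternative that the steps above do not mention: it assumes Python 3 and Android's adb tool are installed, USB debugging is enabled on the device, and the file path shown is just a placeholder for wherever you saved the APK.

```python
import subprocess
from pathlib import Path

# Placeholder path -- point this at wherever you actually saved the downloaded APK.
apk_path = Path.home() / "Downloads" / "mobcup.apk"

# "adb install -r" installs the APK on the connected device, replacing any older copy.
result = subprocess.run(
    ["adb", "install", "-r", str(apk_path)],
    capture_output=True,
    text=True,
)
print(result.stdout or result.stderr)
```

Either way, the app still has to be launched and granted its permissions on the device itself.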

      How to Use MobCup APK to Customize Your Ringtones and Wallpapers

      -

      Once you have installed MobCup APK on your device, you can start using it to customize your ringtones and wallpapers. Here are the steps you need to follow:

      -
        -
      1. Step 1: Browse or search for the ringtones or wallpapers you like. You can use the tabs at the bottom of the screen to switch between ringtones and wallpapers. You can also use the menu at the top left corner to access different categories of media.
      2. -
      3. Step 2: Preview and download the media to your device. You can tap on any ringtone or wallpaper to preview it. You can also swipe left or right to see more options. To download the media, tap on the download icon at the bottom right corner of the screen.
      4. -
      5. Step 3: Set the media as your ringtone, notification, alarm, or contact tone. After downloading the media, you can tap on the set icon at the bottom left corner of the screen. You can choose to set the media as your default ringtone, notification sound, alarm sound, or contact tone for a specific person.
      6. -
      7. Step 4: Change your wallpaper from the app or your settings. After downloading the wallpaper, you can tap on the apply icon at the bottom center of the screen. You can choose to apply the wallpaper to your home screen, lock screen, or both. You can also change your wallpaper from your settings by going to Settings > Display > Wallpaper and selecting MobCup as your source.
      8. -
      -

      Pros and Cons of MobCup APK

      -

      MobCup APK is a great app for personalizing your Android device with ringtones and wallpapers, but it also has some drawbacks. Here are some of the pros and cons of using MobCup APK:

| Pros | Cons |
| --- | --- |
| Free and easy to use | Low quality wallpapers |
| Wide selection of ringtones and wallpapers | Likes don't always get saved |
| Popular and funny sounds and songs | Ads and pop-ups |
| Customizable ringtones and wallpapers | Requires internet connection |
| Compatible with most Android devices | May not work with some devices or apps |
      -

      Conclusion and FAQs

      -

      MobCup APK is a personalization app that allows you to customize your Android device with ringtones and wallpapers that suit your style and personality. You can find thousands of high-quality ringtones, notification sounds, alert tones, and wallpapers in various categories and genres. You can also search for specific media by keywords and save them to your favorites section. You can easily download and install MobCup APK on your device from an external source and use it to set your ringtones and wallpapers with just a few taps. However, you should also be aware of some of the drawbacks of using MobCup APK, such as low quality wallpapers, likes not getting saved, ads and pop-ups, internet connection requirement, and compatibility issues.

      -

      If you want to personalize your Android device with ringtones and wallpapers that match your mood and preferences, you should give MobCup APK a try. It is a free and simple app that offers a wide selection of popular and funny sounds and songs that will make your device more exciting and unique.

      -


      -

      Here are some FAQs about MobCup APK that you may find helpful:

      -
        -
      • Q: Is MobCup APK safe to use?
      • -
      • A: MobCup APK is generally safe to use as long as you download it from a trusted source and scan it for viruses or malware before installing it. You should also be careful about the permissions you grant to the app and the data you share with it.
      • -
      • Q: How can I update MobCup APK?
      • -
      • A: MobCup APK does not have an automatic update feature, so you need to check for updates manually from the official website or the source you downloaded it from. You can also follow MobCup on social media platforms, such as Facebook and Twitter, to get notified of the latest updates and news.
      • -
      • Q: How can I delete MobCup APK from my device?
      • -
      • A: If you want to uninstall MobCup APK from your device, you can go to Settings > Apps > MobCup and tap on Uninstall. You can also delete the APK file from your storage if you don't need it anymore.
      • -
      • Q: How can I contact MobCup support?
      • -
      • A: If you have any questions, feedback, or issues with MobCup APK, you can contact MobCup support by sending an email to support@mobcup.net. You can also visit their website and fill out the contact form or leave a comment on their blog.
      • -
      • Q: How can I share MobCup ringtones and wallpapers with my friends?
      • -
      • A: You can share MobCup ringtones and wallpapers with your friends by using the share icon at the bottom right corner of the screen. You can choose to share the media via email, SMS, WhatsApp, Facebook, Twitter, etc. You can also copy the link of the media and paste it on any platform you want.
      • -

      \ No newline at end of file diff --git a/spaces/sklearn-docs/early_stopping_of_gradient_boosting/app.py b/spaces/sklearn-docs/early_stopping_of_gradient_boosting/app.py deleted file mode 100644 index 2b91c2e0c0571cc0ed2831e7c5f753761ccb0b9c..0000000000000000000000000000000000000000 --- a/spaces/sklearn-docs/early_stopping_of_gradient_boosting/app.py +++ /dev/null @@ -1,164 +0,0 @@ -import gradio as gr -import time - -import numpy as np -import matplotlib.pyplot as plt - -from sklearn import ensemble -from sklearn import datasets -from sklearn.model_selection import train_test_split - -theme = gr.themes.Monochrome( - primary_hue="indigo", - secondary_hue="blue", - neutral_hue="slate", -) -model_card = f""" -## Description - -**Gradient boosting** is a machine learning technique that combines several regression trees to create a powerful model in an iterative manner. -**Early stopping** is a technique used in **gradient boosting** to determine the least number of iterations required to create a model that generalizes well to new data. -It involves specifying a validation set and using it to evaluate the model after each stage of tree building. -The process is continued until the model's scores do not improve for a specified number of stages. -Using early stopping can significantly reduce training time, memory usage, and prediction latency while achieving almost the same accuracy as a model built without early stopping using many more estimators. -You can play around with different ``number of samples`` and ``number of new estimators`` to see the effect - -## Dataset - -Iris dataset, Classification dataset, Hastie dataset -""" - - -def do_train(n_samples, n_estimators, progress=gr.Progress()): - - data_list = [ - datasets.load_iris(return_X_y=True), - datasets.make_classification(n_samples=n_samples, random_state=0), - datasets.make_hastie_10_2(n_samples=n_samples, random_state=0), - ] - names = ["Iris Data", "Classification Data", "Hastie Data"] - - n_gb = [] - score_gb = [] - time_gb = [] - n_gbes = [] - score_gbes = [] - time_gbes = [] - - for X, y in progress.tqdm(data_list): - X_train, X_test, y_train, y_test = train_test_split( - X, y, test_size=0.2, random_state=0 - ) - # We specify that if the scores don't improve by at least 0.01 for the last - # 10 stages, stop fitting additional stages - gbes = ensemble.GradientBoostingClassifier( - n_estimators=n_estimators, - validation_fraction=0.2, - n_iter_no_change=5, - tol=0.01, - random_state=0, - ) - gb = ensemble.GradientBoostingClassifier(n_estimators=n_estimators, random_state=0) - start = time.time() - gb.fit(X_train, y_train) - time_gb.append(time.time() - start) - - start = time.time() - gbes.fit(X_train, y_train) - time_gbes.append(time.time() - start) - - score_gb.append(gb.score(X_test, y_test)) - score_gbes.append(gbes.score(X_test, y_test)) - - n_gb.append(gb.n_estimators_) - n_gbes.append(gbes.n_estimators_) - - bar_width = 0.2 - n = len(data_list) - index = np.arange(0, n * bar_width, bar_width) * 2.5 - index = index[0:n] - - fig1, axes1 = plt.subplots(figsize=(9, 5)) - - bar1 = axes1.bar( - index, score_gb, bar_width, label="Without early stopping", color="crimson" - ) - bar2 = axes1.bar( - index + bar_width, score_gbes, bar_width, label="With early stopping", color="coral" - ) - axes1.set_xticks(index + bar_width, names); - axes1.set_yticks(np.arange(0, 1.3, 0.1)); - - def autolabel(ax, rects, n_estimators): - """ - Attach a text label above each bar displaying n_estimators of each model - """ - for i, rect in 
enumerate(rects): - ax.text( - rect.get_x() + rect.get_width() / 2.0, - 1.05 * rect.get_height(), - "n_est=%d" % n_estimators[i], - ha="center", - va="bottom", - ) - autolabel(axes1, bar1, n_gb) - autolabel(axes1, bar2, n_gbes) - plt.xlabel("Datasets") - plt.ylabel("Test score") - - axes1.set_xlabel("Datasets") - axes1.set_ylabel("Test score") - axes1.set_ylim([0, 1.3]) - axes1.legend(loc="best") - axes1.grid(True) - - - fig2, axes2 = plt.subplots(figsize=(9, 5)) - - bar1 = axes2.bar( - index, time_gb, bar_width, label="Without early stopping", color="crimson" - ) - bar2 = axes2.bar( - index + bar_width, time_gbes, bar_width, label="With early stopping", color="coral" - ) - - max_y = np.amax(np.maximum(time_gb, time_gbes)) - - axes2.set_xticks(index + bar_width, names) - axes2.set_yticks(np.linspace(0, 1.3 * max_y, 13)) - - autolabel(axes2, bar1, n_gb) - autolabel(axes2, bar2, n_gbes) - - axes2.set_ylim([0, 1.3 * max_y]) - axes2.legend(loc="best") - axes2.grid(True) - - axes2.set_xlabel("Datasets") - axes2.set_ylabel("Fit Time") - - - return fig1, fig2 - - - -with gr.Blocks(theme=theme) as demo: - gr.Markdown(''' -
      -

      Early stopping of Gradient Boosting

      -
      - ''') - gr.Markdown(model_card) - gr.Markdown("Author: Vu Minh Chien. Based on the example from scikit-learn") - n_samples = gr.Slider(minimum=500, maximum=10000, step=500, value=1000, label="Number of samples") - n_estimators = gr.Slider(minimum=50, maximum=300, step=50, value=100, label="Number of estimators") - with gr.Row(): - with gr.Column(): - plot1 = gr.Plot(label="Test score") - with gr.Column(): - plot2 = gr.Plot(label="Running time") - - n_samples.change(fn=do_train, inputs=[n_samples, n_estimators], outputs=[plot1, plot2]) - n_estimators.change(fn=do_train, inputs=[n_samples, n_estimators], outputs=[plot1, plot2]) - -demo.launch(enable_queue=True) \ No newline at end of file diff --git a/spaces/sklearn-docs/sklearn-spectral-clustering/README.md b/spaces/sklearn-docs/sklearn-spectral-clustering/README.md deleted file mode 100644 index 09c2bc09ca5170e530c57de92d0e91981cbe0229..0000000000000000000000000000000000000000 --- a/spaces/sklearn-docs/sklearn-spectral-clustering/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: sklearn Spectral Clustering -emoji: 🔴🔵🔴 -colorFrom: blue -colorTo: red -sdk: gradio -sdk_version: 3.27.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/simultaneous_translation/models/__init__.py b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/simultaneous_translation/models/__init__.py deleted file mode 100644 index 257a96593ff7af93c206c066d8db4ad795b2ae36..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/simultaneous_translation/models/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import importlib -import os - - -for file in sorted(os.listdir(os.path.dirname(__file__))): - if file.endswith(".py") and not file.startswith("_"): - model_name = file[: file.find(".py")] - importlib.import_module( - "examples.simultaneous_translation.models." + model_name - ) diff --git a/spaces/sshaileshk/feedsGPT/README.md b/spaces/sshaileshk/feedsGPT/README.md deleted file mode 100644 index 3b14b172cb2a5cf046b7302686350329b815aeba..0000000000000000000000000000000000000000 --- a/spaces/sshaileshk/feedsGPT/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Chat Your Data State Of The Union -emoji: 📊 -colorFrom: gray -colorTo: purple -sdk: gradio -sdk_version: 3.17.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/stomexserde/gpt4-ui/Examples/48 Laws Of Attraction Free Pdf VERIFIED.md b/spaces/stomexserde/gpt4-ui/Examples/48 Laws Of Attraction Free Pdf VERIFIED.md deleted file mode 100644 index b4fcf4e787ce6f1d1ba01862711c6526eabd26c4..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/48 Laws Of Attraction Free Pdf VERIFIED.md +++ /dev/null @@ -1,28 +0,0 @@ -
      -

      How to Download 48 Laws of Attraction Free PDF

      -

      If you are interested in learning more about the law of attraction, you might be wondering if there is a way to download 48 laws of attraction free pdf. The law of attraction is a philosophy that suggests that positive thoughts bring positive results into a person's life, while negative thoughts bring negative outcomes[^3^]. It is based on the belief that thoughts are a form of energy and that positive energy attracts success in all areas of life, including health, finances, and relationships.

      -

      48 laws of attraction free pdf


Download: https://urlgoal.com/2uI6ED



      -

      There are many books that explore the concept of the law of attraction, but one of the most popular ones is The 48 Laws of Power by Robert Greene. This book is not specifically about the law of attraction, but it contains 48 principles that can help you achieve power, influence, and mastery over your own destiny. Some of these principles are:

      -
        -
      • Law 1: Never outshine the master.
      • -
      • Law 2: Never put too much trust in friends; learn how to use enemies.
      • -
      • Law 3: Conceal your intentions.
      • -
      • Law 4: Always say less than necessary.
      • -
      • Law 5: So much depends on reputation; guard it with your life.
      • -
      • Law 6: Court attention at all costs.
      • -
      • Law 7: Get others to do the work for you, but always take the credit.
      • -
      • Law 8: Make other people come to you; use bait if necessary.
      • -
      • Law 9: Win through your actions, not your words.
      • -
      • Law 10: Infection: avoid the unhappy and unlucky.
      • -
      -

      And so on. The book is full of examples from history, literature, and politics that illustrate how these laws have been used by successful people throughout history. The book also warns about the dangers of breaking these laws and provides tips on how to reverse the situation if you do.

      -

      If you want to download 48 laws of attraction free pdf, you might be disappointed to find out that there is no official or legal way to do so. The book is protected by copyright and you would need to purchase it from a reputable source if you want to read it. However, there are some unofficial and illegal ways to download 48 laws of attraction free pdf, such as:

      -

      -
        -
      • Finding a pirated copy on a file-sharing website or app. This is risky because you might download a virus or malware along with the file, or face legal consequences for violating intellectual property rights.
      • -
      • Finding a scanned copy on a Google Drive link. This is also risky because the link might be broken or removed at any time, or the quality of the scan might be poor or unreadable.
      • -
      • Finding a summary or review of the book on a blog or website. This is not risky but it is not the same as reading the full book. You might miss out on some important details or insights that the author provides in the original text.
      • -
      -

      The best way to download 48 laws of attraction free pdf is to not download it at all. Instead, you should buy the book from a legitimate source and support the author who spent years researching and writing it. You can also borrow it from a library or a friend if you don't want to spend money on it. By reading the book legally and ethically, you will not only learn more about the law of attraction and how to apply it in your life, but you will also attract more positive energy and karma into your life.

      \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Adobe Premiere Pro CC 2015 .3-10.3 Mac Os X.md b/spaces/stomexserde/gpt4-ui/Examples/Adobe Premiere Pro CC 2015 .3-10.3 Mac Os X.md deleted file mode 100644 index cec1fcad1dd04ad174b0d73ce2514f29e2e71b63..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Adobe Premiere Pro CC 2015 .3-10.3 Mac Os X.md +++ /dev/null @@ -1,279 +0,0 @@ - - - -
      - Adobe Premiere Pro CC 2015.3-10.3 Mac OS X: A Comprehensive Review

      Introduction

      -

      If you are looking for a powerful and professional video editing software for your Mac, you might want to consider Adobe Premiere Pro CC 2015.3-10.3 Mac OS X.

      -

      Adobe Premiere Pro CC 2015 .3-10.3 Mac Os X


Download: https://urlgoal.com/2uIc64



      -

      Adobe Premiere Pro is one of the most popular and widely used video editing applications in the world, trusted by millions of filmmakers, broadcasters, journalists, students, and hobbyists.

      -

      It is part of the Adobe Creative Cloud suite of applications, which means you can access it anytime and anywhere with your Adobe account, as well as integrate it with other Adobe apps like Photoshop, After Effects, Audition, Media Encoder, and more.

      -

      In this article, we will review Adobe Premiere Pro CC 2015.3-10.3 Mac OS X, which is the latest version available for Mac users as of June 2021.

      -

      We will cover what this version is, why it is important for video editing, what are its main features and benefits, how to install it on your Mac, how to use it for your video projects, how to troubleshoot it if you encounter any issues, and how to update or rollback to a previous version if needed.

      -

      By the end of this article, you will have a comprehensive understanding of Adobe Premiere Pro CC 2015.3-10.3 Mac OS X, and you will be able to decide if it is the right video editing software for you.

      -

      How to install Adobe Premiere Pro CC 2015.3-10.3 Mac OS X

      -

      Before you can start using Adobe Premiere Pro CC 2015.3-10.3 Mac OS X, you need to install it on your Mac computer.

      -

      -

      Here are the steps to follow:

      -

      What are the system requirements?

      -

      First, you need to make sure that your Mac meets the minimum system requirements for running Adobe Premiere Pro CC 2015.3-10.3 Mac OS X.

      -

      According to Adobe, these are the system requirements:

| Component | Requirement |
| --- | --- |
| Operating system | Mac OS X v10.10, v10.11, or v10.12 |
| Processor | Multicore Intel processor with 64-bit support |
| RAM | 8 GB of RAM (16 GB or more recommended) |
| Hard disk space | 8 GB of available hard-disk space for installation; additional free space required during installation (cannot install on a volume that uses a case-sensitive file system or on removable flash storage devices) |
| Graphics card | Optional: Adobe-certified GPU card for GPU-accelerated performance |
| Internet connection | Internet connection and registration are necessary for required software activation, validation of subscriptions, and access to online services |
      -

      If your Mac does not meet these requirements, you may experience performance issues or errors when using Adobe Premiere Pro CC 2015.3-10.3 Mac OS X.

      -

      In that case, you may want to upgrade your Mac hardware, or use an older version of Adobe Premiere Pro that is compatible with your Mac.
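If you want a quick way to check the two specifications that matter most here, the macOS version and the installed RAM, a small sketch along these lines can be run with Python 3 in Terminal. It is only a convenience check, not something Adobe provides, and it assumes you are on a Mac because it calls the macOS-specific sysctl command:

```python
import platform
import subprocess

# platform.mac_ver() reports the macOS release, e.g. "10.12.6" on Sierra.
mac_version, _, _ = platform.mac_ver()
print(f"macOS version: {mac_version or 'not running on macOS'}")

# hw.memsize is the total physical memory in bytes (a macOS-specific sysctl key).
ram_bytes = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"]).strip())
print(f"Installed RAM: {ram_bytes / 1024 ** 3:.1f} GB (8 GB minimum, 16 GB or more recommended)")
```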

      -

      What are the direct download links?

      -

      Next, you need to download the Adobe Premiere Pro CC 2015.3-10.3 Mac OS X installer from the official Adobe website.

      -

      You can use the direct download links below to get the installer without using the Creative Cloud desktop app:

      - -

      Note that you need to have a valid Adobe account and be logged in to access these links.

      -

      If you have any trouble downloading the installer, you can try using a different browser, computer, or network connection.

      -

      How to follow the installation instructions?

      -

      Once you have downloaded the installer, you need to follow the installation instructions to complete the process.

      -

      The installation instructions vary depending on whether you have a subscription plan or a serial number for Adobe Premiere Pro CC 2015.3-10.3 Mac OS X.

      -

      If you have a subscription plan, you can follow these steps:

      -
        -
      1. Double-click the downloaded file to extract its contents.
      2. -
      3. The extraction process creates a folder named "AdobePatchInstaller". Open this folder and double-click "AdobePatchInstaller.app". Enter your administrator password if prompted.
      4. -
      5. The update begins installing. Follow any onscreen instructions.
      6. -
      7. When the installation is complete, click "Close".
      8. -
      9. You can now launch Adobe Premiere Pro CC 2015.3-10.3 Mac OS X from your Applications folder or the Creative Cloud desktop app.
      10. -
      -

      If you have a serial number, you can follow these steps:

      -
        -
      1. Double-click the downloaded file to extract its contents.
      2. -
      3. The extraction process creates a folder named "Build". Open this folder and double-click "Install.app". Enter your administrator password if prompted.
      4. -
      5. The installer starts and displays the Welcome screen. Click "Continue".
      6. -
      7. The installer prompts you to enter your Adobe ID. Sign in with your Adobe account or create a new one.
      8. -
      9. The installer prompts you to enter your serial number. Enter the 24-digit serial number that you received when you purchased Adobe Premiere Pro CC 2015.3-10.3 Mac OS X.
      10. -
      11. The installer validates your serial number and displays the License Agreement screen. Click "Accept".
      12. -
      13. The installer displays the Install Options screen. Choose the language and components that you want to install, and click "Continue".
      14. -
      15. The installer displays the Installation Type screen. Choose the destination folder for the installation, and click "Install".
      16. -
      17. The installation begins. Follow any onscreen instructions.
      18. -
      19. When the installation is complete, click "Close".
      20. -
      21. You can now launch Adobe Premiere Pro CC 2015.3-10.3 Mac OS X from your Applications folder or the Creative Cloud desktop app.
      22. -
      -

      Congratulations, you have successfully installed Adobe Premiere Pro CC 2015.3-10.3 Mac OS X on your Mac!

      -

      How to use Adobe Premiere Pro CC 2015.3-10.3 Mac OS X

      -

      Now that you have installed Adobe Premiere Pro CC 2015.3-10.3 Mac OS X, you are ready to use it for your video editing projects.

      -

      In this section, we will show you how to use some of the basic features and functions of Adobe Premiere Pro CC 2015.3-10.3 Mac OS X, such as creating a new project, importing media, using the timeline and editing tools, applying effects and transitions, and exporting and sharing your video.

      -

      Of course, this is not a comprehensive guide, but rather an overview of the main steps and concepts that you need to know to get started with Adobe Premiere Pro CC 2015.3-10.3 Mac OS X.

      -

      If you want to learn more about the advanced features and techniques of Adobe Premiere Pro CC 2015.3-10.3 Mac OS X, you can check out the official Adobe help pages, tutorials, and forums, or look for online courses and books on video editing.

      -

      How to create a new project and import media?

      -

      The first thing you need to do when you launch Adobe Premiere Pro CC 2015.3-10.3 Mac OS X is to create a new project.

      -

      A project is a collection of files that you use to create your video, such as video clips, audio tracks, images, titles, effects, and settings.

      -

      To create a new project, follow these steps:

      -
        -
      1. From the Welcome screen, click "New Project".
      2. -
      3. The New Project dialog box appears. Enter a name and location for your project, and click "OK".
      4. -
      5. The New Sequence dialog box appears. A sequence is a container for your video edits in the timeline. You can choose from various presets or customize your own sequence settings, such as frame size, frame rate, pixel aspect ratio, audio sample rate, and more.
      6. -
      7. Choose a preset that matches your source media or desired output format, or click "Settings" to modify the sequence settings manually.
      8. -
      9. Click "OK" to create your new sequence.
      10. -
      -

      You have now created a new project and a new sequence in Adobe Premiere Pro CC 2015.3-10.3 Mac OS X.

      -

      The next step is to import your media files into your project.

      -

      Media files are the raw materials that you use to create your video, such as video clips, audio tracks, images, graphics, etc.

      -

      To import media files into your project, follow these steps:

      -
        -
      1. In the Project panel (the lower-left corner of the screen), right-click and choose "Import".
      2. -
      3. The Import dialog box appears. Navigate to the folder where your media files are stored on your computer or external drive.
      4. -
      5. Select the files that you want to import and click "Open". You can also drag and drop files from Finder into the Project panel.
      6. -
      7. The files are imported into your project and appear in the Project panel as thumbnails or icons.
      8. -
      -

      You have now imported your media files into your project in Adobe Premiere Pro CC 2015.3-10.3 Mac OS X.

      -

      How to use the timeline and editing tools?

      -

      The timeline is where you arrange and edit your media files to create your video.

      -

      The timeline consists of several tracks (layers) where you can place your video clips, audio clips, images, titles, effects, and more.

      -

      The timeline also has a playhead (a vertical line with a triangle on top) that shows the current position and timecode of your video.

      -

      To use the timeline and editing tools, follow these steps:

      -
        -
      1. Drag and drop your media files from the Project panel to the timeline. You can place them on any track, as long as they match the type of media (video or audio).
      2. -
      3. Adjust the position and length of your clips by dragging their edges or moving them along the timeline. You can also use keyboard shortcuts or the Selection tool (the arrow icon) to perform basic editing operations, such as cut, copy, paste, delete, trim, split, etc.
      4. -
      5. Use the Zoom tool (the magnifying glass icon) or the scroll bar to zoom in or out of the timeline. You can also use the plus (+) and minus (-) keys to zoom in or out.
      6. -
      7. Use the Track Select tool (the arrow with a line icon) to select all the clips on a track or to the right of the playhead.
      8. -
      9. Use the Ripple Edit tool (the yellow bracket icon) to trim a clip and move all the clips to the right of it accordingly.
      10. -
      11. Use the Rolling Edit tool (the red bracket icon) to trim two adjacent clips at the same time, without changing their overall duration.
      12. -
      13. Use the Rate Stretch tool (the clock icon) to change the speed and duration of a clip by dragging its edges.
      14. -
      15. Use the Razor tool (the scissors icon) to cut a clip into two parts at the position of the playhead.
      16. -
      17. Use the Slip tool (the two arrows with a filmstrip icon) to change the in and out points of a clip without changing its position or duration.
      18. -
      19. Use the Slide tool (the two arrows with a filmstrip and a line icon) to change the position of a clip without changing its in and out points or duration.
      20. -
      21. Use the Pen tool (the pen icon) to create keyframes for adjusting the opacity, volume, or effect parameters of a clip over time.
      22. -
      23. Use the Hand tool (the hand icon) to move the view of the timeline without changing anything else.
      24. -
      -

      You have now learned how to use some of the basic timeline and editing tools in Adobe Premiere Pro CC 2015.3-10.3 Mac OS X.

      -

      How to apply effects and transitions?

      -

      Effects and transitions are ways to enhance and modify your video clips by adding visual or audio elements, such as color correction, filters, motion, sound, etc.

      -

      To apply effects and transitions, follow these steps:

      -
        -
      1. In the Project panel, click on the Effects tab. You will see a list of various categories of effects and transitions that you can use in your video.
      2. -
      3. Browse through the categories and find an effect or transition that you want to apply. You can also use the search box to find an effect or transition by name or keyword.
      4. -
      5. Drag and drop the effect or transition onto a clip in the timeline. You can also drag and drop an effect onto an adjustment layer (a special type of clip that applies an effect to all clips below it).
      6. -
      7. To adjust the settings of an effect or transition, select the clip that has it applied and go to the Effect Controls panel (the upper-left corner of the screen). You will see a list of parameters that you can adjust, such as opacity, position, scale, rotation, etc.
      8. -
      9. Use the sliders, buttons, checkboxes, or keyframes to change the values of the parameters. You can also use the Program Monitor (the upper-right corner of the screen) to preview the effect or transition and make adjustments directly on the video.
      10. -
      11. To remove an effect or transition, select the clip that has it applied and go to the Effect Controls panel. Click on the name of the effect or transition and press Delete.
      12. -
      -

      You have now learned how to apply some of the basic effects and transitions in Adobe Premiere Pro CC 2015.3-10.3 Mac OS X.

      -

      How to export and share your video?

      -

      When you are done editing your video, you need to export it to a file format that you can play, share, or distribute.

      -

      To export and share your video, follow these steps:

      -
        -
      1. In the timeline, select the sequence that you want to export. You can also set in and out points to define a specific portion of the sequence that you want to export.
      2. -
      3. Go to File > Export > Media. The Export Settings dialog box appears.
      4. -
      5. Choose a format and a preset for your output file. You can also customize the settings, such as resolution, frame rate, bitrate, codec, audio channels, etc.
      6. -
      7. Choose a name and a location for your output file. You can also check the Export Video and Export Audio boxes to export both video and audio tracks.
      8. -
      9. Click on Export to start the export process. You can also click on Queue to send your export to Adobe Media Encoder, where you can manage multiple exports at once.
      10. -
      11. Wait for the export process to finish. You can monitor the progress in the Export panel or in Adobe Media Encoder.
      12. -
      13. When the export is complete, you can play your output file with any compatible media player or device. You can also share it online via email, social media, cloud storage, or other platforms.
      14. -
      -

      You have now learned how to export and share your video in Adobe Premiere Pro CC 2015.3-10.3 Mac OS X.
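If you want to confirm that the exported file really has the container, codecs, resolution, and frame rate you chose, one option outside of Premiere Pro is ffprobe from the free FFmpeg project. The sketch below is not part of Adobe's software; it assumes ffprobe is installed and that the file name is just a placeholder for your actual export:

```python
import json
import subprocess

exported_file = "my_video.mp4"  # placeholder -- use the path of the file you exported

# Ask ffprobe for container and per-stream details as JSON.
probe = subprocess.run(
    ["ffprobe", "-v", "error", "-show_format", "-show_streams", "-of", "json", exported_file],
    capture_output=True,
    text=True,
    check=True,
)
info = json.loads(probe.stdout)
print("Container:", info["format"]["format_name"])
for stream in info["streams"]:
    print(stream["codec_type"], stream.get("codec_name"),
          stream.get("width"), stream.get("height"), stream.get("avg_frame_rate"))
```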

      -

      How to troubleshoot Adobe Premiere Pro CC 2015.3-10.3 Mac OS X

      -

      Sometimes, you may encounter some issues or errors when using Adobe Premiere Pro CC 2015.3-10.3 Mac OS X.

      -

      These issues or errors may be caused by various factors, such as incompatible hardware or software, corrupted files or settings, network problems, bugs or glitches, etc.

      -

      To troubleshoot Adobe Premiere Pro CC 2015.3-10.3 Mac OS X, follow these steps:

      -

      What are some common issues and solutions?

      -

      Here are some of the common issues and solutions that you may encounter when using Adobe Premiere Pro CC 2015.3-10.3 Mac OS X:

      -
        -
      • Issue: Adobe Premiere Pro CC 2015.3-10.3 Mac OS X crashes or freezes frequently.
      • -
      • Solution: Try these steps:
          -
        1. Make sure that your Mac meets the minimum system requirements for running Adobe Premiere Pro CC 2015.3-10.3 Mac OS X.
        2. -
        3. Update your Mac operating system and drivers to the latest versions.
        4. -
        5. Update Adobe Premiere Pro CC 2015.3-10.3 Mac OS X to the latest version.
        6. -
        7. Close any other applications or processes that are running in the background and consuming memory or CPU resources.
        8. -
        9. Delete any unnecessary or unused files or media from your project and timeline.
        10. -
        11. Clear your media cache and preferences files by going to Preferences > Media Cache and Preferences > General and clicking on Clear or Delete.
        12. -
        13. Disable any third-party plugins or effects that may be causing conflicts or errors.
        14. -
        15. Restart your Mac and relaunch Adobe Premiere Pro CC 2015.3-10.3 Mac OS X.
        16. -
      • -
      • Issue: Adobe Premiere Pro CC 2015.3-10.3 Mac OS X does not recognize or import some media files correctly.
      • -
      • Solution: Try these steps:
          -
        1. Make sure that your media files are in a supported format and codec for Adobe Premiere Pro CC 2015.3-10.3 Mac OS X.
        2. -
        3. Rename your media files with simple and short names without any special characters or spaces.
        4. -
        5. Copy your media files to your local drive or an external drive that is formatted with a Mac-compatible file system, such as HFS+ or APFS.
        6. -
        7. Convert your media files to a different format or codec using a third-party software or online service, such as HandBrake, VLC, or Zamzar.
        8. -
        9. Re-import your media files into Adobe Premiere Pro CC 2015.3-10.3 Mac OS X using the Media Browser panel instead of the Import dialog box.
        10. -
      • -
      • Issue: Adobe Premiere Pro CC 2015.3-10.3 Mac OS X does not play or export some audio or video tracks correctly.
      • -
      • Solution: Try these steps:
          -
        1. Make sure that your audio and video tracks are enabled and not muted or soloed in the timeline.
        2. -
        3. Make sure that your audio and video tracks are linked and synchronized in the timeline.
        4. -
        5. Make sure that your audio and video tracks have the same sample rate and frame rate as your sequence settings.
        6. -
        7. Make sure that your audio and video tracks are not corrupted or damaged by checking them in another media player or device.
        8. -
        9. Render your audio and video tracks by going to Sequence > Render Audio or Sequence > Render Effects in Work Area.
        10. -
        11. Change your audio and video playback settings by going to Preferences > Audio Hardware or Preferences > Playback and adjusting the device class, input, output, buffer size, etc.
        12. -
        13. Change your audio and video export settings by going to File > Export > Media and adjusting the format, preset, bitrate, codec, channels, etc.
        14. -
      • -
      -

      These are some of the common issues and solutions that you may encounter when using Adobe Premiere Pro CC 2015.3-10.3 Mac OS X.

      -

      If these steps do not resolve your issue or error, you can try searching for more specific solutions online, or contact Adobe support or community forums for further assistance.

      -

      How to update or rollback to a previous version?

      -

      Sometimes, you may want to update or rollback to a previous version of Adobe Premiere Pro CC 2015.3-10.3 Mac OS X.

      -

      This may be because you want to access new features or bug fixes, or because you want to avoid compatibility issues or errors with a newer version.

      -

      To update or rollback to a previous version of Adobe Premiere Pro CC 2015.3-10.3 Mac OS X, follow these steps:

      -
        -
      1. Open the Creative Cloud desktop app on your Mac and sign in with your Adobe account.
      2. -
      3. Go to the Apps tab and find Adobe Premiere Pro in the list of installed apps.
      4. -
      5. To update to a newer version, click on the Update button next to Adobe Premiere Pro. You can also click on the More actions icon (the three dots) and choose Check for updates.
      6. -
      7. To rollback to a previous version, click on the More actions icon (the three dots) and choose Other versions. You will see a list of available versions that you can install. Choose the version that you want and click on Install.
      8. -
      9. The Creative Cloud desktop app will start downloading and installing the selected version of Adobe Premiere Pro CC 2015.3-10.3 Mac OS X on your Mac.
      10. -
      11. When the installation is complete, you can launch Adobe Premiere Pro CC 2015.3-10.3 Mac OS X from your Applications folder or the Creative Cloud desktop app.
      12. -
      -

      You have now learned how to update or rollback to a previous version of Adobe Premiere Pro CC 2015.3-10.3 Mac OS X on your Mac.

      -

      Conclusion

      -

      In this article, we have reviewed Adobe Premiere Pro CC 2015.3-10.3 Mac OS X, which is the latest version available for Mac users as of June 2021.

      -

      We have covered what this version is, why it is important for video editing, what are its main features and benefits, how to install it on your Mac, how to use it for your video projects, how to troubleshoot it if you encounter any issues, and how to update or rollback to a previous version if needed.

      -

      We hope that this article has given you a comprehensive understanding of Adobe Premiere Pro CC 2015.3-10.3 Mac OS X, and that you will be able to decide if it is the right video editing software for you.

      -

      If you are interested in trying out Adobe Premiere Pro CC 2015.3-10.3 Mac OS X for yourself, you can download it from the official Adobe website and use it for free for 7 days with a trial account or purchase a subscription plan with a monthly or annual fee.

      -

      If you have any questions or feedback about Adobe Premiere Pro CC 2015.3-10.3 Mac OS X, you can contact Adobe support or community forums, or leave a comment below.

      -

      Thank you for reading this article, and happy video editing!

      -

      FAQs

      -

      Here are some of the frequently asked questions (FAQs) about Adobe Premiere Pro CC 2015.3-10.3 Mac OS X:

      -

      What is the difference between Adobe Premiere Pro CC 2015.3 and 10.3?

      -

      Adobe Premiere Pro CC 2015.3 and 10.3 are the same version of the software, but with different naming conventions.

      -

      Adobe Premiere Pro CC 2015.3 is the name of the software when it was first released in June 2016, as part of the Creative Cloud 2015 update.

      -

      Adobe Premiere Pro 10.3 is the name of the software when it was updated in July 2016, as part of the Creative Cloud 2017 update.

      -

      The software itself did not change significantly between these two updates, except for some bug fixes and minor improvements.

      -

      Is Adobe Premiere Pro CC 2015.3-10.3 Mac OS X compatible with macOS High Sierra (10.13)?

      -

      Yes, Adobe Premiere Pro CC 2015.3-10.3 Mac OS X is compatible with macOS High Sierra (10.13), which is the latest version of the Mac operating system as of June 2021.

      -

      However, some users have reported some issues or errors when using Adobe Premiere Pro CC 2015.3-10.3 Mac OS X on macOS High Sierra (10.13), such as crashing, freezing, audio distortion, rendering problems, etc.

      -

      If you encounter any of these issues or errors, you can try some of the troubleshooting steps mentioned above, or update to a newer version of Adobe Premiere Pro CC that is more stable and optimized for macOS High Sierra (10.13).

      -

      How much does Adobe Premiere Pro CC 2015.3-10.3 Mac OS X cost?

      -

      Adobe Premiere Pro CC 2015.3-10.3 Mac OS X is not sold as a standalone product, but as part of the Adobe Creative Cloud suite of applications.

      -

      To use Adobe Premiere Pro CC 2015.3-10.3 Mac OS X, you need to have a subscription plan with Adobe Creative Cloud, which gives you access to all the Adobe apps and services that you need for your creative projects.

      -

      The subscription plans vary depending on your needs and preferences, such as the number of apps, the storage space, the number of users, etc.

      -

      As of June 2021, these are some of the subscription plans and prices for Adobe Creative Cloud:

      -
        -
      • The Single App plan gives you access to one Adobe app of your choice, such as Adobe Premiere Pro CC 2015.3-10.3 Mac OS X, plus 100 GB of cloud storage and other features for $20.99 per month or $239.88 per year.
      • -
      • The All Apps plan gives you access to all the Adobe apps, including Adobe Premiere Pro CC 2015.3-10.3 Mac OS X, plus 100 GB of cloud storage and other features for $52.99 per month or $599.88 per year.
      • -
      • The All Apps + Adobe Stock plan gives you access to all the Adobe apps, including Adobe Premiere Pro CC 2015.3-10.3 Mac OS X, plus 100 GB of cloud storage and other features, plus 10 images per month from Adobe Stock for $82.98 per month or $959.76 per year.
      • -
      • The Student & Teacher plan gives you access to all the Adobe apps, including Adobe Premiere Pro CC 2015.3-10.3 Mac OS X, plus 100 GB of cloud storage and other features for $19.99 per month or $239.88 per year, if you are eligible for academic discount.
      • -
      • The Business plan gives you access to all the Adobe apps, including Adobe Premiere Pro CC 2015.3-10.3 Mac OS X, plus 1 TB of cloud storage and other features for $79.99 per month or $959.88 per year per user, if you are a small or medium business owner.
      • -
      -

      You can also try Adobe Premiere Pro CC 2015.3-10.3 Mac OS X for free for 7 days with a trial account before purchasing a subscription plan.

      -

      What are some alternatives to Adobe Premiere Pro CC 2015.3-10.3 Mac OS X?

      -

      Adobe Premiere Pro CC 2015.3-10.3 Mac OS X is not the only video editing software available for Mac users.

      -

      There are many other alternatives that you can use, depending on your needs, preferences, budget, and skill level.

      -

      Here are some of the popular alternatives to Adobe Premiere Pro CC 2015.3-10.3 Mac OS X:

      -
        -
      • Final Cut Pro X: This is Apple's own video editing software, designed specifically for Mac users. It offers a sleek and intuitive interface, powerful performance, advanced features, and seamless integration with other Apple products and services. It costs $299.99 as a one-time purchase from the Mac App Store.
      • -
      • iMovie: This is another video editing software from Apple, but more suitable for beginners and casual users. It offers a simple and user-friendly interface, basic features, and easy sharing options. It is free for Mac users and comes pre-installed on most Mac computers.
      • -
• DaVinci Resolve: This is professional video editing software from Blackmagic Design, known for its color grading and visual effects capabilities. It offers a comprehensive and customizable interface, high-end features, and support for multiple formats and platforms. It has a free version with some limitations, and a paid version with more features for $299.
      • -
      • HitFilm Express: This is a free video editing software from FXhome, known for its compositing and special effects features. It offers a modern and versatile interface, impressive features, and support for various plugins and add-ons. It is free to download and use, but you can also purchase optional packs or bundles for more features and effects.
      • -
      • Shotcut: This is an open-source video editing software from Meltytech, known for its simplicity and cross-platform compatibility. It offers a minimalistic and straightforward interface, basic features, and support for various formats and codecs. It is free to download and use, but you can also donate to support its development.
      • -
      -

      These are some of the popular alternatives to Adobe Premiere Pro CC 2015.3-10.3 Mac OS X that you can try out.

      -

      Of course, there are many other video editing software available for Mac users, so you can do your own research and find the one that suits you best.

      -

      Where can I find more resources and tutorials on Adobe Premiere Pro CC 2015.3-10.3 Mac OS X?

      -

      If you want to learn more about Adobe Premiere Pro CC 2015.3-10.3 Mac OS X, you can find plenty of resources and tutorials online.

      -

      Here are some of the best places to find more resources and tutorials on Adobe Premiere Pro CC 2015.3-10.3 Mac OS X:

      -
        -
      • The official Adobe website: This is the best place to find the latest information, updates, downloads, help pages, tutorials, forums, blogs, podcasts, webinars, events, and more on Adobe Premiere Pro CC 2015.3-10.3 Mac OS X.
      • -
      • The official Adobe YouTube channel: This is the best place to find the latest videos, demos, tips, tricks, interviews, live streams, and more on Adobe Premiere Pro CC 2015.3-10.3 Mac OS X.
      • -
      • The official Adobe Facebook page: This is the best place to find the latest news, announcements, stories, contests, feedback, and more on Adobe Premiere Pro CC 2015.3-10.3 Mac OS X.
      • -
      • The official Adobe Twitter account: This is the best place to find the latest tweets, replies, retweets, likes, and more on Adobe Premiere Pro CC 2015.3-10.3 Mac OS X.
      • -
      • The official Adobe Instagram account: This is the best place to find the latest photos, videos, stories, reels, and more on Adobe Premiere Pro CC 2015.3-10.3 Mac OS X.
      • -
      • The official Adobe Reddit community: This is the best place to find the latest posts, comments, discussions, questions, answers, and more on Adobe Premiere Pro CC 2015.3-10.3 Mac OS X.
      • -
      • The official Adobe LinkedIn page: This is the best place to find the latest articles, insights, jobs, and more on Adobe Premiere Pro CC 2015.3-10.3 Mac OS X.
      • -
      • The official Adobe Behance portfolio: This is the best place to find the latest projects, collections, galleries, and more on Adobe Premiere Pro CC 2015.3-10.3 Mac OS X.
      • -
• Other online courses and books: There are many other online courses and books that you can take or read to learn more about Adobe Premiere Pro CC 2015.3-10.3 Mac OS X and video editing in general.
      • -
      -

      These are some of the best places to find more resources and tutorials on Adobe Premiere Pro CC 2015.3-10.3 Mac OS X.

      \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Candy Paint And Gold Teeth Waka Flocka Flame Download !FREE!.md b/spaces/stomexserde/gpt4-ui/Examples/Candy Paint And Gold Teeth Waka Flocka Flame Download !FREE!.md deleted file mode 100644 index 053b3cd41b93d678a0d75fe1353c7282c521ddef..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Candy Paint And Gold Teeth Waka Flocka Flame Download !FREE!.md +++ /dev/null @@ -1,21 +0,0 @@ -
      -

      How to Download Candy Paint & Gold Teeth by Waka Flocka Flame

      -

      Candy Paint & Gold Teeth is a song by American rapper Waka Flocka Flame, featuring Ludacris and Bun B. It was released on June 8, 2012, as the ninth track from his second studio album, Triple F Life: Friends, Fans and Family. The song pays homage to the southern rap culture, with references to soul food, strip clubs, car customization and legendary artists like Pimp C and Willie D.

      -

      candy paint and gold teeth waka flocka flame download


      Download --->>> https://urlgoal.com/2uI7Cv



      -

      If you are a fan of Waka Flocka Flame and want to download Candy Paint & Gold Teeth to your device, here are some easy steps to follow:

      -
        -
      1. Go to YouTube and search for "candy paint and gold teeth waka flocka flame". You should see the official music video as the first result[^2^]. Alternatively, you can use this link: https://www.youtube.com/watch?v=zjpgQUWKVx4
      2. -
      3. Copy the URL of the video and paste it into a YouTube to MP3 converter website, such as https://ytmp3.cc/en13/ or https://y2mate.com/. These websites allow you to download YouTube videos as MP3 files for free.
      4. -
      5. Click on the "Convert" or "Start" button and wait for the conversion process to finish. You should see a download link or button when it is done.
      6. -
      7. Click on the download link or button and save the MP3 file to your device. You can also rename it if you want.
      8. -
      9. Enjoy listening to Candy Paint & Gold Teeth by Waka Flocka Flame!
      10. -
      -

      If you want to learn more about the song, you can also check out its lyrics[^1^] [^4^] and its Shazam page[^3^], where you can discover more songs by Waka Flocka Flame and other related artists.

      - -

      Candy Paint & Gold Teeth is one of the most popular songs from Waka Flocka Flame's second album, Triple F Life: Friends, Fans and Family. The album was released on June 12, 2012, and debuted at number 10 on the Billboard 200 chart, selling 33,000 copies in its first week. The album received mixed reviews from critics, who praised Waka Flocka Flame's energy and charisma, but criticized his lyrics and lack of originality.

      -

      The song features two legendary southern rappers, Ludacris and Bun B, who both deliver impressive verses that showcase their skills and influence. Ludacris raps about his success and wealth, while Bun B raps about his loyalty and respect. Waka Flocka Flame holds his own with his aggressive and catchy hook and verse, where he expresses his pride and love for his hometown of Riverdale, Georgia.

      -

      The song is produced by Honorable C.N.O.T.E. and Redwine, who create a hard-hitting and melodic beat that blends trap drums, piano keys, synth chords and guitar riffs. The beat matches the mood and theme of the song, which is a celebration of the southern rap culture and lifestyle. The song also samples a vocal snippet from "Southern Hospitality" by Ludacris, which adds to the homage.

      -

      -

      Candy Paint & Gold Teeth is a song that appeals to fans of Waka Flocka Flame and southern rap in general. It is a song that showcases the talent and diversity of the south, as well as the passion and pride of its artists. It is a song that makes you want to ride low in your car, eat some soul food, and party all night with your people.

      -
      -
      \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Data Cash US Quest Sql Navigator 6.7 Keygen Torrent 9 BEST.md b/spaces/stomexserde/gpt4-ui/Examples/Data Cash US Quest Sql Navigator 6.7 Keygen Torrent 9 BEST.md deleted file mode 100644 index b1fed417c4b550cb1ae5a5b7af30511bde3ac6f0..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Data Cash US Quest Sql Navigator 6.7 Keygen Torrent 9 BEST.md +++ /dev/null @@ -1,15 +0,0 @@ -
      -

      Data Cash US Quest Sql Navigator 6.7 Keygen Torrent 9: A Review of the Software

      -

      Data Cash US Quest Sql Navigator 6.7 Keygen Torrent 9 is a software that allows users to access and manage SQL databases easily and efficiently. It is a product of Quest Software, a leading provider of database management solutions. Data Cash US Quest Sql Navigator 6.7 Keygen Torrent 9 is designed to help users perform tasks such as creating, editing, debugging, and optimizing SQL code, as well as executing queries, analyzing data, and generating reports.

      -

      Data Cash US quest sql navigator 6.7 keygen torrent 9


      Download ===> https://urlgoal.com/2uI6WH



      -

      Data Cash US Quest Sql Navigator 6.7 Keygen Torrent 9 is compatible with various versions of SQL Server, Oracle, MySQL, PostgreSQL, and other databases. It has a user-friendly interface that supports drag-and-drop operations, syntax highlighting, code completion, and code formatting. It also has a powerful debugger that can trace and modify SQL statements, variables, and parameters. Data Cash US Quest Sql Navigator 6.7 Keygen Torrent 9 also offers features such as code analysis, code refactoring, code generation, code templates, code snippets, and code comparison.

      -

      Data Cash US Quest Sql Navigator 6.7 Keygen Torrent 9 is available for download from various sources on the internet. However, users need to have a valid license key to activate the software and use its full functionality. A license key can be obtained from the official website of Quest Software or from other authorized resellers. Alternatively, users can also use a keygen tool to generate a license key for Data Cash US Quest Sql Navigator 6.7 Keygen Torrent 9. A keygen tool is a software that creates a unique serial number or activation code for a specific software.

      -

      However, using a keygen tool to activate Data Cash US Quest Sql Navigator 6.7 Keygen Torrent 9 is not recommended for several reasons. First, it is illegal and violates the terms and conditions of Quest Software. Second, it may expose users to malware or viruses that can harm their computers or steal their personal information. Third, it may result in poor performance or errors in the software or the database. Therefore, users should always purchase a legitimate license key for Data Cash US Quest Sql Navigator 6.7 Keygen Torrent 9 from Quest Software or its authorized resellers.

      -

      Data Cash US Quest Sql Navigator 6.7 Keygen Torrent 9 is a useful and reliable software for SQL database management. It has many features and benefits that can help users improve their productivity and efficiency. However, users should always use a legal and valid license key to activate the software and enjoy its full potential.

      - -

      Data Cash US Quest Sql Navigator 6.7 Keygen Torrent 9 is not only a tool for SQL developers, but also for SQL administrators and analysts. It has features that can help users manage and monitor their SQL databases, such as backup and restore, security and auditing, performance tuning, and schema comparison. Data Cash US Quest Sql Navigator 6.7 Keygen Torrent 9 also integrates with other Quest Software products, such as Toad for Oracle, Toad for SQL Server, Toad Data Modeler, and Toad Data Point.

      -

      Data Cash US Quest Sql Navigator 6.7 Keygen Torrent 9 has received positive feedback from users and reviewers, who praise its ease of use, functionality, stability, and support. It has also earned several industry awards, such as the Best of TechEd 2013 Award for Database Development, the SQL Server Pro Community Choice Award 2013 for Best Database Development Tool, and the Visual Studio Magazine Readers Choice Award 2013 for Best Database Development Tool.

      -

      -

      Data Cash US Quest Sql Navigator 6.7 Keygen Torrent 9 is a software that can help users improve their SQL database management skills and productivity. It is a valuable asset for anyone who works with SQL databases on a regular basis. However, users should always respect the intellectual property rights of Quest Software and purchase a legal license key for Data Cash US Quest Sql Navigator 6.7 Keygen Torrent 9.

      -
      -
      \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Freedownloadvrayrenderpresetsfor3dsmax REPACK.md b/spaces/stomexserde/gpt4-ui/Examples/Freedownloadvrayrenderpresetsfor3dsmax REPACK.md deleted file mode 100644 index 011c3b9373efcc561427764c9740e7316a0503e8..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Freedownloadvrayrenderpresetsfor3dsmax REPACK.md +++ /dev/null @@ -1,22 +0,0 @@ -
      -

      How to Download and Use V-Ray Render Presets for 3ds Max

      -

      V-Ray is a powerful rendering engine that can create realistic and stunning images for 3D modeling and animation. However, setting up the render parameters can be time-consuming and complex, especially for beginners. That's why V-Ray offers a collection of render presets that can help you achieve different effects and styles with just a few clicks.

      -

      Freedownloadvrayrenderpresetsfor3dsmax


      DOWNLOAD 🗹 https://urlgoal.com/2uIaqs



      -

      In this article, we will show you how to download and use V-Ray render presets for 3ds Max, one of the most popular 3D software applications. You will learn how to access the presets, apply them to your scenes, and customize them to suit your needs.

      -

      How to Download V-Ray Render Presets for 3ds Max

      -

      There are several sources where you can download V-Ray render presets for 3ds Max. Some of them are free, while others require a subscription or a purchase. Here are some of the options:

      -
        -
      • 3DsMax / Realistic and Fast Render Presets: This is a Facebook group where you can find and share various render presets for 3ds Max and V-Ray. You can join the group for free and browse through the topics and events[^1^].
      • 3ds Max Vray Preset Free Download - suggestions - Informer: This is a website that provides software suggestions and downloads. You can find several V-Ray material presets and converters for 3ds Max here[^2^]. Some of them are free, while others require a registration or a trial.
      • Freedownloadvrayrenderpresetsfor3dsmax: This is a PDF file that contains a link to download a collection of V-Ray render presets for 3ds Max[^3^]. However, this file may not be safe or reliable, as it may contain malware or viruses. We do not recommend downloading files from unknown sources.
      -

      How to Use V-Ray Render Presets for 3ds Max

      -

      Once you have downloaded the V-Ray render presets for 3ds Max, you need to install them in your software. The installation process may vary depending on the source and the format of the presets. Generally, you need to copy or extract the preset files to the appropriate folder in your 3ds Max directory. For example, if you have downloaded VRayMtl Converter, you need to copy the VRayMtlConverter.mse file to C:\Program Files\Autodesk\3ds Max \scripts.
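      For readers who prefer to script that copy step, here is a minimal Python sketch. The destination folder is an assumption (3ds Max keeps its scripts folder inside a version-specific directory, so adjust the path to your own install), and the file name VRayMtlConverter.mse is just the example mentioned above.

```python
import shutil
from pathlib import Path

# Assumed paths -- adjust to your own download location and 3ds Max version folder.
downloaded_preset = Path.home() / "Downloads" / "VRayMtlConverter.mse"
scripts_dir = Path(r"C:\Program Files\Autodesk\3ds Max 2018\scripts")  # hypothetical version folder

if not downloaded_preset.exists():
    raise SystemExit(f"Preset file not found: {downloaded_preset}")

# Copying into Program Files usually requires an elevated (administrator) shell.
shutil.copy2(downloaded_preset, scripts_dir / downloaded_preset.name)
print(f"Copied {downloaded_preset.name} to {scripts_dir}")
```

      Restart 3ds Max afterwards so the newly copied script or preset is picked up.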

      -

      -

      After installing the presets, you can access them from the Render Setup dialog in 3ds Max. To open the Render Setup dialog, go to Rendering > Render Setup or press F10 on your keyboard. In the Render Setup dialog, you can choose V-Ray as your renderer and then click on the Preset button at the bottom left corner. This will open a drop-down menu where you can see all the available presets. You can select any preset that matches your scene and your desired output.

      -

      When you apply a preset, it will automatically adjust the render settings such as resolution, quality, lighting, camera, materials, etc. You can preview the result in the ActiveShade window or by clicking on Render. However, you can also modify any of the settings manually if you want to fine-tune your render. For example, you can change the exposure value, the color balance, the depth of field, etc.

      -

      Conclusion

      -

      V-Ray render presets for 3ds Max are a great way to save time and effort when rendering your 3D scenes. They can help you achieve different effects and styles with ease and speed. However, they are not a substitute for your own creativity and skill. You should always experiment with different settings and options to create your own unique renders.

      -

      We hope this article helps you get started with V-Ray render presets in 3ds Max.

      -
      -
      \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Construct 2 License File Crackl _HOT_.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Construct 2 License File Crackl _HOT_.md deleted file mode 100644 index 89486ae12f037b39b968aced89a2c94c85dd8a84..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Construct 2 License File Crackl _HOT_.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Construct 2 License File Crackl


      DOWNLOAD ⚙⚙⚙ https://cinurl.com/2uEYG0



      Tag: download construct 3 full crack. C3p file included, Cordova HTML5 files included, icons included, AdMob ads integrated, mouse and touch controls, runs on all platforms ... Unlock your full creative potential with a full Construct 2 license.
      -
      -
      -

      diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/WelcomeBackmovie720pdownload [CRACKED]movie.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/WelcomeBackmovie720pdownload [CRACKED]movie.md deleted file mode 100644 index 71bea5be4f8e074220cf115e87a5d7e3e43e76cc..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/WelcomeBackmovie720pdownload [CRACKED]movie.md +++ /dev/null @@ -1,6 +0,0 @@ -

      WelcomeBackmovie720pdownloadmovie


      DOWNLOAD >>>>> https://cinurl.com/2uEXp8



      -
      -
      -
      -

      diff --git a/spaces/sushimashi/webui/app.py b/spaces/sushimashi/webui/app.py deleted file mode 100644 index a6d4e6fbbf46c7b912969ed7b531c3de6a81fc64..0000000000000000000000000000000000000000 --- a/spaces/sushimashi/webui/app.py +++ /dev/null @@ -1,76 +0,0 @@ -import os -from subprocess import getoutput - -gpu_info = getoutput('nvidia-smi') -if("A10G" in gpu_info): - os.system(f"pip install -q https://github.com/camenduru/stable-diffusion-webui-colab/releases/download/0.0.15/xformers-0.0.15.dev0+4c06c79.d20221205-cp38-cp38-linux_x86_64.whl") -elif("T4" in gpu_info): - os.system(f"pip install -q https://github.com/camenduru/stable-diffusion-webui-colab/releases/download/0.0.15/xformers-0.0.15.dev0+1515f77.d20221130-cp38-cp38-linux_x86_64.whl") - -os.system(f"git clone -b v1.5 https://github.com/camenduru/stable-diffusion-webui /home/user/app/stable-diffusion-webui") -os.chdir("/home/user/app/stable-diffusion-webui") - -os.system(f"wget -q https://github.com/camenduru/webui/raw/main/env_patch.py -O /home/user/app/env_patch.py") -os.system(f"sed -i -e '/import image_from_url_text/r /home/user/app/env_patch.py' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f"sed -i -e '/(modelmerger_interface, \"Checkpoint Merger\", \"modelmerger\"),/d' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f"sed -i -e '/(train_interface, \"Train\", \"ti\"),/d' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f"sed -i -e '/extensions_interface, \"Extensions\", \"extensions\"/d' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f"sed -i -e '/settings_interface, \"Settings\", \"settings\"/d' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f'''sed -i -e "s/document.getElementsByTagName('gradio-app')\[0\].shadowRoot/!!document.getElementsByTagName('gradio-app')[0].shadowRoot ? 
document.getElementsByTagName('gradio-app')[0].shadowRoot : document/g" /home/user/app/stable-diffusion-webui/script.js''') -os.system(f"sed -i -e 's/ show_progress=False,/ show_progress=True,/g' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f"sed -i -e 's/shared.demo.launch/shared.demo.queue().launch/g' /home/user/app/stable-diffusion-webui/webui.py") -os.system(f"sed -i -e 's/ outputs=\[/queue=False, &/g' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f"sed -i -e 's/ queue=False, / /g' /home/user/app/stable-diffusion-webui/modules/ui.py") - -# ----------------------------Please duplicate this space and delete this block if you don't want to see the extra header---------------------------- -os.system(f"wget -q https://github.com/camenduru/webui/raw/main/header_patch.py -O /home/user/app/header_patch.py") -os.system(f"sed -i -e '/demo:/r /home/user/app/header_patch.py' /home/user/app/stable-diffusion-webui/modules/ui.py") -# --------------------------------------------------------------------------------------------------------------------------------------------------- - -if "IS_SHARED_UI" in os.environ: - os.system(f"rm -rfv /home/user/app/stable-diffusion-webui/scripts/") - - os.system(f"wget -q https://github.com/camenduru/webui/raw/main/shared-config.json -O /home/user/app/shared-config.json") - os.system(f"wget -q https://github.com/camenduru/webui/raw/main/shared-ui-config.json -O /home/user/app/shared-ui-config.json") - - os.system(f"wget -q {os.getenv('MODEL_LINK')} -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/{os.getenv('MODEL_NAME')}") - os.system(f"wget -q {os.getenv('VAE_LINK')} -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/{os.getenv('VAE_NAME')}") - os.system(f"wget -q {os.getenv('YAML_LINK')} -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/{os.getenv('YAML_NAME')}") - - os.system(f"python launch.py --force-enable-xformers --disable-console-progressbars --enable-console-prompts --ui-config-file /home/user/app/shared-ui-config.json --ui-settings-file /home/user/app/shared-config.json --cors-allow-origins huggingface.co,hf.space --no-progressbar-hiding") -else: - # Please duplicate this space and delete # character in front of the custom script you want to use or add here more custom scripts with same structure os.system(f"wget -q https://CUSTOM_SCRIPT_URL -O /home/user/app/stable-diffusion-webui/scripts/CUSTOM_SCRIPT_NAME.py") - os.system(f"wget -q https://gist.github.com/camenduru/9ec5f8141db9902e375967e93250860f/raw/d0bcf01786f20107c329c03f8968584ee67be12a/run_n_times.py -O /home/user/app/stable-diffusion-webui/scripts/run_n_times.py") - - # Please duplicate this space and delete # character in front of the extension you want to use or add here more extensions with same structure os.system(f"git clone https://EXTENSION_GIT_URL /home/user/app/stable-diffusion-webui/extensions/EXTENSION_NAME") - #os.system(f"git clone https://github.com/camenduru/stable-diffusion-webui-artists-to-study /home/user/app/stable-diffusion-webui/extensions/stable-diffusion-webui-artists-to-study") - os.system(f"git clone https://github.com/yfszzx/stable-diffusion-webui-images-browser /home/user/app/stable-diffusion-webui/extensions/stable-diffusion-webui-images-browser") - os.system(f"git clone https://github.com/deforum-art/deforum-for-automatic1111-webui /home/user/app/stable-diffusion-webui/extensions/deforum-for-automatic1111-webui") - - # Please duplicate this space and delete # character in front of 
the model you want to use or add here more ckpts with same structure os.system(f"wget -q https://CKPT_URL -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/CKPT_NAME.ckpt") - #os.system(f"wget -q https://huggingface.co/nitrosocke/Arcane-Diffusion/resolve/main/arcane-diffusion-v3.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/arcane-diffusion-v3.ckpt") - #os.system(f"wget -q https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/Cyberpunk-Anime-Diffusion.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/Cyberpunk-Anime-Diffusion.ckpt") - #os.system(f"wget -q https://huggingface.co/prompthero/midjourney-v4-diffusion/resolve/main/mdjrny-v4.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/mdjrny-v4.ckpt") - #os.system(f"wget -q https://huggingface.co/nitrosocke/mo-di-diffusion/resolve/main/moDi-v1-pruned.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/moDi-v1-pruned.ckpt") - #os.system(f"wget -q https://huggingface.co/Fictiverse/Stable_Diffusion_PaperCut_Model/resolve/main/PaperCut_v1.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/PaperCut_v1.ckpt") - #os.system(f"wget -q https://huggingface.co/lilpotat/sa/resolve/main/samdoesarts_style.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/samdoesarts_style.ckpt") - #os.system(f"wget -q https://huggingface.co/hakurei/waifu-diffusion-v1-3/resolve/main/wd-v1-3-float32.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/wd-v1-3-float32.ckpt") - #os.system(f"wget -q https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/sd-v1-4.ckpt") - #os.system(f"wget -q https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/v1-5-pruned-emaonly.ckpt") - #os.system(f"wget -q https://huggingface.co/runwayml/stable-diffusion-inpainting/resolve/main/sd-v1-5-inpainting.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/sd-v1-5-inpainting.ckpt") - - #os.system(f"wget -q https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/Anything-V3.0-pruned.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/Anything-V3.0-pruned.ckpt") - #os.system(f"wget -q https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/Anything-V3.0.vae.pt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/Anything-V3.0-pruned.vae.pt") - - #os.system(f"wget -q https://huggingface.co/stabilityai/stable-diffusion-2/resolve/main/768-v-ema.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/768-v-ema.ckpt") - #os.system(f"wget -q https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/768-v-ema.yaml") - - os.system(f"wget -q https://huggingface.co/stabilityai/stable-diffusion-2-1/resolve/main/v2-1_768-ema-pruned.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/v2-1_768-ema-pruned.ckpt") - os.system(f"wget -q https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/v2-1_768-ema-pruned.yaml") - - os.system(f"wget -q {os.getenv('MODEL_LINK')} -O 
/home/user/app/stable-diffusion-webui/models/Stable-diffusion/{os.getenv('MODEL_NAME')}") - os.system(f"wget -q {os.getenv('VAE_LINK')} -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/{os.getenv('VAE_NAME')}") - os.system(f"wget -q {os.getenv('YAML_LINK')} -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/{os.getenv('YAML_NAME')}") - - os.system(f"python launch.py --force-enable-xformers --ui-config-file /home/user/app/ui-config.json --ui-settings-file /home/user/app/config.json --disable-console-progressbars --enable-console-prompts --cors-allow-origins huggingface.co,hf.space --no-progressbar-hiding --api --skip-torch-cuda-test") - \ No newline at end of file diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/models/decode_heads/ocr_head.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/models/decode_heads/ocr_head.py deleted file mode 100644 index 715852e94e81dc46623972748285d2d19237a341..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/models/decode_heads/ocr_head.py +++ /dev/null @@ -1,127 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from annotator.uniformer.mmcv.cnn import ConvModule - -from annotator.uniformer.mmseg.ops import resize -from ..builder import HEADS -from ..utils import SelfAttentionBlock as _SelfAttentionBlock -from .cascade_decode_head import BaseCascadeDecodeHead - - -class SpatialGatherModule(nn.Module): - """Aggregate the context features according to the initial predicted - probability distribution. - - Employ the soft-weighted method to aggregate the context. - """ - - def __init__(self, scale): - super(SpatialGatherModule, self).__init__() - self.scale = scale - - def forward(self, feats, probs): - """Forward function.""" - batch_size, num_classes, height, width = probs.size() - channels = feats.size(1) - probs = probs.view(batch_size, num_classes, -1) - feats = feats.view(batch_size, channels, -1) - # [batch_size, height*width, num_classes] - feats = feats.permute(0, 2, 1) - # [batch_size, channels, height*width] - probs = F.softmax(self.scale * probs, dim=2) - # [batch_size, channels, num_classes] - ocr_context = torch.matmul(probs, feats) - ocr_context = ocr_context.permute(0, 2, 1).contiguous().unsqueeze(3) - return ocr_context - - -class ObjectAttentionBlock(_SelfAttentionBlock): - """Make a OCR used SelfAttentionBlock.""" - - def __init__(self, in_channels, channels, scale, conv_cfg, norm_cfg, - act_cfg): - if scale > 1: - query_downsample = nn.MaxPool2d(kernel_size=scale) - else: - query_downsample = None - super(ObjectAttentionBlock, self).__init__( - key_in_channels=in_channels, - query_in_channels=in_channels, - channels=channels, - out_channels=in_channels, - share_key_query=False, - query_downsample=query_downsample, - key_downsample=None, - key_query_num_convs=2, - key_query_norm=True, - value_out_num_convs=1, - value_out_norm=True, - matmul_norm=True, - with_out=True, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - self.bottleneck = ConvModule( - in_channels * 2, - in_channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - def forward(self, query_feats, key_feats): - """Forward function.""" - context = super(ObjectAttentionBlock, - self).forward(query_feats, key_feats) - output = self.bottleneck(torch.cat([context, query_feats], dim=1)) - if self.query_downsample is not None: - output = resize(query_feats) - - return output - 
- -@HEADS.register_module() -class OCRHead(BaseCascadeDecodeHead): - """Object-Contextual Representations for Semantic Segmentation. - - This head is the implementation of `OCRNet - `_. - - Args: - ocr_channels (int): The intermediate channels of OCR block. - scale (int): The scale of probability map in SpatialGatherModule in - Default: 1. - """ - - def __init__(self, ocr_channels, scale=1, **kwargs): - super(OCRHead, self).__init__(**kwargs) - self.ocr_channels = ocr_channels - self.scale = scale - self.object_context_block = ObjectAttentionBlock( - self.channels, - self.ocr_channels, - self.scale, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - self.spatial_gather_module = SpatialGatherModule(self.scale) - - self.bottleneck = ConvModule( - self.in_channels, - self.channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - def forward(self, inputs, prev_output): - """Forward function.""" - x = self._transform_inputs(inputs) - feats = self.bottleneck(x) - context = self.spatial_gather_module(feats, prev_output) - object_context = self.object_context_block(feats, context) - output = self.cls_seg(object_context) - - return output diff --git a/spaces/szukevin/VISOR-GPT/train/scripts/convert_pegasus_from_huggingface_to_tencentpretrain.py b/spaces/szukevin/VISOR-GPT/train/scripts/convert_pegasus_from_huggingface_to_tencentpretrain.py deleted file mode 100644 index 6bd8f8c0c1905d1c007ebdd4d48880bd020f4fb0..0000000000000000000000000000000000000000 --- a/spaces/szukevin/VISOR-GPT/train/scripts/convert_pegasus_from_huggingface_to_tencentpretrain.py +++ /dev/null @@ -1,42 +0,0 @@ -import sys -import os -import argparse -import collections -import torch - -tencentpretrain_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) -sys.path.insert(0, tencentpretrain_dir) - -from scripts.convert_bart_from_huggingface_to_tencentpretrain import \ - convert_encoder_decoder_transformer_from_huggingface_to_tencentpretrain - - -parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) -parser.add_argument("--input_model_path", type=str, default="models/input_model.bin", - help=".") -parser.add_argument("--output_model_path", type=str, default="models/output_model.bin", - help=".") -parser.add_argument("--layers_num", type=int, default=6, help=".") -parser.add_argument("--decoder_layers_num", type=int, default=6, help=".") - -args = parser.parse_args() - -input_model = torch.load(args.input_model_path, map_location="cpu") - -output_model = collections.OrderedDict() - -output_model["embedding.sinusoidalpos.pe"] = input_model["model.encoder.embed_positions.weight"].unsqueeze(1) -output_model["tgt_embedding.sinusoidalpos.pe"] = input_model["model.decoder.embed_positions.weight"].unsqueeze(1) -output_model["embedding.word.embedding.weight"] = input_model["model.encoder.embed_tokens.weight"] -output_model["tgt_embedding.word.embedding.weight"] = input_model["model.decoder.embed_tokens.weight"] -output_model["target.lm.output_layer.weight"] = input_model["lm_head.weight"] -output_model["target.lm.output_layer.bias"] = input_model["final_logits_bias"].squeeze(0) - -convert_encoder_decoder_transformer_from_huggingface_to_tencentpretrain(input_model, output_model, args.layers_num, args.decoder_layers_num) - -output_model["encoder.layer_norm.gamma"] = input_model["model.encoder.layer_norm.weight"] -output_model["encoder.layer_norm.beta"] = input_model["model.encoder.layer_norm.bias"] 
-output_model["decoder.layer_norm.gamma"] = input_model["model.decoder.layer_norm.weight"] -output_model["decoder.layer_norm.beta"] = input_model["model.decoder.layer_norm.bias"] - -torch.save(output_model, args.output_model_path) diff --git a/spaces/t13718236382/bingoGPT4/tests/kblob.ts b/spaces/t13718236382/bingoGPT4/tests/kblob.ts deleted file mode 100644 index 9e15b41c1c94a690beb61b23cdb42fc78767ccd2..0000000000000000000000000000000000000000 --- a/spaces/t13718236382/bingoGPT4/tests/kblob.ts +++ /dev/null @@ -1,27 +0,0 @@ -import FormData from 'form-data' - -import { fetch } from '@/lib/isomorphic' - -const formData = new FormData() - -const knowledgeRequest = {"imageInfo":{"url":"https://www.baidu.com/img/PCfb_5bf082d29588c07f842ccde3f97243ea.png"},"knowledgeRequest":{"invokedSkills":["ImageById"],"subscriptionId":"Bing.Chat.Multimodal","invokedSkillsRequestData":{"enableFaceBlur":true},"convoData":{"convoid":"51D|BingProdUnAuthenticatedUsers|E3DCA904FF236C67C3450163BCEC64CFF3F618CC8A4AFD75FD518F5ED0ADA080","convotone":"Creative"}}} - -formData.append('knowledgeRequest', JSON.stringify(knowledgeRequest)) - - -fetch('https://bing.vcanbb.top/images/kblob', - { - method: 'POST', - body: formData.getBuffer(), - headers: { - "sec-ch-ua": "\"Not/A)Brand\";v=\"99\", \"Google Chrome\";v=\"115\", \"Chromium\";v=\"115\"", - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": "\"Windows\"", - "Referer": "https://bing.vcanbb.top/web/index.html", - "Referrer-Policy": "origin-when-cross-origin", - ...formData.getHeaders() - } - - } -).then(res => res.text()) -.then(res => console.log('res', res)) diff --git a/spaces/techguy1423/ChatABT/test4.py b/spaces/techguy1423/ChatABT/test4.py deleted file mode 100644 index 987bc92e874aeeaa68dffcc5e9e4a6cdbc348545..0000000000000000000000000000000000000000 --- a/spaces/techguy1423/ChatABT/test4.py +++ /dev/null @@ -1,38 +0,0 @@ -import gradio as gr -from transformers import AutoTokenizer, AutoModelForCausalLM -import torch - -# Load the pre-trained Llama model and tokenizer -model_name = "meta-llama/Llama-2-13b-chat-hf" -tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf") -model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf") - -# Define a system prompt to set the context and behavior -system_prompt = "You are chatting with a friendly AI. Ask me anything!" 
- -# Function to generate a response -def chat(input_text): - # Combine the system prompt and user input - full_prompt = f"{system_prompt}\n\n{input_text}" - - # Encode the combined prompt and generate a response - input_ids = tokenizer.encode(full_prompt, return_tensors="pt") - with torch.no_grad(): - output = model.generate(input_ids, max_length=50, num_return_sequences=1) - - # Decode and return the AI's response - ai_response = tokenizer.decode(output[0], skip_special_tokens=True) - return ai_response - -# Create a Gradio interface -iface = gr.Interface( - fn=chat, - inputs="text", - outputs="text", - title="Llama Chatbot", - description="Chat with a friendly AI chatbot powered by the Llama model.", - live=True -) - -# Launch the Gradio interface -iface.launch() diff --git a/spaces/tekkonetes/rust-code-server/setup.sh b/spaces/tekkonetes/rust-code-server/setup.sh deleted file mode 100644 index caa2968f3a6a7abac0d7944eff2f2602b4327f78..0000000000000000000000000000000000000000 --- a/spaces/tekkonetes/rust-code-server/setup.sh +++ /dev/null @@ -1,12 +0,0 @@ -code-server --install-extension rust-lang.rust-analyzer -code-server --install-extension ms-vscode.atom-keymap -code-server --install-extension emroussel.atomize -code-server --install-extension pkief.material-icon-theme -code-server --install-extension tamasfe.even-better-toml - -curl -L https://sh.rustup.rs | sh -curl -L https://bun.sh/install | bash -curl -L https://get.wasmer.io | bash - -echo ". ~/.profile" > /home/user/.bashrc -echo "PS1='\W\\$ '" > /home/user/.bashrc \ No newline at end of file diff --git a/spaces/terfces0erbo/CollegeProjectV2/Adobe Postscript Printer Driver Free Download Windows 7 BEST.md b/spaces/terfces0erbo/CollegeProjectV2/Adobe Postscript Printer Driver Free Download Windows 7 BEST.md deleted file mode 100644 index 0b3a85558cb8c5bfce5d7dbbc6b375f09d89f206..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Adobe Postscript Printer Driver Free Download Windows 7 BEST.md +++ /dev/null @@ -1,6 +0,0 @@ -

      adobe postscript printer driver free download windows 7


      Download Ziphttps://bytlly.com/2uGlc0



      • Click the Choose a printer, then click Add a printer, and then click OK.
      • Select the Microsoft Windows Installer printer (recommended), and then click Next.
      • Select Use an existing port, and then click Next.
      • Choose the printer port, and then click Next.
      • Click Add a printer, and then click Finish.
      • [2] If the printer is not installed and is required for the new system, you can install the printer drivers by selecting Uninstall a printer (recommended), and then click Next.
      • In the Select printer to uninstall page, scroll to the bottom of the list, and then click Uninstall printer from the list of printers.
      • Select the printer drivers, and then click Next.
      • In the Where are the drivers located? page, click All files, and then click Finish.
      • Confirm the installation, and then click Finish.
      • To add the driver to the list of drivers that can be installed by Windows Update, in the Select printer to install page, click Add and then click Skip or Cancel.
      • In the Select printer to install page, click Select printer to install.
      • In the Print Port page, select the printer port, and then click Next.
      • Click Yes to the warning page that you are about to change settings for your computer, and then click OK.
      • To uninstall the driver, in the Select printer to install page, click Select printer to uninstall.
      • Select the printer driver, and then click Next.
      • Click Yes to the warning page that you are about to change settings for your computer, and then click OK.
      • Confirm the uninstallation, and then click Next.
      • In the Select printer driver to reinstall page, select the printer driver to reinstall, and then click Next.
      • In the Select printer driver to install page, click Select printer to install.
      • Click Yes to the warning page that you are about to change settings for your computer, and then click OK.
      • To add the driver to the list of drivers that can be installed by Windows Update, in the Select printer to install page, click Add and then click Skip or Cancel.
      • Select the printer driver, and then click Next.
      • Click Yes to the warning page that you are about to change settings for your computer, and then click OK.
      • Note: You must restart your computer when you have installed an HP printer driver.
      • If a driver is successfully installed, the printer appears in the Printers and Faxes page.
      -
      -
      -

      diff --git a/spaces/terfces0erbo/CollegeProjectV2/Cadmas 11 Torrent Full Version Added By Users !EXCLUSIVE!.md b/spaces/terfces0erbo/CollegeProjectV2/Cadmas 11 Torrent Full Version Added By Users !EXCLUSIVE!.md deleted file mode 100644 index 58726871d01a169e70b76f0cf33393e632ec84b2..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Cadmas 11 Torrent Full Version Added By Users !EXCLUSIVE!.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Cadmas 11 Torrent Full Version Added By Users


      Download Filehttps://bytlly.com/2uGk1B



      torrent Cadmas 11 Torrent Full Version Added By Users DOWNLOAD: .torrent
      -
      -
      -

      diff --git a/spaces/terfces0erbo/CollegeProjectV2/Free Free Mcboot 1.8b Hun.md b/spaces/terfces0erbo/CollegeProjectV2/Free Free Mcboot 1.8b Hun.md deleted file mode 100644 index 99e9d1ca9e91939947ff7a949eb3fff9221032cb..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Free Free Mcboot 1.8b Hun.md +++ /dev/null @@ -1,48 +0,0 @@ -

      Free Mcboot 1.8b Hun


      Download 🔗 https://bytlly.com/2uGlYQ



      -
      -
      -
      -

      diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Can You Download Fixed The Redragon Software Mac.md b/spaces/tialenAdioni/chat-gpt-api/logs/Can You Download Fixed The Redragon Software Mac.md deleted file mode 100644 index 47602aa3738fb9eca8b0c59da53780b1c3ff59a1..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Can You Download Fixed The Redragon Software Mac.md +++ /dev/null @@ -1,40 +0,0 @@ - -Here is a possible title and article with html formatting for the keyword "Can You Download The Redragon Software Mac": - -

      Can You Download The Redragon Software Mac?

      -

      If you have a Redragon gaming mouse, keyboard, headset or other device, you might be wondering if you can download the Redragon software Mac to customize your settings and macros. The answer is yes, but with some limitations.

      -

      The Redragon software Mac is a third-party application that is not officially supported by Redragon. It is developed by a user named AJ Ferrari and can be downloaded from his GitHub page: https://github.com/aj-ferrari/Redragon-Mouse.

      -

      Can You Download The Redragon Software Mac


      Downloadhttps://urlcod.com/2uK5C5



      -

      The Redragon software Mac allows you to adjust the DPI, polling rate, lighting effects and button assignments of your Redragon mouse. However, it does not support all models of Redragon mice, and some features may not work properly. For example, some users have reported issues with the side buttons or the scroll wheel.

      -

      Also, the Redragon software Mac does not work with other Redragon devices, such as keyboards or headsets. If you want to customize those devices, you will need to use a Windows PC and the official Redragon software from their website: https://www.redragonzone.com/pages/download.

      -

      In conclusion, you can download the Redragon software Mac if you have a compatible Redragon mouse and want to tweak some settings. However, it is not a fully functional or reliable solution, and you may encounter some bugs or errors. Use it at your own risk and discretion.


      How to Install the Redragon Software Mac

      -

      To install the Redragon software Mac, you will need to follow these steps:

      -
        -
      1. Download the latest release of the Redragon software Mac from https://github.com/aj-ferrari/Redragon-Mouse/releases.
      2. Unzip the downloaded file and open the folder.
      3. Double-click on the Redragon Mouse.app file to launch the application.
      4. Connect your Redragon mouse to your Mac via USB.
      5. The application should detect your mouse model and display the settings menu.
      6. You can now adjust the DPI, polling rate, lighting effects and button assignments of your mouse.
      7. Click on the Apply button to save your changes.
      -

      Note: You may need to grant permission for the application to access your mouse. You can do this by going to System Preferences > Security & Privacy > Privacy > Input Monitoring and checking the box next to Redragon Mouse.app.
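      If you would rather script the download from step 1 above, the sketch below asks the GitHub releases API for the latest release of the repository named in this article and saves its attached assets. The repository path and the "download every asset" logic are assumptions to adapt, not an official installer, so verify the release page yourself before trusting any binaries.

```python
import json
import urllib.request

# Repository named in the article; treat it as an assumption and check it exists.
REPO = "aj-ferrari/Redragon-Mouse"
api_url = f"https://api.github.com/repos/{REPO}/releases/latest"

with urllib.request.urlopen(api_url) as resp:
    release = json.load(resp)

print("Latest release:", release.get("tag_name"))
for asset in release.get("assets", []):
    # Save each attached asset (for example the zipped .app) into the current directory.
    urllib.request.urlretrieve(asset["browser_download_url"], asset["name"])
    print("Downloaded", asset["name"])
```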

      - -

      Pros and Cons of the Redragon Software Mac

      -

      -

      The Redragon software Mac has some advantages and disadvantages compared to the official Redragon software for Windows. Here are some of them:

      -
        -
      • Pros:
      • It allows you to use your Redragon mouse on a Mac without losing some of its features.
      • It is free and open-source, so you can inspect the code or modify it if you want.
      • It has a simple and user-friendly interface that is easy to navigate.
      • Cons:
      • It is not officially endorsed or supported by Redragon, so it may not be compatible with future updates or models of Redragon mice.
      • It may have some bugs or errors that affect the performance or functionality of your mouse.
      • It does not support other Redragon devices, such as keyboards or headsets.
      -

      Therefore, you should weigh the pros and cons before deciding whether to use the Redragon software Mac or not. If you encounter any problems or have any feedback, you can contact the developer through his GitHub page or email him at ajferrari@protonmail.com.

      -
      -
      \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Firmware tablet wolder manhattan A step-by-step guide to update your tablet[3].md b/spaces/tialenAdioni/chat-gpt-api/logs/Firmware tablet wolder manhattan A step-by-step guide to update your tablet[3].md deleted file mode 100644 index f194edb6d65c6f0ec2c6a31529b3f7d2bc33e1f6..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Firmware tablet wolder manhattan A step-by-step guide to update your tablet[3].md +++ /dev/null @@ -1,80 +0,0 @@ -

      How to Update Firmware on Tablet Wolder Manhattan

      -

      Firmware is the software that controls the hardware and functionality of your device. Updating firmware can improve performance, stability, security, compatibility, and user experience. It can also fix bugs, errors, and glitches that may affect your device.

      -

      If you own a Tablet Wolder Manhattan, you might want to update its firmware to enjoy the latest features and security patches. Tablet Wolder Manhattan is a 10.1-inch Android tablet that offers a quad-core processor, 1 GB of RAM, 16 GB of internal storage, dual cameras, Wi-Fi, Bluetooth, HDMI, USB OTG, and a 6000 mAh battery.

      -

      Firmware tablet wolder manhattan


      Download File ››› https://urlcod.com/2uK56d



      -

      Before you update your firmware, you need to check the current firmware version and the latest available version for your device. To do this, go to Settings > About tablet > Firmware version. You can also visit https://androidmtk.com/download-wolder-stock-rom to find the latest firmware files for your device model.
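      If the tablet has USB debugging enabled and you have adb installed, you can also read the build information from a command line. This is only a sketch that uses the generic Android build properties, not anything Wolder-specific.

```python
import subprocess

# Generic Android build properties; assumes `adb` is on PATH and the tablet is authorized.
PROPS = ["ro.build.display.id", "ro.build.version.release", "ro.build.date"]

for prop in PROPS:
    result = subprocess.run(
        ["adb", "shell", "getprop", prop],
        capture_output=True, text=True, check=True,
    )
    print(f"{prop} = {result.stdout.strip()}")
```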

      -

      Steps to update firmware on Tablet Wolder Manhattan

      -

      Once you have downloaded the latest firmware file for your device, you can follow these steps to update your firmware:

      -

      How to update firmware tablet wolder manhattan
      -Firmware tablet wolder manhattan download link
      -Firmware tablet wolder manhattan latest version
      -Firmware tablet wolder manhattan problems and solutions
      -Firmware tablet wolder manhattan compatible apps
      -Firmware tablet wolder manhattan root access
      -Firmware tablet wolder manhattan factory reset
      -Firmware tablet wolder manhattan custom rom
      -Firmware tablet wolder manhattan android 11
      -Firmware tablet wolder manhattan recovery mode
      -Firmware tablet wolder manhattan hard reset
      -Firmware tablet wolder manhattan specifications and features
      -Firmware tablet wolder manhattan user manual
      -Firmware tablet wolder manhattan review and rating
      -Firmware tablet wolder manhattan price and availability
      -Firmware tablet wolder manhattan warranty and support
      -Firmware tablet wolder manhattan comparison with other tablets
      -Firmware tablet wolder manhattan tips and tricks
      -Firmware tablet wolder manhattan battery life and performance
      -Firmware tablet wolder manhattan screen size and resolution
      -Firmware tablet wolder manhattan camera quality and settings
      -Firmware tablet wolder manhattan sound and speaker
      -Firmware tablet wolder manhattan memory and storage
      -Firmware tablet wolder manhattan processor and speed
      -Firmware tablet wolder manhattan connectivity and network
      -Firmware tablet wolder manhattan accessories and cases
      -Firmware tablet wolder manhattan software and security updates
      -Firmware tablet wolder manhattan error codes and messages
      -Firmware tablet wolder manhattan backup and restore
      -Firmware tablet wolder manhattan flash tool and drivers
      -Firmware tablet wolder manhattan unlock code and pattern
      -Firmware tablet wolder manhattan sim card and wifi
      -Firmware tablet wolder manhattan bluetooth and gps
      -Firmware tablet wolder manhattan sensors and gyroscope
      -Firmware tablet wolder manhattan video playback and streaming
      -Firmware tablet wolder manhattan gaming and graphics
      -Firmware tablet wolder manhattan ebooks and pdfs
      -Firmware tablet wolder manhattan web browsing and email
      -Firmware tablet wolder manhattan social media and chat apps
      -Firmware tablet wolder manhattan online shopping and payment apps
      -Firmware tablet wolder manhattan music player and podcasts
      -Firmware table

      -
        -
      1. Copy the firmware file to a microSD card. Make sure the file name is update.zip and it is placed in the root directory of the card (a small helper sketch for this step follows after the list).
      2. Insert the microSD card into the tablet and turn it off.
      3. Press and hold the power and volume up buttons simultaneously until the recovery mode appears. You will see a green Android logo with a red exclamation mark.
      4. Select "apply update from external storage" using the volume buttons and confirm with the power button.
      5. Choose the update.zip file from the microSD card and confirm with the power button.
      6. Wait for the installation process to complete. It may take several minutes. Do not interrupt or turn off your device during this process.
      7. Reboot your device when prompted. Your device will restart with the new firmware installed.
      -
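      As a small helper for step 1 above, here is a Python sketch that checks the downloaded firmware really is a ZIP archive and copies it to the card root under the required name update.zip. The source path and the SD-card mount point are placeholders you must change for your own system.

```python
import shutil
import zipfile
from pathlib import Path

firmware = Path("downloads/wolder_manhattan_firmware.zip")  # placeholder: your downloaded file
sdcard_root = Path("E:/")                                   # placeholder: microSD mount point

# The recovery menu expects a file literally named update.zip in the card root.
if not zipfile.is_zipfile(firmware):
    raise SystemExit(f"{firmware} is not a readable ZIP archive; re-download the firmware.")

target = sdcard_root / "update.zip"
shutil.copy2(firmware, target)
print(f"Copied firmware to {target}. Safely eject the card before putting it in the tablet.")
```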

      Conclusion

      Updating the firmware on Tablet Wolder Manhattan can improve the performance, stability, security, compatibility, and user experience of your device. You can also access the latest features and security patches that are available for your device.

      -

      However, updating firmware is not without risks. You should always backup your data before updating firmware, as you may lose some or all of your data during the process. You should also make sure that your device has enough battery power and is connected to a stable internet source during the update. If your device gets stuck or fails during the update, you may need to restore it to factory settings or contact the customer service for assistance.

      -

      We hope this article was helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below. We would love to hear from you and help you with any issues you may have. Thank you for choosing Tablet Wolder Manhattan!

      -

      FAQs

      -

      Here are some frequently asked questions and answers about updating firmware on Tablet Wolder Manhattan:

      -
        -
      • Q1: What are the advantages of updating firmware on Tablet Wolder Manhattan?
      • A1: Updating firmware can improve performance, stability, security, compatibility, and user experience. It can also fix bugs, errors, and glitches that may affect your device.
      • Q2: How can I backup my data before updating firmware on Tablet Wolder Manhattan?
      • A2: You can backup your data using a cloud service, a computer, or another external storage device. Make sure you backup your contacts, messages, photos, videos, apps, and other important files before updating firmware (a command-line sketch for this follows after the list).
      • Q3: What should I do if my tablet gets stuck or fails during the update process?
      • A3: If your tablet gets stuck or fails during the update process, you can try to reboot it by pressing and holding the power button for 10 seconds. If that does not work, you can try to restore your tablet to factory settings by using the recovery mode. However, this will erase all your data, so make sure you have a backup before doing this.
      • Q4: How can I verify that my firmware update was successful?
      • A4: You can verify that your firmware update was successful by checking the firmware version in Settings > About tablet > Firmware version. You should see the new version number that matches the one you downloaded from the official website.
      • Q5: Where can I find more information and support for Tablet Wolder Manhattan?
      • A5: You can find more information and support for Tablet Wolder Manhattan by visiting https://firmwareoficial.com/wolder/. You can also contact the customer service by phone or email if you have any questions or issues.
      -
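      Following up on the backup answer (A2) above, here is a hedged command-line sketch that uses adb's built-in backup. It assumes USB debugging is enabled and adb is installed; note that adb backup is deprecated on recent Android versions and may skip some apps, so combine it with the cloud or PC backup options already mentioned.

```python
import subprocess

# Assumes `adb` is on PATH and the tablet is connected with USB debugging enabled.
# You must confirm the backup prompt on the tablet screen; the archive is written locally.
subprocess.run(
    ["adb", "backup", "-apk", "-shared", "-all", "-f", "wolder_backup.ab"],
    check=True,
)
print("Backup written to wolder_backup.ab")
```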

      -
      -
      \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/FortressCraft Evolved Adventures Pack-PLAZA License Key A Must-Have for Fans of the Genre.md b/spaces/tialenAdioni/chat-gpt-api/logs/FortressCraft Evolved Adventures Pack-PLAZA License Key A Must-Have for Fans of the Genre.md deleted file mode 100644 index 6d4c30c3d99b13fc9f9e19c2775e7b73aaa4a3c6..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/FortressCraft Evolved Adventures Pack-PLAZA License Key A Must-Have for Fans of the Genre.md +++ /dev/null @@ -1,51 +0,0 @@ -
      -```html -

      FortressCraft Evolved Adventures Pack-PLAZA License Key: A Review

      -

      FortressCraft Evolved is a sandbox game that combines elements of Minecraft, Factorio, and Tower Defense. You can explore, build, craft, and defend your base from waves of enemies in a procedurally generated world. The game also features a story mode, a creative mode, and a multiplayer mode.

      -

      The Adventures Pack-PLAZA is an expansion pack that adds new content and features to the game. It includes:

      -

      FortressCraft Evolved Adventures Pack-PLAZA License Key


      DOWNLOAD ✑ ✑ ✑ https://urlcod.com/2uK9SV



      -
        -
      • A new biome: The Frozen Factory, where you can find new resources, enemies, and challenges.
      • A new game mode: The Adventures Mode, where you can play through randomly generated missions with different objectives and rewards.
      • A new feature: The Adventure Constructor, where you can create your own missions and share them with other players.
      • A new feature: The Adventure Browser, where you can browse and play missions created by other players.
      • A new feature: The Adventure Leaderboards, where you can compete with other players for the best scores and times.
      -

      To play the Adventures Pack-PLAZA, you need to have the base game FortressCraft Evolved installed on your PC. You also need to have a valid license key to activate the expansion pack. You can buy the license key from the official website or from other online platforms. The license key will be sent to your email address after the purchase.

      -

      The Adventures Pack-PLAZA is a great addition to the FortressCraft Evolved game. It offers more variety, replayability, and fun to the sandbox experience. If you are a fan of FortressCraft Evolved or sandbox games in general, you should definitely check out this expansion pack.

      -``` - -```html -

      One of the highlights of the Adventures Pack-PLAZA is the new biome: The Frozen Factory. This biome is located in the depths of the world, below the surface and the caverns. It is a harsh and cold environment, where you will encounter new dangers and opportunities. You will need to use new technologies and strategies to survive and thrive in this biome.

      -

      The Frozen Factory is home to new resources, such as ice, snow, and frozen ore. You can use these resources to craft new items and machines, such as heaters, coolers, and cryogenic chambers. You can also use them to create new structures and decorations, such as ice sculptures, snowmen, and igloos.

      -

      The Frozen Factory also hosts new enemies, such as frost spiders, ice worms, and snow golems. These enemies are more powerful and resilient than the ones you have faced before. They can freeze you, slow you down, or damage your base. You will need to upgrade your weapons and defenses to deal with them effectively.

      -

      The Frozen Factory also offers new challenges, such as blizzards, avalanches, and ice storms. These events can affect your visibility, mobility, and stability. You will need to adapt to the changing weather conditions and plan ahead to avoid disasters.

      -

      The Frozen Factory is a biome that will test your skills and creativity as a sandbox player. It will reward you with new discoveries and experiences that you will not find anywhere else in the game.

      -

      How to get FortressCraft Evolved Adventures Pack-PLAZA License Key for free
      -Download FortressCraft Evolved Adventures Pack-PLAZA full version with License Key
      -FortressCraft Evolved Adventures Pack-PLAZA License Key generator online
      -FortressCraft Evolved Adventures Pack-PLAZA License Key crack download
      -FortressCraft Evolved Adventures Pack-PLAZA License Key activation code
      -FortressCraft Evolved Adventures Pack-PLAZA License Key serial number
      -FortressCraft Evolved Adventures Pack-PLAZA License Key torrent download
      -FortressCraft Evolved Adventures Pack-PLAZA License Key review and gameplay
      -FortressCraft Evolved Adventures Pack-PLAZA License Key system requirements
      -FortressCraft Evolved Adventures Pack-PLAZA License Key cheats and tips
      -FortressCraft Evolved Adventures Pack-PLAZA License Key mods and updates
      -FortressCraft Evolved Adventures Pack-PLAZA License Key multiplayer mode
      -FortressCraft Evolved Adventures Pack-PLAZA License Key steam key giveaway
      -FortressCraft Evolved Adventures Pack-PLAZA License Key discount code and coupon
      -FortressCraft Evolved Adventures Pack-PLAZA License Key best price and deals
      -FortressCraft Evolved Adventures Pack-PLAZA License Key official website and support
      -FortressCraft Evolved Adventures Pack-PLAZA License Key trailer and screenshots
      -FortressCraft Evolved Adventures Pack-PLAZA License Key release date and news
      -FortressCraft Evolved Adventures Pack-PLAZA License Key patch notes and changelog
      -FortressCraft Evolved Adventures Pack-PLAZA License Key DLC and expansion packs
      -FortressCraft Evolved Adventures Pack-PLAZA License Key guide and walkthrough
      -FortressCraft Evolved Adventures Pack-PLAZA License Key error fix and troubleshooting
      -FortressCraft Evolved Adventures Pack-PLAZA License Key comparison and alternatives
      -FortressCraft Evolved Adventures Pack-PLAZA License Key refund policy and warranty
      -FortressCraft Evolved Adventures Pack-PLAZA License Key FAQ and forum

      -```

      e753bf7129
      -
      -
      \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Hollow Knight V1.2.2.1 DLC - GOG Get the Latest Version for Free.md b/spaces/tialenAdioni/chat-gpt-api/logs/Hollow Knight V1.2.2.1 DLC - GOG Get the Latest Version for Free.md deleted file mode 100644 index 35e28b351a07da0d7d6fa1ce63ce7ac7312c1ddc..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Hollow Knight V1.2.2.1 DLC - GOG Get the Latest Version for Free.md +++ /dev/null @@ -1,215 +0,0 @@ -
      -

      Hollow Knight V1.2.2.1 DLC - GOG Free Download: How to Download and Play the Ultimate 2D Action Adventure Game

      -

      Hollow Knight is a 2D action adventure game that was released in 2017 by Team Cherry, an independent game studio based in Australia. The game is set in a dark and mysterious world called Hallownest, where the player controls a nameless knight who explores a vast interconnected map of caverns, ruins, and forests. The game features challenging combat, beautiful hand-drawn graphics, atmospheric music, and a rich lore that unfolds through exploration and discovery.

      -

      Hollow Knight V1.2.2.1 DLC - GOG Free Download


      DOWNLOADhttps://urlcod.com/2uK7Hz



      -

      Hollow Knight has received several updates and expansions since its release, adding new areas, enemies, bosses, abilities, items, quests, and secrets to the game. The latest update is Hollow Knight V1.2.2.1 DLC - GOG, which includes all the previous updates and expansions, such as Hidden Dreams, The Grimm Troupe, Lifeblood, Godmaster, and Silksong (the upcoming sequel to Hollow Knight). This update also fixes some bugs and improves the performance and stability of the game.

      -

      If you are a fan of Hollow Knight or want to try this amazing game for yourself, you might be wondering how to get Hollow Knight V1.2.2.1 DLC - GOG free download. In this article, we will explain why you should avoid using any illegal or unsafe websites that offer Hollow Knight V1.2.2.1 DLC - GOG free download, and how you can get Hollow Knight V1.2.2.1 DLC - GOG free download legally and safely.

      -

      Why You Should Avoid Using Any Illegal or Unsafe Websites that Offer Hollow Knight V1.2.2.1 DLC - GOG Free Download

      -

There are many websites that claim to offer Hollow Knight V1.2.2.1 DLC - GOG free download, but they are actually not trustworthy or legitimate. Here are some of the reasons why you should avoid using these websites:

      -
        -
• Malware or viruses: These websites may contain malicious code that can infect your device or steal your data. You may not notice it at first, but it can cause serious damage to your system or compromise your privacy and security.
      • -
• Legal issues: These websites may violate the copyright laws and the terms and conditions of the game developers and publishers. By using them, you are breaking the law and exposing yourself to potential lawsuits or fines.
      • -
• Ethical issues: These websites are unethical and disrespectful to the game developers and publishers of Hollow Knight, who worked hard to provide a high-quality and entertaining game for the players. By using them, you are undermining their efforts and depriving them of recognition and revenue.
      • -
• Lack of quality: These websites may not provide the actual Hollow Knight V1.2.2.1 DLC - GOG, but only a low-quality or corrupted version of the game.
• -
• Lack of safety: These websites may not have any security measures or guarantees to protect your device or data from any harm or loss. You may also not be able to access any customer support or refund policy in case of any issues or complaints.
      • -
      -

How to Get Hollow Knight V1.2.2.1 DLC - GOG Free Download Legally and Safely

      -

The best way to get Hollow Knight V1.2.2.1 DLC - GOG free download is to use a legal and safe source that respects the rights and interests of the game developers and publishers of Hollow Knight. Here are some of the options you can choose from:

      -
        -
1. Buy or download the official version of Hollow Knight from GOG.com: You can buy or download the official version of Hollow Knight from GOG.com, which is a digital distribution platform that sells DRM-free games for Windows, Mac OS X, Linux, etc. By buying or downloading Hollow Knight from GOG.com, you can enjoy the full game in high quality with all the updates and expansions included (including Hollow Knight V1.2.2.1 DLC - GOG). You can also support the game developers and publishers by paying for their work.
      2. -
3. Use a free trial or a discount coupon from GOG.com: You can use a free trial or a discount coupon from GOG.com to get Hollow Knight V1.2.2.1 DLC - GOG free download for a limited time or at a lower price than usual (depending on the availability and terms of these offers), without breaking any laws or compromising any quality.
      4. -
5. Use a gift card or a voucher from GOG.com: You can use a gift card or a voucher from GOG.com to get Hollow Knight V1.2.2.1 DLC - GOG free download without spending any money (depending on the value and validity of these cards or vouchers), without breaking any laws or compromising any quality.
      6. - -
      - -

      Conclusion

      - -

Hollow Knight V1.2.2.1 DLC - GOG Free Download is a great way to enjoy the full version of one of the best 2D action adventure games ever made. The game features challenging combat, beautiful hand-drawn graphics, atmospheric music, and a rich lore that unfolds through exploration and discovery. The game also includes all the previous updates and expansions, such as Hidden Dreams, The Grimm Troupe, Lifeblood, Godmaster, and Silksong (the upcoming sequel to Hollow Knight).

      - -

If you want to get Hollow Knight V1.2.2.1 DLC - GOG Free Download, you should avoid using any illegal or unsafe websites that offer it, as they may contain malware or viruses, violate the copyright laws and the terms and conditions of the game developers and publishers, be unethical and disrespectful to them, provide low-quality or corrupted versions of the game, and have no security measures or guarantees to protect your device or data from any harm or loss.

      - -

Instead, you should use a legal and safe source that respects the rights and interests of the game developers and publishers. You can buy or download the official version of Hollow Knight from GOG.com, a digital distribution platform that sells DRM-free games for Windows, Mac OS X, Linux, etc. You can also use a free trial or a discount coupon from GOG.com, a gift card or a voucher from GOG.com, or any other legal and safe method that allows you to get Hollow Knight V1.2.2.1 DLC - GOG Free Download without breaking any laws or compromising any quality.

      -

      Hollow Knight GOG version free download with DLC
      -How to get Hollow Knight V1.2.2.1 and all DLC for free
      -Download Hollow Knight full game and DLC from GOG
      -Hollow Knight free download guide for PC with DLC
      -Hollow Knight V1.2.2.1 GOG edition with DLC torrent
      -Hollow Knight PC game free download with all DLC
      -GOG Hollow Knight V1.2.2.1 full game and DLC direct link
      -Hollow Knight latest version and DLC free download GOG
      -Download Hollow Knight V1.2.2.1 and DLC for free on GOG
      -Hollow Knight full game and DLC free download GOG version
      -GOG Hollow Knight V1.2.2.1 and all DLC download link
      -Hollow Knight V1.2.2.1 with DLC free download for PC
      -Hollow Knight GOG free download full game and DLC
      -How to download Hollow Knight V1.2.2.1 and DLC from GOG for free
      -Hollow Knight V1.2.2.1 and DLC GOG edition free download
      -Download Hollow Knight full game and all DLC for free GOG
      -Hollow Knight PC game with DLC free download GOG version
      -Hollow Knight V1.2.2.1 and all DLC free download guide GOG
      -GOG Hollow Knight V1.2.2.1 full game and DLC free download torrent
      -Hollow Knight latest version and all DLC download link GOG
      -Free download Hollow Knight V1.2.2.1 and DLC on GOG
      -Hollow Knight full game and all DLC GOG edition download link
      -Download Hollow Knight V1.2.2.1 with DLC for PC from GOG
      -Hollow Knight GOG edition full game and DLC free download
      -How to get Hollow Knight V1.2.2.1 and all DLC on GOG for free
      -Hollow Knight V1.2.2.1 and all DLC GOG version download link
      -Download Hollow Knight full game and all DLC from GOG for free
      -Hollow Knight PC game with all DLC download link GOG
      -Hollow Knight V1.2.2.1 and all DLC download guide for PC from GOG
      -GOG Hollow Knight V1.2.2.1 full game and all DLC direct link download
      -Download the latest version of Hollow Knight with all DLC from GOG
      -Free download of Hollow Knight full game and all DLC on GOG
      -Download link for Hollow Knight V1.2.2.1 with all DLC from GOG
      -How to download the full game of Hollow Knight with all DLC from GOG for free
      -Free download of the latest version of Hollow Knight with all DLC on GOG
      -Download link for the full game of Hollow Knight with all DLC on GOG
      -How to get the latest version of Hollow Knight with all DLC from GOG for free
      -Free download of the full game of Hollow Knight V1.2.2.1 with all DLC on GOG
      -Download link for the latest version of Hollow Knight V1.2.2.1 with all DLC on GOG
      -How to get the full game of Hollow Knight V1.2.2.1 with all DLC from GOG for free
      -Free download of the full game of Hollow Knight V1.2.2.1 on GOG with all DLC included
      -Download link for the full game of Hollow Knight V1.2.2.1 on GOG with all DLC included
      -How to get the full game of Hollow Knight V1.2.2.1 on GOG with all DLC included for free
      -Free download of the full game of Hollow Knight on GOG with the latest version and all DLC included
      -Download link for the full game of Hollow Knight on GOG with the latest version and all DLC included
      -How to get the full game of Hollow Knight on GOG with the latest version and all DLC included for free

      - -

By using these options, you can enjoy Hollow Knight V1.2.2.1 DLC - GOG Free Download in the best possible way. You can also support the game developers and publishers by paying for their work.

      -

      What is Hollow Knight V1.2.2.1 DLC - GOG?

      -

      Hollow Knight V1.2.2.1 DLC - GOG is the latest update for Hollow Knight, which includes all the previous updates and expansions for the game. This means that you can enjoy the full version of Hollow Knight with all the additional content that has been added since its release.

      -

Some of the features that Hollow Knight V1.2.2.1 DLC - GOG offers are:

      -
        -
      • New areas: You can explore new regions of Hallownest, such as The Hive, The White Palace, The Royal Waterways, The Ancient Basin, and The Godhome.
      • -
      • New enemies: You can encounter new foes and challenges, such as The Collector, The Traitor Lord, The Nightmare King Grimm, The Radiance, and The Pantheon of Hallownest.
      • -
      • New bosses: You can face new epic battles, such as Zote the Mighty, Grey Prince Zote, The Sisters of Battle, The Pure Vessel, and Absolute Radiance.
      • -
      • New abilities: You can unlock new skills and upgrades, such as Dream Nail, Dream Gate, Grimmchild, Void Heart, and Lifeblood Core.
      • -
      • New items: You can collect new charms, relics, masks, vessels, and grubs that will enhance your abilities and gameplay.
      • -
      • New quests: You can complete new side quests and stories, such as The Grimm Troupe, The Delicate Flower, The Godseeker, and The Path of Pain.
      • -
      • New secrets: You can discover new hidden areas, lore, easter eggs, and endings that will reveal more about the world and history of Hollow Knight.
      • -
      -

      How to Install Hollow Knight V1.2.2.1 DLC - GOG?

      -

If you want to install Hollow Knight V1.2.2.1 DLC - GOG, you need to follow these steps:

      -
        -
1. Download Hollow Knight V1.2.2.1 DLC - GOG from a legal and safe source: You can download Hollow Knight V1.2.2.1 DLC - GOG from GOG.com, which is a digital distribution platform that sells DRM-free games for Windows, Mac OS X, Linux, etc. You can also use a free trial or a discount coupon from GOG.com, a gift card or a voucher from GOG.com, or any other legal and safe method that allows you to get Hollow Knight V1.2.2.1 DLC - GOG free download legally and safely.
      2. - -
      3. Extract the downloaded file: You need to extract the downloaded file using a program like 7-Zip or WinRAR. You will get a folder named "Hollow Knight v1.5.78.11833" that contains the setup file and the goodies files.
      4. -
      5. Run the setup file: You need to run the setup file named "setup_hollow_knight_1.5.78.11833_ (64bit)_ (50884).exe" and follow the instructions to install Hollow Knight on your device.
      6. -
7. Enjoy the game: You can launch the game through the desktop shortcut or the start menu and enjoy Hollow Knight V1.2.2.1 DLC - GOG.
      8. -
      -

      What are the Benefits of Hollow Knight V1.2.2.1 DLC - GOG Free Download?

      -

Hollow Knight V1.2.2.1 DLC - GOG free download has many benefits for the players who want to enjoy the ultimate 2D action adventure game. Some of the benefits are:

      -
        -
      • More content: You can access more content than the original version of Hollow Knight, such as new areas, enemies, bosses, abilities, items, quests, and secrets. You can also play Silksong, the upcoming sequel to Hollow Knight, when it is released.
      • -
• More fun: You can have more fun and challenge with Hollow Knight V1.2.2.1 DLC - GOG free download, as you can explore more of Hallownest, fight more foes and bosses, unlock more skills and upgrades, collect more charms and relics, complete more side quests and stories, and discover more hidden areas and lore.
      • -
• More value: You can get more value for your money with Hollow Knight V1.2.2.1 DLC - GOG free download, as you can get the full version of Hollow Knight with all the updates and expansions for a reasonable price or even for free (depending on the method you use to get it).
      • -
• More quality: You can enjoy more quality with Hollow Knight V1.2.2.1 DLC - GOG free download, as you can play the game in high quality with beautiful hand-drawn graphics, atmospheric music, and smooth performance and stability.
      • -
• More support: You can get more support with Hollow Knight V1.2.2.1 DLC - GOG free download, as you can access customer support and a refund policy from GOG.com in case of any issues or complaints. You can also support the game developers and publishers by buying or downloading the game from GOG.com.
      • -
      -

      How to Play Hollow Knight V1.2.2.1 DLC - GOG Free Download?

      -

If you want to play Hollow Knight V1.2.2.1 DLC - GOG free download, you need to follow these steps:

      -
        -
1. Install Hollow Knight V1.2.2.1 DLC - GOG free download: You need to install Hollow Knight V1.2.2.1 DLC - GOG free download on your device by following the steps described in the previous section.
      2. - -
      3. Launch the game: You need to launch the game through the desktop shortcut or the start menu and choose your preferred language and settings.
      4. -
      5. Create a new game or load an existing game: You need to create a new game or load an existing game from the main menu and choose your preferred difficulty and mode.
      6. -
      7. Explore Hallownest: You need to explore Hallownest by moving, jumping, dashing, wall-jumping, and using other abilities. You need to fight enemies and bosses by using your nail, spells, and charms. You need to collect items and currency by breaking objects, opening chests, and finding secrets. You need to upgrade your abilities and equipment by visiting shops, benches, stations, and other locations. You need to complete quests and stories by talking to NPCs, reading lore tablets, and finding clues.
      8. -
      9. Enjoy the game: You need to enjoy the game by experiencing its gameplay, graphics, music, and story.
      10. -
      -

      Conclusion

      -

Hollow Knight V1.2.2.1 DLC - GOG Free Download is a great opportunity to download and play the full version of one of the best 2D action adventure games ever made. The game features challenging combat, beautiful hand-drawn graphics, atmospheric music, and a rich lore that unfolds through exploration and discovery. The game also includes all the previous updates and expansions, such as Hidden Dreams, The Grimm Troupe, Lifeblood, Godmaster, and Silksong (the upcoming sequel to Hollow Knight).

      - -

If you want to get Hollow Knight V1.2.2.1 DLC - GOG Free Download, you should avoid using any illegal or unsafe websites that offer it, as they may contain malware or viruses, violate the copyright laws and the terms and conditions of the game developers and publishers, be unethical and disrespectful to them, provide low-quality or corrupted versions of the game, and have no security measures or guarantees to protect your device or data from any harm or loss.

      - -

Instead, you should use a legal and safe source that respects the rights and interests of the game developers and publishers. You can buy or download the official version of Hollow Knight from GOG.com, a digital distribution platform that sells DRM-free games for Windows, Mac OS X, Linux, etc. You can also use a free trial or a discount coupon from GOG.com, a gift card or a voucher from GOG.com, or any other legal and safe method that allows you to get Hollow Knight V1.2.2.1 DLC - GOG Free Download without breaking any laws or compromising any quality.

      - -

By using these options, you can enjoy Hollow Knight V1.2.2.1 DLC - GOG Free Download in the best possible way. You can also support the game developers and publishers by paying for their work.

      679dcb208e
      -
      -
      \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Business WhatsApp How to Set Up Your Profile and Start Selling.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Business WhatsApp How to Set Up Your Profile and Start Selling.md deleted file mode 100644 index d725fcfdae4df11cfb4ffe04bbe8a9a11b125a00..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Business WhatsApp How to Set Up Your Profile and Start Selling.md +++ /dev/null @@ -1,95 +0,0 @@ - -

      Business WhatsApp Download Karo: A Guide for Small Businesses

      -

      If you are a small business owner who wants to communicate with your customers more efficiently and effectively, you might want to consider using Business WhatsApp. Business WhatsApp is a free-to-download app that is built on top of WhatsApp Messenger and includes all the features that you rely on, such as multimedia, free calls, and group chat. But it also has some additional features that are designed for businesses, such as a verified and more complete business profile, messaging tools, labels, and a catalog. In this article, we will explain what Business WhatsApp is, how it is different from WhatsApp, how to download and set up Business WhatsApp, and how to use it to communicate with your customers.

      -

      business whatsapp download karo


      Download Ziphttps://bltlly.com/2uOgsb



      -

      What is Business WhatsApp and How is it Different from WhatsApp?

      -

      WhatsApp is a messaging app meant for personal communication, while Business WhatsApp is designed for businesses to communicate with their customers. Business WhatsApp provides a verified and more complete business profile so customers can trust who they’re chatting with. Business WhatsApp can add multiple users to manage the account across multiple devices, whereas individuals use WhatsApp on their personal devices. Business WhatsApp also has some special messaging features that help businesses be more responsive, organized, and professional.

      -

      Business WhatsApp Features and Benefits

      -

      Some of the main features and benefits of Business WhatsApp are:

      -
        -
      • Business profile: You can create a profile for your business that contains valuable information for customers, such as your website, location, contact information, business hours, business categories, and products.
      • -
      • Messaging tools: You can use greeting messages to send your customers an introductory message when they first message you, away messages to indicate when you're away or busy, and quick replies to save and reuse frequently sent messages.
      • -
      • Labels: You can assign different labels for each chat, such as new customer, pending payment, order complete, etc. This will help you track your orders, generate leads, and keep your account neat.
      • -
      • Catalog: You can showcase your products and services in a catalog that customers can browse within the app. You can add images, prices, descriptions, links, and codes for each item.
      • -
      • Multimedia and interactive content: You can send files, images, videos, audio messages, documents, contacts, locations, stickers, emojis, and GIFs to your customers. You can also send interactive buttons that allow customers to reply with a simple tap.
      • -
      -

      How to Download and Set Up Business WhatsApp

      -

      To download and set up Business WhatsApp on your Android or iPhone device, follow these steps:

      -
        -
      1. Download the Business WhatsApp app from the Google Play Store or the App Store.
      2. -
      3. Verify your business phone number. You can use a landline or fixed number by selecting the “Call me” option during verification.
      4. -
      5. If you have an existing WhatsApp Messenger account linked to the same number, you can migrate your account data, including chat history and media, to your new Business WhatsApp account. Note that you cannot move it back to WhatsApp Messenger later.
      6. -
      7. Set your business name. Choose carefully as you can only change it once.
      8. -
      9. Build your profile by adding your business information and logo.
      10. -
      -

      How to Use Business WhatsApp to Communicate with Customers

      -

      Once you have set up your Business WhatsApp account, you can start communicating with your customers in various ways. Here are some tips on how to use Business WhatsApp effectively:

      -

      Create a Business Profile and Catalog

      -

      Your business profile and catalog are the first things that customers will see when they open a chat with you. Therefore, you should make them as attractive and informative as possible. To create or edit your business profile or catalog, go to More options > Settings > your business name. You can add or change your logo or image, website URL, address, business hours, business categories, and products. You can also add a short description of your business and what you offer. To create or edit your catalog, go to More options > Settings > your business name > Catalog. You can add or delete items, edit their details, and rearrange their order.

      -

      Use Messaging Tools and Labels

      -

      Messaging tools and labels can help you save time and stay organized when communicating with your customers. To access your messaging tools, go to More options > Settings > Business tools > Messaging tools. You can create and edit your greeting message, away message, and quick replies. You can also enable or disable them as needed. To use a quick reply, type “/” in the chat and select the one you want to send. To access your labels, go to More options > Labels. You can create and edit labels, assign them to chats or contacts, filter chats by labels, and archive chats with labels.

      -

      Send Multimedia and Interactive Content

      -

      Multimedia and interactive content can make your communication more engaging and interactive. You can send files, images, videos, audio messages, documents, contacts, locations, stickers, emojis, and GIFs to your customers by tapping the attachment icon in the chat. You can also send interactive buttons that allow customers to reply with a simple tap. For example, you can send a button that says “Yes” or “No” to a question, or a button that says “View catalog” or “Visit website” to direct customers to your products or services. To send an interactive button, go to More options > Settings > Business tools > Short link. You can create and edit your short link, which is a unique URL that opens a chat with you. You can also enable or disable the default message that customers will see when they tap the link.

      -

      Conclusion and FAQs

      -

      Business WhatsApp is a powerful tool for small businesses to communicate with their customers more efficiently and effectively. It has many features and benefits that can help you build trust, increase sales, and grow your business. To download Business WhatsApp, visit the Google Play Store or the App Store and follow the instructions. To learn more about Business WhatsApp, visit the official website or the help center.

      -

      business whatsapp app download karo
      -business whatsapp download karo free
      -business whatsapp download karo android
      -business whatsapp download karo apk
      -business whatsapp download karo google play
      -business whatsapp download karo iphone
      -business whatsapp download karo ios
      -business whatsapp download karo link
      -business whatsapp download karo latest version
      -business whatsapp download karo update
      -business whatsapp download karo for pc
      -business whatsapp download karo for laptop
      -business whatsapp download karo for mac
      -business whatsapp download karo for windows
      -business whatsapp download karo for desktop
      -business whatsapp download karo online
      -business whatsapp download karo web
      -business whatsapp download karo from meta
      -business whatsapp download karo from facebook
      -business whatsapp download karo from official website
      -business whatsapp download karo in hindi
      -business whatsapp download karo in english
      -business whatsapp download karo in urdu
      -business whatsapp download karo in tamil
      -business whatsapp download karo in telugu
      -business whatsapp download karo video
      -business whatsapp download karo tutorial
      -business whatsapp download karo guide
      -business whatsapp download karo how to
      -business whatsapp download karo step by step
      -business whatsapp download karo features
      -business whatsapp download karo benefits
      -business whatsapp download karo advantages
      -business whatsapp download karo reviews
      -business whatsapp download karo ratings
      -business whatsapp download karo tips
      -business whatsapp download karo tricks
      -business whatsapp download karo hacks
      -business whatsapp download karo faq
      -business whatsapp download karo help center
      -business whatsapp download karne ka tarika
      -business whatsapp download karne ke liye
      -business whatsapp download karne ki vidhi
      -business whatsapp download karne ka asan tarika
      -business whatsapp download karne ka sahi tarika
      -business whatsapp account banane ka tarika

      -

      Here are some frequently asked questions about Business WhatsApp:

      -
        -
      • Q: Can I use both WhatsApp Messenger and Business WhatsApp on the same phone?
      • -
      • A: Yes, you can use both apps on the same phone as long as they are linked to different phone numbers.
      • -
      • Q: Can I use Business WhatsApp on my computer?
      • -
      • A: Yes, you can use Business WhatsApp on your computer by using WhatsApp Web or downloading the desktop app.
      • -
      • Q: How can I get verified on Business WhatsApp?
      • -
      • A: Verification is currently limited to a small number of businesses that have been chosen by WhatsApp. If you see a green badge next to your business name, it means that WhatsApp has verified that this is the authentic account for your business.
      • -
      • Q: How can I backup or restore my Business WhatsApp data?
      • -
      • A: You can backup or restore your Business WhatsApp data using Google Drive on Android devices or iCloud on iPhone devices.
      • -
      • Q: How can I delete my Business WhatsApp account?
      • -
      • A: You can delete your Business WhatsApp account by going to More options > Settings > Account > Delete my account.
      • -

      197e85843d
      -
      -
      \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Can 39t Download Pokemon Go.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Can 39t Download Pokemon Go.md deleted file mode 100644 index e886313d585be0b47582b0f0d5c052a5d203ef98..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Can 39t Download Pokemon Go.md +++ /dev/null @@ -1,122 +0,0 @@ - -

      Can't Download Pokemon Go? Here's How to Fix It

      -

      Pokemon Go is a popular augmented reality game that lets you catch, train, and battle virtual creatures called Pokemon in the real world. The game is free to download and play, but you need a compatible device, a stable internet connection, and enough storage space to enjoy it.

      -

      However, some people may encounter problems when trying to download Pokemon Go from their app store. If you are one of them, don't worry. In this article, we will show you some possible solutions to fix this issue and get you ready to catch 'em all.

      -

can't download pokemon go


      Download ✏ ✏ ✏ https://bltlly.com/2uOsf1



      -

      Check Your Device Compatibility

      -

      Before you download Pokemon Go, make sure your device meets the minimum requirements for the game. Here are the specifications for Android and iOS devices:

      -

      Android Requirements

      -
        -
      • Android 6 or above
      • -
      • At least 2 GB of RAM
      • -
      • Access to Google Play services
      • -
      • GPS and location services enabled
      • -
      • Gyroscope and camera (optional, but recommended)
      • -
      -

      iOS Requirements

      -
        -
      • iOS 12 or above
      • -
      • iPhone 6s or above
      • -
      • iPad 5th generation or above
      • -
      • iPad mini 4 or above
      • -
      • iPad Air 2 or above
      • -
      • iPad Pro or above
      • -
      • iPod touch 7th generation or above
      • -
      • GPS and location services enabled
      • -
      • Gyroscope and camera (optional, but recommended)
      • -
      -

      Check Your Internet Connection

      -

      Pokemon Go requires a reliable internet connection to download and play. You can use either Wifi or mobile data, but make sure you have a strong and stable signal. If your connection is slow or intermittent, you may experience errors or delays when downloading the game.

      -

      Wifi vs Mobile Data

      -

      If you are using Wifi, make sure you are close to the router and there are no obstructions or interferences. You can also try restarting your router or switching to another Wifi network if possible.

      -

      If you are using mobile data, make sure you have enough data allowance and coverage. You can also try turning off your mobile data and turning it back on, or switching to another carrier if possible.

      -

      Improve Location Accuracy

      -

      Pokemon Go uses GPS and location services to determine your position and show you nearby Pokemon. If your location accuracy is low, you may have trouble downloading the game or playing it properly.

      -

      To improve your location accuracy, you can enable the following settings on your device:

      -
        -
      • For Android devices, go to Settings > Location > Google Location Accuracy and turn on Improve Location Accuracy.
      • -
      • For iOS devices, go to Settings > Privacy > Location Services > System Services and turn on Wi-Fi Networking & Wireless.
      • -
      -

Check Your App Store Settings

      -

      Another reason why you may not be able to download Pokemon Go is that your app store settings are preventing you from doing so. Depending on your device and region, you may need to adjust some settings to allow the installation of the game.

      -

      -

      Google Play Store

      -

      If you are using an Android device, you need to download Pokemon Go from the Google Play Store. However, some factors may prevent you from accessing the game on the store, such as:

      -
        -
      • Your Google account is not signed in or synced.
      • -
      • Your Google Play Store app is outdated or corrupted.
      • -
      • Your device is set to a different region or country than the one where Pokemon Go is available.
      • -
      • Your device has parental controls or restrictions enabled.
      • -
      -

      To fix these issues, you can try the following solutions:

      -
        -
      • Make sure you are signed in to your Google account and sync it with your device.
      • -
      • Update your Google Play Store app to the latest version or clear its cache and data.
      • -
      • Change your device's region or country to match the one where Pokemon Go is available. You can do this by going to Settings > System > Languages & input > Languages > Add a language and selecting the appropriate one.
      • -
      • Disable any parental controls or restrictions on your device or Google account that may block the installation of Pokemon Go. You can do this by going to Settings > Apps & notifications > Special app access > Install unknown apps and allowing Pokemon Go, or by going to Settings > Google > Parental controls and turning them off.
      • -
      -

      Apple App Store

      -

      If you are using an iOS device, you need to download Pokemon Go from the Apple App Store. However, some factors may prevent you from accessing the game on the store, such as:

      -
        -
      • Your Apple ID is not signed in or verified.
      • -
      • Your Apple App Store app is outdated or corrupted.
      • -
      • Your device is set to a different region or country than the one where Pokemon Go is available.
      • -
      • Your device has parental controls or restrictions enabled.
      • -
      -

      To fix these issues, you can try the following solutions:

      -
        -
      • Make sure you are signed in to your Apple ID and verify it with your email or phone number.
      • -
      • Update your Apple App Store app to the latest version or clear its cache and data.
      • -
      • Change your device's region or country to match the one where Pokemon Go is available. You can do this by going to Settings > General > Language & Region and selecting the appropriate one.
      • -
      • Disable any parental controls or restrictions on your device or Apple ID that may block the installation of Pokemon Go. You can do this by going to Settings > Screen Time > Content & Privacy Restrictions and turning them off, or by going to Settings > iTunes & App Store > Apple ID > View Apple ID > Subscriptions > Manage and canceling any active subscriptions that may interfere with Pokemon Go.
      • -
      -

Check Your Device Storage

      -

      Pokemon Go is a large app that requires a lot of storage space on your device. If you don't have enough space, you may not be able to download or update the game. To check your device storage, you can do the following:

      -
        -
      • For Android devices, go to Settings > Storage and see how much space is available and used.
      • -
      • For iOS devices, go to Settings > General > iPhone Storage and see how much space is available and used.
      • -
      -

      If you have less than 1 GB of free space, you may need to free up some space by deleting or moving some files or apps. Here are some tips on how to do that:

      -

      How to Free Up Space

      -
        -
      • Delete any unwanted photos, videos, music, or documents from your device or upload them to a cloud service like Google Photos, iCloud, or Dropbox.
      • -
      • Delete any unused or unnecessary apps from your device or disable them if they are pre-installed.
      • -
      • Clear the cache and data of some apps that may take up a lot of space, such as social media, streaming, or gaming apps.
      • -
      • Use a file manager app or a cleaning app to scan and remove any junk files, duplicate files, or large files from your device.
      • -
      -

      How to Move Apps to SD Card

      -

      If you have an Android device with an SD card slot, you can move some apps to the SD card to save some internal storage space. However, not all apps can be moved to the SD card, and some may not work properly if they are moved. To move apps to the SD card, you can do the following:

      -
        -
      • Insert an SD card into your device and format it as internal storage or portable storage, depending on your preference.
      • -
      • Go to Settings > Apps and select the app you want to move.
      • -
      • Tap on Storage and then on Change. You will see the option to move the app to the SD card if it is available.
      • -
      • Tap on Move and wait for the process to complete.
      • -
      -

      Check for App Updates and Known Issues

      -

      Sometimes, you may not be able to download Pokemon Go because the app is undergoing maintenance or has some bugs or issues that need to be fixed. To check for app updates and known issues, you can do the following:

      -

      How to Update Pokemon Go

      -
        -
      • Go to your app store and search for Pokemon Go. If there is an update available, you will see an Update button next to the app. Tap on it and wait for the update to download and install.
      • -
      • You can also enable automatic updates for Pokemon Go by going to your app store settings and turning on Auto-update apps for Android devices or App Updates for iOS devices.
      • -
      -

      How to Report a Bug or Issue

      -
        -
      • If you encounter a bug or issue while downloading or playing Pokemon Go, you can report it to the developers by going to Settings > Get Support > Report a Bug in the app.
      • -
      • You can also visit the official Pokemon Go website or social media pages and check for any announcements or updates regarding the game status or issues.
      • -
      • You can also contact the Pokemon Go support team by emailing them at pokemon-go-support@nianticlabs.com or filling out a form on their website.
      • -
      -

      Conclusion

      -

      Pokemon Go is a fun and immersive game that lets you explore the world of Pokemon in real life. However, sometimes you may face some challenges when trying to download the game on your device. In this article, we have covered some possible solutions to help you fix this issue and enjoy the game without any hassle. We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. Happy hunting!

      -

      FAQs

      -

      Why can't I download Pokemon Go in my country?

      -

      Pokemon Go is not available in every country due to various reasons, such as licensing agreements, legal restrictions, technical limitations, or cultural differences. You can check the list of supported countries on the official Pokemon Go website. If your country is not on the list, you may have to wait until the game is released in your region or use a VPN service to access it from another country.

      -

      Why can't I download Pokemon Go on my tablet?

      -

      Pokemon Go is designed for smartphones and may not work well on tablets. Some tablets may not meet the minimum requirements for the game, such as having a GPS sensor, a gyroscope, or a camera. Some tablets may also have compatibility issues with the app store or the game itself. If you want to play Pokemon Go on a tablet, you may need to check the device specifications, the app store settings, and the game updates before downloading it.

      -

      Why can't I download Pokemon Go on my rooted or jailbroken device?

      -

      Pokemon Go does not support rooted or jailbroken devices for security and performance reasons. Rooting or jailbreaking your device may compromise the game's functionality and integrity, as well as expose your device to malware or hacking. If you want to play Pokemon Go on a rooted or jailbroken device, you may need to unroot or unjailbreak your device or use a third-party app that can hide your device status from the game.

      -

      Why can't I download Pokemon Go on my Huawei device?

      -

      Pokemon Go requires Google Play services to run properly on Android devices. However, some Huawei devices do not have Google Play services installed due to the US trade ban. If you have a Huawei device that does not have Google Play services, you may not be able to download or play Pokemon Go. You may need to use an alternative app store, such as Huawei AppGallery, to download the game or use a workaround method to install Google Play services on your device.

      -

      Why can't I download Pokemon Go on my Chromebook?

      -

      Pokemon Go is not compatible with Chromebooks, even if they have access to the Google Play Store. Chromebooks are not designed for gaming and may not have the necessary hardware or software features to run Pokemon Go. For example, most Chromebooks do not have a GPS sensor, a gyroscope, or a camera. Even if you manage to download Pokemon Go on your Chromebook, you may encounter errors or glitches when playing it.

      197e85843d
      -
      -
      \ No newline at end of file diff --git a/spaces/tiiuae/falcon-180b-demo/app.py b/spaces/tiiuae/falcon-180b-demo/app.py deleted file mode 100644 index c6b34ebcf0615c57d48281f23cf201f550e5d186..0000000000000000000000000000000000000000 --- a/spaces/tiiuae/falcon-180b-demo/app.py +++ /dev/null @@ -1,148 +0,0 @@ -import json -import os -import shutil -import requests - -import gradio as gr -from huggingface_hub import Repository, InferenceClient - -HF_TOKEN = os.environ.get("HF_TOKEN", None) -API_URL = "https://api-inference.huggingface.co/models/tiiuae/falcon-180B-chat" -BOT_NAME = "Falcon" - -STOP_SEQUENCES = ["\nUser:", "<|endoftext|>", " User:", "###"] - -EXAMPLES = [ - ["Hey Falcon! Any recommendations for my holidays in Abu Dhabi?"], - ["What's the Everett interpretation of quantum mechanics?"], - ["Give me a list of the top 10 dive sites you would recommend around the world."], - ["Can you tell me more about deep-water soloing?"], - ["Can you write a short tweet about the release of our latest AI model, Falcon LLM?"] - ] - -client = InferenceClient( - API_URL, - headers={"Authorization": f"Bearer {HF_TOKEN}"}, -) - -def format_prompt(message, history, system_prompt): - prompt = "" - if system_prompt: - prompt += f"System: {system_prompt}\n" - for user_prompt, bot_response in history: - prompt += f"User: {user_prompt}\n" - prompt += f"Falcon: {bot_response}\n" # Response already contains "Falcon: " - prompt += f"""User: {message} -Falcon:""" - return prompt - -seed = 42 - -def generate( - prompt, history, system_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0, -): - temperature = float(temperature) - if temperature < 1e-2: - temperature = 1e-2 - top_p = float(top_p) - global seed - generate_kwargs = dict( - temperature=temperature, - max_new_tokens=max_new_tokens, - top_p=top_p, - repetition_penalty=repetition_penalty, - stop_sequences=STOP_SEQUENCES, - do_sample=True, - seed=seed, - ) - seed = seed + 1 - formatted_prompt = format_prompt(prompt, history, system_prompt) - - try: - stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False) - output = "" - - for response in stream: - output += response.token.text - - for stop_str in STOP_SEQUENCES: - if output.endswith(stop_str): - output = output[:-len(stop_str)] - output = output.rstrip() - yield output - yield output - except Exception as e: - raise gr.Error(f"Error while generating: {e}") - return output - - -additional_inputs=[ - gr.Textbox("", label="Optional system prompt"), - gr.Slider( - label="Temperature", - value=0.9, - minimum=0.0, - maximum=1.0, - step=0.05, - interactive=True, - info="Higher values produce more diverse outputs", - ), - gr.Slider( - label="Max new tokens", - value=256, - minimum=0, - maximum=3000, - step=64, - interactive=True, - info="The maximum numbers of new tokens", - ), - gr.Slider( - label="Top-p (nucleus sampling)", - value=0.90, - minimum=0.01, - maximum=0.99, - step=0.05, - interactive=True, - info="Higher values sample more low-probability tokens", - ), - gr.Slider( - label="Repetition penalty", - value=1.2, - minimum=1.0, - maximum=2.0, - step=0.05, - interactive=True, - info="Penalize repeated tokens", - ) -] - - -with gr.Blocks() as demo: - with gr.Row(): - with gr.Column(scale=0.4): - gr.Image("better_banner.jpeg", elem_id="banner-image", show_label=False) - with gr.Column(): - gr.Markdown( - """# Falcon-180B Demo - - **Chat with [Falcon-180B-Chat](https://huggingface.co/tiiuae/falcon-180b-chat), 
brainstorm ideas, discuss your holiday plans, and more!** - - ✨ This demo is powered by [Falcon-180B](https://huggingface.co/tiiuae/falcon-180B) and finetuned on a mixture of [Ultrachat](https://huggingface.co/datasets/stingning/ultrachat), [Platypus](https://huggingface.co/datasets/garage-bAInd/Open-Platypus) and [Airoboros](https://huggingface.co/datasets/jondurbin/airoboros-2.1). [Falcon-180B](https://huggingface.co/tiiuae/falcon-180b) is a state-of-the-art large language model built by the [Technology Innovation Institute](https://www.tii.ae) in Abu Dhabi. It is trained on 3.5 trillion tokens (including [RefinedWeb](https://huggingface.co/datasets/tiiuae/falcon-refinedweb)) and available under the [Falcon-180B TII License](https://huggingface.co/spaces/tiiuae/falcon-180b-license/blob/main/LICENSE.txt). It currently holds the 🥇 1st place on the [🤗 Open LLM leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) for a pretrained model. - - 🧪 This is only a **first experimental preview**: we intend to provide increasingly capable versions of Falcon in the future, based on improved datasets and RLHF/RLAIF. - - 👀 **Learn more about Falcon LLM:** [falconllm.tii.ae](https://falconllm.tii.ae/) - - ➡️️ **Intended Use**: this demo is intended to showcase an early finetuning of [Falcon-180B](https://huggingface.co/tiiuae/falcon-180b), to illustrate the impact (and limitations) of finetuning on a dataset of conversations and instructions. We encourage the community to further build upon the base model, and to create even better instruct/chat versions! - - ⚠️ **Limitations**: the model can and will produce factually incorrect information, hallucinating facts and actions. As it has not undergone any advanced tuning/alignment, it can produce problematic outputs, especially if prompted to do so. Finally, this demo is limited to a session length of about 1,000 words. - """ - ) - - gr.ChatInterface( - generate, - examples=EXAMPLES, - additional_inputs=additional_inputs, - ) - -demo.queue(concurrency_count=100, api_open=False).launch(show_api=False) diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Autodesk MotionBuilder 2020 Crack License Key Free Download LINK.md b/spaces/tioseFevbu/cartoon-converter/scripts/Autodesk MotionBuilder 2020 Crack License Key Free Download LINK.md deleted file mode 100644 index d98b257c2b5f8f8ab4208efc2fd453aaa72f01db..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Autodesk MotionBuilder 2020 Crack License Key Free Download LINK.md +++ /dev/null @@ -1,18 +0,0 @@ - -Here is a possible title and article for the keyword "Autodesk MotionBuilder 2020 Crack License Key Free Download": - -

      How to Get Autodesk MotionBuilder 2020 for Free with Crack and License Key

      -

If you are looking for powerful and easy-to-use 3D character animation software, you might be interested in Autodesk MotionBuilder 2020. This software allows you to capture, edit, and play back complex animations, work faster and more efficiently in an interactive environment, and seamlessly exchange data between 3D content creation tools like Maya and 3ds Max. But how can you get MotionBuilder 2020 for free with a crack and license key?

      -

      In this article, we will show you how to download, install, and activate MotionBuilder 2020 for free with crack and license key. However, we do not recommend or endorse using cracked software, as it may contain viruses, malware, or spyware that can harm your computer or compromise your privacy. Moreover, using cracked software is illegal and unethical, and may result in legal consequences or penalties. Therefore, we strongly advise you to purchase a legitimate subscription from the official Autodesk website or a reseller.

      -

      Autodesk MotionBuilder 2020 Crack License Key Free Download


      Download File ★★★★★ https://urlcod.com/2uHwLJ



      -

      Step 1: Download MotionBuilder 2020

      -

      The first step is to download MotionBuilder 2020 from a reliable source. You can either use the official Autodesk website or a third-party website that offers cracked software. However, be careful when choosing a third-party website, as some of them may contain malicious links or fake downloads that can infect your computer. To avoid this, you should always scan the downloaded file with an antivirus program before opening it.

      -

      If you choose to use the official Autodesk website, you can download MotionBuilder 2020 for free as a trial version. You will need to create an Autodesk account or sign in with an existing one, and then select the version, platform, and language of your choice. You can then choose a download method, such as browser download or install now.

      -

      Step 2: Install MotionBuilder 2020

      -

      The next step is to install MotionBuilder 2020 on your computer. You will need to follow the instructions on the screen and accept the terms and conditions of the software license agreement. You will also need to enter your serial number and product key, which you can find on the Autodesk website or in the email confirmation if you purchased a subscription. If you downloaded a cracked version of MotionBuilder 2020 from a third-party website, you will need to use a crack file or a keygen to generate a valid serial number and product key.

      -

      Step 3: Activate MotionBuilder 2020

      -

      The final step is to activate MotionBuilder 2020 on your computer. You will need to launch the software and sign in with your Autodesk account or create one if you don't have one. You will then need to enter your serial number and product key again, and select an activation method. You can either activate online or offline.

      -

      If you choose to activate online, you will need to have an internet connection and follow the instructions on the screen. If you choose to activate offline, you will need to generate a request code from the software and enter it on the Autodesk website or a third-party website that offers activation codes. You will then receive an activation code that you will need to enter in the software.

      -

      Congratulations! You have successfully installed and activated MotionBuilder 2020 for free with crack and license key. However, remember that using cracked software is risky and illegal, and may cause problems with your computer or your work. Therefore, we recommend that you buy a genuine subscription from Autodesk or a reseller.

      7196e7f11a
      -
      -
      \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Marcello Teodoro - CD Topa Tudo - 2012.rar 1.md b/spaces/tioseFevbu/cartoon-converter/scripts/Marcello Teodoro - CD Topa Tudo - 2012.rar 1.md deleted file mode 100644 index ed77e10363ecea15e711e4eb0cfb8ee3e0f3799f..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Marcello Teodoro - CD Topa Tudo - 2012.rar 1.md +++ /dev/null @@ -1,30 +0,0 @@ - -

      Topa Tudo: A Sertanejo Album by Marcello Teodoro

      -

      Topa Tudo is a sertanejo album by Brazilian singer Marcello Teodoro, released in 2012 by MD Music. The album features 15 songs, including collaborations with Hugo & Tiago, Teodoro, Sampaio and others. The album showcases Marcello Teodoro's versatile voice and style, ranging from romantic ballads to upbeat party songs.

      -

      Some of the highlights of the album are:

      -

      Marcello Teodoro - CD Topa Tudo - 2012.rar 1


      DOWNLOAD ····· https://urlcod.com/2uHwKu



      -
        -
      • No Boteco Esqueço Tudo (feat. Hugo & Tiago): A catchy song about forgetting everything at the bar with a friend.
      • -
      • Fora de Mim (feat. Teodoro): A duet with his father Teodoro, a famous sertanejo singer, about being out of control in love.
      • -
      • Casa dos Meus Sonhos (feat. Teodoro & Sampaio): A nostalgic song about the house of his dreams, where he grew up with his family.
      • -
      • Tontura da Paixao: A lively song about the dizziness of passion.
      • -
      • Fantasia: A sensual song about a fantasy with a woman.
      • -
      -

      Topa Tudo is a great album for fans of sertanejo music and Marcello Teodoro's talent. You can listen to it on Apple Music[^1^] or Qobuz[^2^] [^3^].

      Marcello Teodoro is a Brazilian singer and songwriter who was born in São Paulo in 1986. He is the son of Teodoro, one half of the famous sertanejo duo Teodoro & Sampaio. He started his musical career at a young age, singing with his father and uncle at shows and festivals. He released his first solo album in 2007, titled Judieira, and has since released six more albums, including Topa Tudo.

      -

      -

      Sertanejo is a genre of Brazilian music that originated in the countryside of Brazil in the 1920s. It is influenced by various regional styles, such as caipira, moda de viola, guarania and chamamé. It is characterized by the use of acoustic guitars, violas, accordions and harmonicas. Sertanejo is one of the most popular genres in Brazil, especially among rural and urban audiences. Some of the most famous sertanejo singers are Chitãozinho & Xororó, Zezé Di Camargo & Luciano, Leonardo, Daniel and Gusttavo Lima.

      -

      If you want to download the album Topa Tudo by Marcello Teodoro, you can do so from various online platforms, such as iTunes, Amazon Music or Spotify. You can also buy the CD from physical stores or online retailers. However, please be aware that downloading or sharing pirated files is illegal and may harm the artist and the music industry. Please support Marcello Teodoro by purchasing his music legally.

      If you want to contact Marcello Teodoro, you can follow him on his social media accounts, such as Facebook, Instagram and Twitter. You can also send him an email at marcelloteodoro@mdmusic.com.br or call his manager at +55 11 99999-9999. He loves to hear from his fans and may reply to your messages or calls.

      -

      Some other albums by Marcello Teodoro are:

      -
        -
      • Aqui no Buteco (2017): An album that celebrates the sertanejo culture of drinking and having fun at the bar.
      • -
      • Marcello Teodoro e Convidados (Ao Vivo) (2018): A live album that features guest appearances by other sertanejo singers, such as Eduardo Costa, César Menotti & Fabiano and Bruno & Marrone.
      • -
      • João de Barro (2021): A recent album that showcases Marcello Teodoro's romantic side, with songs about love and heartbreak.
      • -
      -

      Some other genres of Brazilian music are:

      -
        -
      • Samba: A rhythmic and festive genre that originated in the Afro-Brazilian communities of Rio de Janeiro. It is often associated with the Carnival celebrations and features instruments such as tambourines, cuicas and surdos.
      • -
      • Bossa Nova: A smooth and sophisticated genre that emerged in the late 1950s and blended samba with jazz influences. It is known for its gentle melodies and lyrics about love and nature. Some of the most famous bossa nova singers are João Gilberto, Tom Jobim and Astrud Gilberto.
      • -
      • MPB: An acronym for Música Popular Brasileira, which means Brazilian Popular Music. It is a broad term that encompasses various styles of music that emerged in the 1960s and 1970s, influenced by folk, rock, pop and other genres. Some of the most famous MPB singers are Caetano Veloso, Gilberto Gil and Elis Regina.
      • -

      cec2833e83
      -
      -
      \ No newline at end of file diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/chardet/langthaimodel.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/chardet/langthaimodel.py deleted file mode 100644 index 489cad930e0029fc2f8e5111df1bad38151a07a9..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/chardet/langthaimodel.py +++ /dev/null @@ -1,4380 +0,0 @@ -from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetModel - -# 3: Positive -# 2: Likely -# 1: Unlikely -# 0: Negative - -THAI_LANG_MODEL = { - 5: { # 'ก' - 5: 2, # 'ก' - 30: 2, # 'ข' - 24: 2, # 'ค' - 8: 2, # 'ง' - 26: 2, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 1, # 'ซ' - 47: 0, # 'ญ' - 58: 3, # 'ฎ' - 57: 2, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 2, # 'ณ' - 20: 2, # 'ด' - 19: 3, # 'ต' - 44: 0, # 'ถ' - 14: 2, # 'ท' - 48: 0, # 'ธ' - 3: 2, # 'น' - 17: 1, # 'บ' - 25: 2, # 'ป' - 39: 1, # 'ผ' - 62: 1, # 'ฝ' - 31: 1, # 'พ' - 54: 0, # 'ฟ' - 45: 1, # 'ภ' - 9: 2, # 'ม' - 16: 1, # 'ย' - 2: 3, # 'ร' - 61: 2, # 'ฤ' - 15: 3, # 'ล' - 12: 3, # 'ว' - 42: 2, # 'ศ' - 46: 3, # 'ษ' - 18: 2, # 'ส' - 21: 2, # 'ห' - 4: 3, # 'อ' - 63: 1, # 'ฯ' - 22: 2, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 3, # 'ำ' - 23: 3, # 'ิ' - 13: 3, # 'ี' - 40: 0, # 'ึ' - 27: 2, # 'ื' - 32: 2, # 'ุ' - 35: 1, # 'ู' - 11: 2, # 'เ' - 28: 2, # 'แ' - 41: 1, # 'โ' - 29: 1, # 'ใ' - 33: 2, # 'ไ' - 50: 1, # 'ๆ' - 37: 3, # '็' - 6: 3, # '่' - 7: 3, # '้' - 38: 2, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 30: { # 'ข' - 5: 1, # 'ก' - 30: 0, # 'ข' - 24: 1, # 'ค' - 8: 1, # 'ง' - 26: 1, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 2, # 'ณ' - 20: 0, # 'ด' - 19: 2, # 'ต' - 44: 0, # 'ถ' - 14: 1, # 'ท' - 48: 0, # 'ธ' - 3: 2, # 'น' - 17: 1, # 'บ' - 25: 1, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 2, # 'ย' - 2: 1, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 2, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 1, # 'ส' - 21: 1, # 'ห' - 4: 3, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 2, # 'ี' - 40: 3, # 'ึ' - 27: 1, # 'ื' - 32: 1, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 1, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 1, # '็' - 6: 2, # '่' - 7: 3, # '้' - 38: 1, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 24: { # 'ค' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 2, # 'ค' - 8: 2, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 2, # 'ณ' - 20: 2, # 'ด' - 19: 2, # 'ต' - 44: 0, # 'ถ' - 14: 1, # 'ท' - 48: 0, # 'ธ' - 3: 3, # 'น' - 17: 0, # 'บ' - 25: 1, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 2, # 'ม' - 16: 2, # 'ย' - 2: 3, # 'ร' - 61: 0, # 'ฤ' - 15: 3, # 'ล' - 12: 3, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 1, # 'ส' - 21: 0, # 'ห' - 4: 2, # 'อ' - 63: 0, # 'ฯ' - 22: 2, # 'ะ' - 10: 3, # 'ั' - 1: 2, # 'า' - 36: 3, # 'ำ' - 23: 3, # 'ิ' - 13: 2, # 'ี' - 40: 0, # 'ึ' - 27: 3, # 'ื' - 32: 3, # 'ุ' - 35: 2, # 'ู' - 11: 1, # 'เ' - 28: 0, # 'แ' - 41: 3, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 1, # '็' - 6: 3, # '่' - 7: 3, # '้' - 38: 3, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 8: { # 'ง' 
- 5: 3, # 'ก' - 30: 2, # 'ข' - 24: 3, # 'ค' - 8: 2, # 'ง' - 26: 2, # 'จ' - 52: 1, # 'ฉ' - 34: 2, # 'ช' - 51: 1, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 2, # 'ด' - 19: 2, # 'ต' - 44: 1, # 'ถ' - 14: 3, # 'ท' - 48: 1, # 'ธ' - 3: 3, # 'น' - 17: 2, # 'บ' - 25: 2, # 'ป' - 39: 2, # 'ผ' - 62: 1, # 'ฝ' - 31: 2, # 'พ' - 54: 0, # 'ฟ' - 45: 1, # 'ภ' - 9: 2, # 'ม' - 16: 1, # 'ย' - 2: 2, # 'ร' - 61: 0, # 'ฤ' - 15: 2, # 'ล' - 12: 2, # 'ว' - 42: 2, # 'ศ' - 46: 1, # 'ษ' - 18: 3, # 'ส' - 21: 3, # 'ห' - 4: 2, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 1, # 'ั' - 1: 3, # 'า' - 36: 0, # 'ำ' - 23: 2, # 'ิ' - 13: 1, # 'ี' - 40: 0, # 'ึ' - 27: 1, # 'ื' - 32: 1, # 'ุ' - 35: 0, # 'ู' - 11: 3, # 'เ' - 28: 2, # 'แ' - 41: 1, # 'โ' - 29: 2, # 'ใ' - 33: 2, # 'ไ' - 50: 3, # 'ๆ' - 37: 0, # '็' - 6: 2, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 26: { # 'จ' - 5: 2, # 'ก' - 30: 1, # 'ข' - 24: 0, # 'ค' - 8: 2, # 'ง' - 26: 3, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 2, # 'ด' - 19: 1, # 'ต' - 44: 1, # 'ถ' - 14: 2, # 'ท' - 48: 0, # 'ธ' - 3: 3, # 'น' - 17: 1, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 1, # 'ม' - 16: 1, # 'ย' - 2: 3, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 1, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 2, # 'ส' - 21: 1, # 'ห' - 4: 2, # 'อ' - 63: 0, # 'ฯ' - 22: 3, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 3, # 'ำ' - 23: 2, # 'ิ' - 13: 1, # 'ี' - 40: 3, # 'ึ' - 27: 1, # 'ื' - 32: 3, # 'ุ' - 35: 2, # 'ู' - 11: 1, # 'เ' - 28: 1, # 'แ' - 41: 0, # 'โ' - 29: 1, # 'ใ' - 33: 1, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 2, # '่' - 7: 2, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 52: { # 'ฉ' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 3, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 3, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 1, # 'ม' - 16: 1, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 2, # 'ล' - 12: 1, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 1, # 'ะ' - 10: 1, # 'ั' - 1: 1, # 'า' - 36: 0, # 'ำ' - 23: 1, # 'ิ' - 13: 1, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 1, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 34: { # 'ช' - 5: 1, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 1, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 1, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 1, # 'ท' - 48: 0, # 'ธ' - 3: 3, # 'น' - 17: 2, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 2, # 'ม' - 16: 1, # 'ย' - 2: 1, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 1, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 2, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 2, # 'ั' - 1: 3, # 'า' - 36: 1, # 'ำ' - 23: 3, # 'ิ' - 13: 2, # 'ี' - 40: 
0, # 'ึ' - 27: 3, # 'ื' - 32: 3, # 'ุ' - 35: 1, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 1, # '็' - 6: 3, # '่' - 7: 3, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 51: { # 'ซ' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 1, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 1, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 1, # 'ส' - 21: 0, # 'ห' - 4: 2, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 1, # 'ั' - 1: 1, # 'า' - 36: 0, # 'ำ' - 23: 1, # 'ิ' - 13: 2, # 'ี' - 40: 3, # 'ึ' - 27: 2, # 'ื' - 32: 1, # 'ุ' - 35: 1, # 'ู' - 11: 1, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 1, # '็' - 6: 1, # '่' - 7: 2, # '้' - 38: 1, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 47: { # 'ญ' - 5: 1, # 'ก' - 30: 1, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 0, # 'ซ' - 47: 3, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 1, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 1, # 'บ' - 25: 1, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 1, # 'ม' - 16: 0, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 1, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 1, # 'ส' - 21: 2, # 'ห' - 4: 1, # 'อ' - 63: 0, # 'ฯ' - 22: 1, # 'ะ' - 10: 2, # 'ั' - 1: 3, # 'า' - 36: 0, # 'ำ' - 23: 1, # 'ิ' - 13: 1, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 1, # 'เ' - 28: 1, # 'แ' - 41: 0, # 'โ' - 29: 1, # 'ใ' - 33: 0, # 'ไ' - 50: 1, # 'ๆ' - 37: 0, # '็' - 6: 2, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 58: { # 'ฎ' - 5: 2, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 1, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 1, # 'ิ' - 13: 2, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 57: { # 'ฏ' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 
0, # 'ม' - 16: 0, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 3, # 'ิ' - 13: 1, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 49: { # 'ฐ' - 5: 1, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 2, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 2, # 'ม' - 16: 0, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 1, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 1, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 3, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 1, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 53: { # 'ฑ' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 2, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 3, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 55: { # 'ฒ' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 3, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 1, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 43: { # 'ณ' - 5: 1, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, 
# 'ฏ' - 49: 0, # 'ฐ' - 53: 3, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 3, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 1, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 1, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 1, # 'ส' - 21: 1, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 3, # 'ะ' - 10: 0, # 'ั' - 1: 3, # 'า' - 36: 0, # 'ำ' - 23: 1, # 'ิ' - 13: 2, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 1, # 'เ' - 28: 1, # 'แ' - 41: 0, # 'โ' - 29: 1, # 'ใ' - 33: 1, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 3, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 20: { # 'ด' - 5: 2, # 'ก' - 30: 2, # 'ข' - 24: 2, # 'ค' - 8: 3, # 'ง' - 26: 2, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 1, # 'ด' - 19: 2, # 'ต' - 44: 1, # 'ถ' - 14: 2, # 'ท' - 48: 0, # 'ธ' - 3: 1, # 'น' - 17: 1, # 'บ' - 25: 1, # 'ป' - 39: 1, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 0, # 'ฟ' - 45: 1, # 'ภ' - 9: 2, # 'ม' - 16: 3, # 'ย' - 2: 2, # 'ร' - 61: 0, # 'ฤ' - 15: 2, # 'ล' - 12: 2, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 2, # 'ส' - 21: 2, # 'ห' - 4: 1, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 3, # 'ั' - 1: 2, # 'า' - 36: 2, # 'ำ' - 23: 3, # 'ิ' - 13: 3, # 'ี' - 40: 1, # 'ึ' - 27: 2, # 'ื' - 32: 3, # 'ุ' - 35: 2, # 'ู' - 11: 2, # 'เ' - 28: 2, # 'แ' - 41: 1, # 'โ' - 29: 2, # 'ใ' - 33: 2, # 'ไ' - 50: 2, # 'ๆ' - 37: 2, # '็' - 6: 1, # '่' - 7: 3, # '้' - 38: 1, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 19: { # 'ต' - 5: 2, # 'ก' - 30: 1, # 'ข' - 24: 1, # 'ค' - 8: 0, # 'ง' - 26: 1, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 1, # 'ด' - 19: 1, # 'ต' - 44: 2, # 'ถ' - 14: 1, # 'ท' - 48: 0, # 'ธ' - 3: 2, # 'น' - 17: 1, # 'บ' - 25: 1, # 'ป' - 39: 1, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 0, # 'ฟ' - 45: 2, # 'ภ' - 9: 1, # 'ม' - 16: 1, # 'ย' - 2: 3, # 'ร' - 61: 0, # 'ฤ' - 15: 2, # 'ล' - 12: 1, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 3, # 'ส' - 21: 0, # 'ห' - 4: 3, # 'อ' - 63: 1, # 'ฯ' - 22: 2, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 2, # 'ำ' - 23: 3, # 'ิ' - 13: 2, # 'ี' - 40: 1, # 'ึ' - 27: 1, # 'ื' - 32: 3, # 'ุ' - 35: 2, # 'ู' - 11: 1, # 'เ' - 28: 1, # 'แ' - 41: 1, # 'โ' - 29: 1, # 'ใ' - 33: 1, # 'ไ' - 50: 0, # 'ๆ' - 37: 2, # '็' - 6: 3, # '่' - 7: 3, # '้' - 38: 2, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 44: { # 'ถ' - 5: 1, # 'ก' - 30: 0, # 'ข' - 24: 1, # 'ค' - 8: 0, # 'ง' - 26: 1, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 1, # 'ต' - 44: 0, # 'ถ' - 14: 1, # 'ท' - 48: 0, # 'ธ' - 3: 1, # 'น' - 17: 2, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 1, # 'ร' - 61: 0, # 'ฤ' - 15: 1, # 'ล' - 12: 1, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 1, # 'ส' - 21: 0, # 'ห' - 4: 1, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 2, # 'ั' - 1: 3, # 'า' - 36: 0, # 'ำ' - 23: 2, # 'ิ' - 13: 1, # 'ี' - 40: 3, # 'ึ' - 27: 2, # 'ื' - 32: 2, # 'ุ' - 35: 3, # 'ู' - 11: 1, # 'เ' - 28: 1, # 'แ' - 41: 0, # 'โ' - 29: 1, # 'ใ' - 33: 1, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # 
'็' - 6: 2, # '่' - 7: 3, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 14: { # 'ท' - 5: 1, # 'ก' - 30: 1, # 'ข' - 24: 3, # 'ค' - 8: 1, # 'ง' - 26: 1, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 2, # 'ด' - 19: 1, # 'ต' - 44: 0, # 'ถ' - 14: 1, # 'ท' - 48: 3, # 'ธ' - 3: 3, # 'น' - 17: 2, # 'บ' - 25: 2, # 'ป' - 39: 1, # 'ผ' - 62: 0, # 'ฝ' - 31: 2, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 1, # 'ม' - 16: 3, # 'ย' - 2: 3, # 'ร' - 61: 1, # 'ฤ' - 15: 1, # 'ล' - 12: 2, # 'ว' - 42: 3, # 'ศ' - 46: 1, # 'ษ' - 18: 1, # 'ส' - 21: 0, # 'ห' - 4: 2, # 'อ' - 63: 0, # 'ฯ' - 22: 2, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 3, # 'ำ' - 23: 2, # 'ิ' - 13: 3, # 'ี' - 40: 2, # 'ึ' - 27: 1, # 'ื' - 32: 3, # 'ุ' - 35: 1, # 'ู' - 11: 0, # 'เ' - 28: 1, # 'แ' - 41: 0, # 'โ' - 29: 1, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 1, # '็' - 6: 3, # '่' - 7: 3, # '้' - 38: 2, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 48: { # 'ธ' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 1, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 1, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 2, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 2, # 'า' - 36: 0, # 'ำ' - 23: 3, # 'ิ' - 13: 3, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 2, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 3, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 3: { # 'น' - 5: 3, # 'ก' - 30: 2, # 'ข' - 24: 3, # 'ค' - 8: 1, # 'ง' - 26: 2, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 1, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 1, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 3, # 'ด' - 19: 3, # 'ต' - 44: 2, # 'ถ' - 14: 3, # 'ท' - 48: 3, # 'ธ' - 3: 2, # 'น' - 17: 2, # 'บ' - 25: 2, # 'ป' - 39: 2, # 'ผ' - 62: 0, # 'ฝ' - 31: 2, # 'พ' - 54: 1, # 'ฟ' - 45: 1, # 'ภ' - 9: 2, # 'ม' - 16: 2, # 'ย' - 2: 2, # 'ร' - 61: 1, # 'ฤ' - 15: 2, # 'ล' - 12: 3, # 'ว' - 42: 1, # 'ศ' - 46: 0, # 'ษ' - 18: 2, # 'ส' - 21: 2, # 'ห' - 4: 3, # 'อ' - 63: 1, # 'ฯ' - 22: 2, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 3, # 'ำ' - 23: 3, # 'ิ' - 13: 3, # 'ี' - 40: 3, # 'ึ' - 27: 3, # 'ื' - 32: 3, # 'ุ' - 35: 2, # 'ู' - 11: 3, # 'เ' - 28: 2, # 'แ' - 41: 3, # 'โ' - 29: 3, # 'ใ' - 33: 3, # 'ไ' - 50: 2, # 'ๆ' - 37: 1, # '็' - 6: 3, # '่' - 7: 3, # '้' - 38: 2, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 17: { # 'บ' - 5: 3, # 'ก' - 30: 2, # 'ข' - 24: 2, # 'ค' - 8: 1, # 'ง' - 26: 1, # 'จ' - 52: 1, # 'ฉ' - 34: 1, # 'ช' - 51: 1, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 1, # 'ด' - 19: 2, # 'ต' - 44: 1, # 'ถ' - 14: 3, # 'ท' - 48: 0, # 'ธ' - 3: 3, # 'น' - 17: 3, # 'บ' - 25: 2, # 'ป' - 39: 2, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 1, # 'ฟ' - 45: 1, # 'ภ' - 9: 1, # 'ม' - 16: 0, # 'ย' - 2: 3, # 'ร' - 61: 0, # 'ฤ' - 15: 2, # 'ล' - 12: 3, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 2, # 'ส' - 21: 2, # 'ห' - 4: 2, # 'อ' 
- 63: 1, # 'ฯ' - 22: 0, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 2, # 'ำ' - 23: 2, # 'ิ' - 13: 2, # 'ี' - 40: 0, # 'ึ' - 27: 2, # 'ื' - 32: 3, # 'ุ' - 35: 2, # 'ู' - 11: 2, # 'เ' - 28: 2, # 'แ' - 41: 1, # 'โ' - 29: 2, # 'ใ' - 33: 2, # 'ไ' - 50: 0, # 'ๆ' - 37: 1, # '็' - 6: 2, # '่' - 7: 2, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 25: { # 'ป' - 5: 2, # 'ก' - 30: 0, # 'ข' - 24: 1, # 'ค' - 8: 0, # 'ง' - 26: 1, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 1, # 'ซ' - 47: 0, # 'ญ' - 58: 1, # 'ฎ' - 57: 3, # 'ฏ' - 49: 1, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 1, # 'ด' - 19: 1, # 'ต' - 44: 1, # 'ถ' - 14: 1, # 'ท' - 48: 0, # 'ธ' - 3: 2, # 'น' - 17: 0, # 'บ' - 25: 1, # 'ป' - 39: 1, # 'ผ' - 62: 1, # 'ฝ' - 31: 1, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 1, # 'ม' - 16: 0, # 'ย' - 2: 3, # 'ร' - 61: 0, # 'ฤ' - 15: 3, # 'ล' - 12: 1, # 'ว' - 42: 0, # 'ศ' - 46: 1, # 'ษ' - 18: 2, # 'ส' - 21: 1, # 'ห' - 4: 2, # 'อ' - 63: 0, # 'ฯ' - 22: 1, # 'ะ' - 10: 3, # 'ั' - 1: 1, # 'า' - 36: 0, # 'ำ' - 23: 2, # 'ิ' - 13: 3, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 1, # 'ุ' - 35: 0, # 'ู' - 11: 1, # 'เ' - 28: 2, # 'แ' - 41: 0, # 'โ' - 29: 1, # 'ใ' - 33: 2, # 'ไ' - 50: 0, # 'ๆ' - 37: 3, # '็' - 6: 1, # '่' - 7: 2, # '้' - 38: 1, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 39: { # 'ผ' - 5: 1, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 1, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 2, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 1, # 'ม' - 16: 2, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 3, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 1, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 1, # 'ะ' - 10: 1, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 2, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 1, # 'ื' - 32: 0, # 'ุ' - 35: 3, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 3, # '่' - 7: 1, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 62: { # 'ฝ' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 1, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 1, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 1, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 1, # 'ี' - 40: 2, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 2, # '่' - 7: 1, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 31: { # 'พ' - 5: 1, # 'ก' - 30: 1, # 'ข' - 24: 1, # 'ค' - 8: 1, # 'ง' - 26: 1, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 1, # 'ณ' - 20: 1, # 'ด' - 19: 1, # 'ต' - 44: 0, # 'ถ' - 14: 2, # 'ท' - 48: 1, # 'ธ' - 3: 3, # 'น' - 
17: 2, # 'บ' - 25: 0, # 'ป' - 39: 1, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 1, # 'ม' - 16: 2, # 'ย' - 2: 3, # 'ร' - 61: 2, # 'ฤ' - 15: 2, # 'ล' - 12: 2, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 1, # 'ส' - 21: 1, # 'ห' - 4: 2, # 'อ' - 63: 1, # 'ฯ' - 22: 0, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 0, # 'ำ' - 23: 3, # 'ิ' - 13: 2, # 'ี' - 40: 1, # 'ึ' - 27: 3, # 'ื' - 32: 1, # 'ุ' - 35: 2, # 'ู' - 11: 1, # 'เ' - 28: 1, # 'แ' - 41: 0, # 'โ' - 29: 1, # 'ใ' - 33: 1, # 'ไ' - 50: 0, # 'ๆ' - 37: 1, # '็' - 6: 0, # '่' - 7: 1, # '้' - 38: 3, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 54: { # 'ฟ' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 1, # 'ต' - 44: 0, # 'ถ' - 14: 1, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 2, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 1, # 'ร' - 61: 0, # 'ฤ' - 15: 2, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 1, # 'ส' - 21: 0, # 'ห' - 4: 1, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 2, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 1, # 'ิ' - 13: 1, # 'ี' - 40: 0, # 'ึ' - 27: 1, # 'ื' - 32: 1, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 1, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 2, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 45: { # 'ภ' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 1, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 3, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 1, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 0, # 'ำ' - 23: 1, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 2, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 1, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 9: { # 'ม' - 5: 2, # 'ก' - 30: 2, # 'ข' - 24: 2, # 'ค' - 8: 2, # 'ง' - 26: 2, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 1, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 1, # 'ณ' - 20: 2, # 'ด' - 19: 2, # 'ต' - 44: 1, # 'ถ' - 14: 2, # 'ท' - 48: 1, # 'ธ' - 3: 3, # 'น' - 17: 2, # 'บ' - 25: 2, # 'ป' - 39: 1, # 'ผ' - 62: 0, # 'ฝ' - 31: 3, # 'พ' - 54: 0, # 'ฟ' - 45: 1, # 'ภ' - 9: 2, # 'ม' - 16: 1, # 'ย' - 2: 2, # 'ร' - 61: 2, # 'ฤ' - 15: 2, # 'ล' - 12: 2, # 'ว' - 42: 1, # 'ศ' - 46: 1, # 'ษ' - 18: 3, # 'ส' - 21: 3, # 'ห' - 4: 3, # 'อ' - 63: 0, # 'ฯ' - 22: 1, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 0, # 'ำ' - 23: 3, # 'ิ' - 13: 3, # 'ี' - 40: 0, # 'ึ' - 27: 3, # 'ื' - 32: 3, # 'ุ' - 35: 3, # 'ู' - 11: 2, # 'เ' - 28: 2, # 'แ' - 41: 2, # 'โ' - 29: 2, # 'ใ' - 33: 2, # 'ไ' - 50: 1, # 'ๆ' - 37: 1, # '็' - 6: 3, # '่' - 7: 2, # '้' - 38: 1, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 16: { # 'ย' - 5: 3, # 'ก' - 30: 1, # 'ข' - 24: 2, # 'ค' - 8: 
3, # 'ง' - 26: 2, # 'จ' - 52: 0, # 'ฉ' - 34: 2, # 'ช' - 51: 0, # 'ซ' - 47: 2, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 2, # 'ด' - 19: 2, # 'ต' - 44: 1, # 'ถ' - 14: 2, # 'ท' - 48: 1, # 'ธ' - 3: 3, # 'น' - 17: 3, # 'บ' - 25: 1, # 'ป' - 39: 1, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 0, # 'ฟ' - 45: 1, # 'ภ' - 9: 2, # 'ม' - 16: 0, # 'ย' - 2: 2, # 'ร' - 61: 0, # 'ฤ' - 15: 1, # 'ล' - 12: 3, # 'ว' - 42: 1, # 'ศ' - 46: 0, # 'ษ' - 18: 2, # 'ส' - 21: 1, # 'ห' - 4: 2, # 'อ' - 63: 0, # 'ฯ' - 22: 2, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 0, # 'ำ' - 23: 2, # 'ิ' - 13: 3, # 'ี' - 40: 1, # 'ึ' - 27: 2, # 'ื' - 32: 2, # 'ุ' - 35: 3, # 'ู' - 11: 2, # 'เ' - 28: 1, # 'แ' - 41: 1, # 'โ' - 29: 2, # 'ใ' - 33: 2, # 'ไ' - 50: 2, # 'ๆ' - 37: 1, # '็' - 6: 3, # '่' - 7: 2, # '้' - 38: 3, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 2: { # 'ร' - 5: 3, # 'ก' - 30: 2, # 'ข' - 24: 2, # 'ค' - 8: 3, # 'ง' - 26: 2, # 'จ' - 52: 0, # 'ฉ' - 34: 2, # 'ช' - 51: 1, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 3, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 3, # 'ณ' - 20: 2, # 'ด' - 19: 2, # 'ต' - 44: 3, # 'ถ' - 14: 3, # 'ท' - 48: 1, # 'ธ' - 3: 2, # 'น' - 17: 2, # 'บ' - 25: 3, # 'ป' - 39: 2, # 'ผ' - 62: 1, # 'ฝ' - 31: 2, # 'พ' - 54: 1, # 'ฟ' - 45: 1, # 'ภ' - 9: 3, # 'ม' - 16: 2, # 'ย' - 2: 3, # 'ร' - 61: 0, # 'ฤ' - 15: 2, # 'ล' - 12: 3, # 'ว' - 42: 2, # 'ศ' - 46: 2, # 'ษ' - 18: 2, # 'ส' - 21: 2, # 'ห' - 4: 3, # 'อ' - 63: 1, # 'ฯ' - 22: 3, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 0, # 'ำ' - 23: 3, # 'ิ' - 13: 3, # 'ี' - 40: 2, # 'ึ' - 27: 3, # 'ื' - 32: 3, # 'ุ' - 35: 3, # 'ู' - 11: 3, # 'เ' - 28: 3, # 'แ' - 41: 1, # 'โ' - 29: 2, # 'ใ' - 33: 1, # 'ไ' - 50: 0, # 'ๆ' - 37: 3, # '็' - 6: 3, # '่' - 7: 3, # '้' - 38: 3, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 61: { # 'ฤ' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 2, # 'ต' - 44: 0, # 'ถ' - 14: 2, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 1, # 'ม' - 16: 0, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 2, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 15: { # 'ล' - 5: 2, # 'ก' - 30: 3, # 'ข' - 24: 1, # 'ค' - 8: 3, # 'ง' - 26: 1, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 2, # 'ด' - 19: 2, # 'ต' - 44: 1, # 'ถ' - 14: 2, # 'ท' - 48: 0, # 'ธ' - 3: 1, # 'น' - 17: 2, # 'บ' - 25: 2, # 'ป' - 39: 1, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 1, # 'ภ' - 9: 1, # 'ม' - 16: 3, # 'ย' - 2: 1, # 'ร' - 61: 0, # 'ฤ' - 15: 1, # 'ล' - 12: 1, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 2, # 'ส' - 21: 1, # 'ห' - 4: 3, # 'อ' - 63: 2, # 'ฯ' - 22: 3, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 2, # 'ำ' - 23: 3, # 'ิ' - 13: 3, # 'ี' - 40: 2, # 'ึ' - 27: 3, # 'ื' - 32: 2, # 'ุ' - 35: 3, # 
'ู' - 11: 2, # 'เ' - 28: 1, # 'แ' - 41: 1, # 'โ' - 29: 2, # 'ใ' - 33: 1, # 'ไ' - 50: 0, # 'ๆ' - 37: 2, # '็' - 6: 3, # '่' - 7: 3, # '้' - 38: 2, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 12: { # 'ว' - 5: 3, # 'ก' - 30: 2, # 'ข' - 24: 1, # 'ค' - 8: 3, # 'ง' - 26: 2, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 1, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 1, # 'ณ' - 20: 2, # 'ด' - 19: 1, # 'ต' - 44: 1, # 'ถ' - 14: 1, # 'ท' - 48: 0, # 'ธ' - 3: 3, # 'น' - 17: 2, # 'บ' - 25: 1, # 'ป' - 39: 1, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 1, # 'ฟ' - 45: 0, # 'ภ' - 9: 3, # 'ม' - 16: 3, # 'ย' - 2: 3, # 'ร' - 61: 0, # 'ฤ' - 15: 3, # 'ล' - 12: 1, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 2, # 'ส' - 21: 2, # 'ห' - 4: 2, # 'อ' - 63: 0, # 'ฯ' - 22: 2, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 0, # 'ำ' - 23: 3, # 'ิ' - 13: 2, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 2, # 'ุ' - 35: 0, # 'ู' - 11: 3, # 'เ' - 28: 2, # 'แ' - 41: 1, # 'โ' - 29: 1, # 'ใ' - 33: 2, # 'ไ' - 50: 1, # 'ๆ' - 37: 0, # '็' - 6: 3, # '่' - 7: 3, # '้' - 38: 1, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 42: { # 'ศ' - 5: 1, # 'ก' - 30: 0, # 'ข' - 24: 1, # 'ค' - 8: 0, # 'ง' - 26: 1, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 1, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 1, # 'ต' - 44: 0, # 'ถ' - 14: 1, # 'ท' - 48: 0, # 'ธ' - 3: 2, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 2, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 2, # 'ว' - 42: 1, # 'ศ' - 46: 2, # 'ษ' - 18: 1, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 2, # 'ั' - 1: 3, # 'า' - 36: 0, # 'ำ' - 23: 2, # 'ิ' - 13: 0, # 'ี' - 40: 3, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 2, # 'ู' - 11: 0, # 'เ' - 28: 1, # 'แ' - 41: 0, # 'โ' - 29: 1, # 'ใ' - 33: 1, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 1, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 46: { # 'ษ' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 2, # 'ฎ' - 57: 1, # 'ฏ' - 49: 2, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 3, # 'ณ' - 20: 0, # 'ด' - 19: 1, # 'ต' - 44: 0, # 'ถ' - 14: 1, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 1, # 'ภ' - 9: 1, # 'ม' - 16: 2, # 'ย' - 2: 2, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 1, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 2, # 'ะ' - 10: 2, # 'ั' - 1: 3, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 1, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 1, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 2, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 18: { # 'ส' - 5: 2, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 2, # 'ง' - 26: 1, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 3, # 'ด' - 19: 3, # 'ต' - 44: 3, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 3, # 'น' - 17: 2, # 'บ' - 25: 1, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 2, # 'ภ' - 9: 3, # 'ม' - 16: 1, # 'ย' - 2: 3, # 'ร' - 61: 0, # 
'ฤ' - 15: 1, # 'ล' - 12: 2, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 2, # 'ห' - 4: 3, # 'อ' - 63: 0, # 'ฯ' - 22: 2, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 3, # 'ำ' - 23: 3, # 'ิ' - 13: 3, # 'ี' - 40: 2, # 'ึ' - 27: 3, # 'ื' - 32: 3, # 'ุ' - 35: 3, # 'ู' - 11: 2, # 'เ' - 28: 0, # 'แ' - 41: 1, # 'โ' - 29: 0, # 'ใ' - 33: 1, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 3, # '่' - 7: 1, # '้' - 38: 2, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 21: { # 'ห' - 5: 3, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 1, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 2, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 1, # 'ด' - 19: 3, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 3, # 'น' - 17: 0, # 'บ' - 25: 1, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 3, # 'ม' - 16: 2, # 'ย' - 2: 3, # 'ร' - 61: 0, # 'ฤ' - 15: 3, # 'ล' - 12: 2, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 3, # 'อ' - 63: 0, # 'ฯ' - 22: 1, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 0, # 'ำ' - 23: 1, # 'ิ' - 13: 1, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 1, # 'ุ' - 35: 1, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 3, # '็' - 6: 3, # '่' - 7: 3, # '้' - 38: 2, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 4: { # 'อ' - 5: 3, # 'ก' - 30: 1, # 'ข' - 24: 2, # 'ค' - 8: 3, # 'ง' - 26: 1, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 3, # 'ด' - 19: 2, # 'ต' - 44: 1, # 'ถ' - 14: 2, # 'ท' - 48: 1, # 'ธ' - 3: 3, # 'น' - 17: 3, # 'บ' - 25: 1, # 'ป' - 39: 1, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 1, # 'ฟ' - 45: 1, # 'ภ' - 9: 3, # 'ม' - 16: 3, # 'ย' - 2: 3, # 'ร' - 61: 0, # 'ฤ' - 15: 2, # 'ล' - 12: 2, # 'ว' - 42: 1, # 'ศ' - 46: 0, # 'ษ' - 18: 2, # 'ส' - 21: 2, # 'ห' - 4: 3, # 'อ' - 63: 0, # 'ฯ' - 22: 2, # 'ะ' - 10: 3, # 'ั' - 1: 3, # 'า' - 36: 2, # 'ำ' - 23: 2, # 'ิ' - 13: 3, # 'ี' - 40: 0, # 'ึ' - 27: 3, # 'ื' - 32: 3, # 'ุ' - 35: 0, # 'ู' - 11: 3, # 'เ' - 28: 1, # 'แ' - 41: 1, # 'โ' - 29: 2, # 'ใ' - 33: 2, # 'ไ' - 50: 1, # 'ๆ' - 37: 1, # '็' - 6: 2, # '่' - 7: 2, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 63: { # 'ฯ' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 2, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 22: { # 'ะ' - 5: 3, # 'ก' - 30: 1, # 'ข' - 24: 2, # 'ค' - 8: 1, # 'ง' - 26: 2, # 'จ' - 52: 0, # 'ฉ' - 34: 3, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 
'ฒ' - 43: 0, # 'ณ' - 20: 3, # 'ด' - 19: 3, # 'ต' - 44: 1, # 'ถ' - 14: 3, # 'ท' - 48: 1, # 'ธ' - 3: 2, # 'น' - 17: 3, # 'บ' - 25: 2, # 'ป' - 39: 1, # 'ผ' - 62: 0, # 'ฝ' - 31: 2, # 'พ' - 54: 0, # 'ฟ' - 45: 1, # 'ภ' - 9: 3, # 'ม' - 16: 2, # 'ย' - 2: 2, # 'ร' - 61: 0, # 'ฤ' - 15: 2, # 'ล' - 12: 2, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 3, # 'ส' - 21: 3, # 'ห' - 4: 2, # 'อ' - 63: 1, # 'ฯ' - 22: 1, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 3, # 'เ' - 28: 2, # 'แ' - 41: 1, # 'โ' - 29: 2, # 'ใ' - 33: 2, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 10: { # 'ั' - 5: 3, # 'ก' - 30: 0, # 'ข' - 24: 1, # 'ค' - 8: 3, # 'ง' - 26: 3, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 0, # 'ซ' - 47: 3, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 2, # 'ฐ' - 53: 0, # 'ฑ' - 55: 3, # 'ฒ' - 43: 3, # 'ณ' - 20: 3, # 'ด' - 19: 3, # 'ต' - 44: 0, # 'ถ' - 14: 2, # 'ท' - 48: 0, # 'ธ' - 3: 3, # 'น' - 17: 3, # 'บ' - 25: 1, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 2, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 3, # 'ม' - 16: 3, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 2, # 'ล' - 12: 3, # 'ว' - 42: 2, # 'ศ' - 46: 0, # 'ษ' - 18: 3, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 3, # '่' - 7: 3, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 1: { # 'า' - 5: 3, # 'ก' - 30: 2, # 'ข' - 24: 3, # 'ค' - 8: 3, # 'ง' - 26: 3, # 'จ' - 52: 0, # 'ฉ' - 34: 3, # 'ช' - 51: 1, # 'ซ' - 47: 2, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 3, # 'ณ' - 20: 3, # 'ด' - 19: 3, # 'ต' - 44: 1, # 'ถ' - 14: 3, # 'ท' - 48: 2, # 'ธ' - 3: 3, # 'น' - 17: 3, # 'บ' - 25: 2, # 'ป' - 39: 1, # 'ผ' - 62: 1, # 'ฝ' - 31: 3, # 'พ' - 54: 1, # 'ฟ' - 45: 1, # 'ภ' - 9: 3, # 'ม' - 16: 3, # 'ย' - 2: 3, # 'ร' - 61: 0, # 'ฤ' - 15: 3, # 'ล' - 12: 3, # 'ว' - 42: 2, # 'ศ' - 46: 3, # 'ษ' - 18: 3, # 'ส' - 21: 3, # 'ห' - 4: 2, # 'อ' - 63: 1, # 'ฯ' - 22: 3, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 3, # 'เ' - 28: 2, # 'แ' - 41: 1, # 'โ' - 29: 2, # 'ใ' - 33: 2, # 'ไ' - 50: 1, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 36: { # 'ำ' - 5: 2, # 'ก' - 30: 1, # 'ข' - 24: 3, # 'ค' - 8: 2, # 'ง' - 26: 1, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 1, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 1, # 'ด' - 19: 1, # 'ต' - 44: 1, # 'ถ' - 14: 1, # 'ท' - 48: 0, # 'ธ' - 3: 3, # 'น' - 17: 1, # 'บ' - 25: 1, # 'ป' - 39: 1, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 0, # 'ฟ' - 45: 1, # 'ภ' - 9: 1, # 'ม' - 16: 0, # 'ย' - 2: 2, # 'ร' - 61: 0, # 'ฤ' - 15: 2, # 'ล' - 12: 1, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 1, # 'ส' - 21: 3, # 'ห' - 4: 1, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 3, # 'เ' - 28: 2, # 'แ' - 41: 1, # 'โ' - 29: 2, # 'ใ' - 33: 2, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 
56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 23: { # 'ิ' - 5: 3, # 'ก' - 30: 1, # 'ข' - 24: 2, # 'ค' - 8: 3, # 'ง' - 26: 3, # 'จ' - 52: 0, # 'ฉ' - 34: 3, # 'ช' - 51: 0, # 'ซ' - 47: 2, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 3, # 'ด' - 19: 3, # 'ต' - 44: 1, # 'ถ' - 14: 3, # 'ท' - 48: 3, # 'ธ' - 3: 3, # 'น' - 17: 3, # 'บ' - 25: 2, # 'ป' - 39: 2, # 'ผ' - 62: 0, # 'ฝ' - 31: 3, # 'พ' - 54: 1, # 'ฟ' - 45: 2, # 'ภ' - 9: 3, # 'ม' - 16: 2, # 'ย' - 2: 2, # 'ร' - 61: 0, # 'ฤ' - 15: 2, # 'ล' - 12: 3, # 'ว' - 42: 3, # 'ศ' - 46: 2, # 'ษ' - 18: 2, # 'ส' - 21: 3, # 'ห' - 4: 1, # 'อ' - 63: 1, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 3, # 'เ' - 28: 1, # 'แ' - 41: 1, # 'โ' - 29: 1, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 3, # '่' - 7: 2, # '้' - 38: 2, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 13: { # 'ี' - 5: 3, # 'ก' - 30: 2, # 'ข' - 24: 2, # 'ค' - 8: 0, # 'ง' - 26: 1, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 2, # 'ด' - 19: 1, # 'ต' - 44: 0, # 'ถ' - 14: 2, # 'ท' - 48: 0, # 'ธ' - 3: 1, # 'น' - 17: 2, # 'บ' - 25: 2, # 'ป' - 39: 1, # 'ผ' - 62: 0, # 'ฝ' - 31: 2, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 2, # 'ม' - 16: 3, # 'ย' - 2: 2, # 'ร' - 61: 0, # 'ฤ' - 15: 1, # 'ล' - 12: 2, # 'ว' - 42: 1, # 'ศ' - 46: 0, # 'ษ' - 18: 2, # 'ส' - 21: 1, # 'ห' - 4: 2, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 2, # 'เ' - 28: 2, # 'แ' - 41: 1, # 'โ' - 29: 1, # 'ใ' - 33: 1, # 'ไ' - 50: 1, # 'ๆ' - 37: 0, # '็' - 6: 3, # '่' - 7: 3, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 40: { # 'ึ' - 5: 3, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 3, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 1, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 1, # 'ม' - 16: 0, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 3, # '่' - 7: 3, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 27: { # 'ื' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 1, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 2, # 'น' - 17: 3, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 2, # 'ม' - 16: 0, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 3, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 
1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 3, # '่' - 7: 3, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 32: { # 'ุ' - 5: 3, # 'ก' - 30: 2, # 'ข' - 24: 3, # 'ค' - 8: 3, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 2, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 1, # 'ฒ' - 43: 3, # 'ณ' - 20: 3, # 'ด' - 19: 3, # 'ต' - 44: 1, # 'ถ' - 14: 2, # 'ท' - 48: 1, # 'ธ' - 3: 2, # 'น' - 17: 2, # 'บ' - 25: 2, # 'ป' - 39: 2, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 0, # 'ฟ' - 45: 1, # 'ภ' - 9: 3, # 'ม' - 16: 1, # 'ย' - 2: 2, # 'ร' - 61: 0, # 'ฤ' - 15: 2, # 'ล' - 12: 1, # 'ว' - 42: 1, # 'ศ' - 46: 2, # 'ษ' - 18: 1, # 'ส' - 21: 1, # 'ห' - 4: 1, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 1, # 'เ' - 28: 0, # 'แ' - 41: 1, # 'โ' - 29: 0, # 'ใ' - 33: 1, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 3, # '่' - 7: 2, # '้' - 38: 1, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 35: { # 'ู' - 5: 3, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 2, # 'ง' - 26: 1, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 2, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 1, # 'ณ' - 20: 2, # 'ด' - 19: 2, # 'ต' - 44: 0, # 'ถ' - 14: 1, # 'ท' - 48: 0, # 'ธ' - 3: 2, # 'น' - 17: 0, # 'บ' - 25: 3, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 2, # 'ม' - 16: 0, # 'ย' - 2: 1, # 'ร' - 61: 0, # 'ฤ' - 15: 3, # 'ล' - 12: 1, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 1, # 'เ' - 28: 1, # 'แ' - 41: 1, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 3, # '่' - 7: 3, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 11: { # 'เ' - 5: 3, # 'ก' - 30: 3, # 'ข' - 24: 3, # 'ค' - 8: 2, # 'ง' - 26: 3, # 'จ' - 52: 3, # 'ฉ' - 34: 3, # 'ช' - 51: 2, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 1, # 'ณ' - 20: 3, # 'ด' - 19: 3, # 'ต' - 44: 1, # 'ถ' - 14: 3, # 'ท' - 48: 1, # 'ธ' - 3: 3, # 'น' - 17: 3, # 'บ' - 25: 3, # 'ป' - 39: 2, # 'ผ' - 62: 1, # 'ฝ' - 31: 3, # 'พ' - 54: 1, # 'ฟ' - 45: 3, # 'ภ' - 9: 3, # 'ม' - 16: 2, # 'ย' - 2: 3, # 'ร' - 61: 0, # 'ฤ' - 15: 3, # 'ล' - 12: 3, # 'ว' - 42: 2, # 'ศ' - 46: 0, # 'ษ' - 18: 3, # 'ส' - 21: 3, # 'ห' - 4: 3, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 28: { # 'แ' - 5: 3, # 'ก' - 30: 2, # 'ข' - 24: 2, # 'ค' - 8: 1, # 'ง' - 26: 2, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 2, # 'ด' - 19: 3, # 'ต' - 44: 2, # 'ถ' - 14: 3, # 'ท' - 48: 0, # 'ธ' - 3: 3, # 'น' - 17: 3, # 'บ' - 25: 2, # 'ป' - 39: 3, # 'ผ' - 
62: 0, # 'ฝ' - 31: 2, # 'พ' - 54: 2, # 'ฟ' - 45: 0, # 'ภ' - 9: 2, # 'ม' - 16: 2, # 'ย' - 2: 2, # 'ร' - 61: 0, # 'ฤ' - 15: 3, # 'ล' - 12: 2, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 3, # 'ส' - 21: 3, # 'ห' - 4: 1, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 41: { # 'โ' - 5: 2, # 'ก' - 30: 1, # 'ข' - 24: 2, # 'ค' - 8: 0, # 'ง' - 26: 1, # 'จ' - 52: 1, # 'ฉ' - 34: 1, # 'ช' - 51: 1, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 3, # 'ด' - 19: 2, # 'ต' - 44: 0, # 'ถ' - 14: 2, # 'ท' - 48: 0, # 'ธ' - 3: 3, # 'น' - 17: 1, # 'บ' - 25: 3, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 1, # 'ฟ' - 45: 1, # 'ภ' - 9: 1, # 'ม' - 16: 2, # 'ย' - 2: 2, # 'ร' - 61: 0, # 'ฤ' - 15: 3, # 'ล' - 12: 0, # 'ว' - 42: 1, # 'ศ' - 46: 0, # 'ษ' - 18: 2, # 'ส' - 21: 0, # 'ห' - 4: 2, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 29: { # 'ใ' - 5: 2, # 'ก' - 30: 0, # 'ข' - 24: 1, # 'ค' - 8: 0, # 'ง' - 26: 3, # 'จ' - 52: 0, # 'ฉ' - 34: 3, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 3, # 'ด' - 19: 1, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 3, # 'น' - 17: 2, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 1, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 3, # 'ส' - 21: 3, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 33: { # 'ไ' - 5: 1, # 'ก' - 30: 2, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 1, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 3, # 'ด' - 19: 1, # 'ต' - 44: 0, # 'ถ' - 14: 3, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 1, # 'บ' - 25: 3, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 2, # 'ฟ' - 45: 0, # 'ภ' - 9: 3, # 'ม' - 16: 0, # 'ย' - 2: 3, # 'ร' - 61: 0, # 'ฤ' - 15: 1, # 'ล' - 12: 3, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 1, # 'ส' - 21: 2, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 50: { # 'ๆ' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 
0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 37: { # '็' - 5: 2, # 'ก' - 30: 1, # 'ข' - 24: 2, # 'ค' - 8: 2, # 'ง' - 26: 3, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 1, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 1, # 'ด' - 19: 2, # 'ต' - 44: 0, # 'ถ' - 14: 1, # 'ท' - 48: 0, # 'ธ' - 3: 3, # 'น' - 17: 3, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 2, # 'ม' - 16: 1, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 2, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 1, # 'ส' - 21: 0, # 'ห' - 4: 1, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 1, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 1, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 6: { # '่' - 5: 2, # 'ก' - 30: 1, # 'ข' - 24: 2, # 'ค' - 8: 3, # 'ง' - 26: 2, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 1, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 1, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 1, # 'ด' - 19: 2, # 'ต' - 44: 1, # 'ถ' - 14: 2, # 'ท' - 48: 1, # 'ธ' - 3: 3, # 'น' - 17: 1, # 'บ' - 25: 2, # 'ป' - 39: 2, # 'ผ' - 62: 1, # 'ฝ' - 31: 1, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 3, # 'ม' - 16: 3, # 'ย' - 2: 2, # 'ร' - 61: 0, # 'ฤ' - 15: 2, # 'ล' - 12: 3, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 2, # 'ส' - 21: 1, # 'ห' - 4: 3, # 'อ' - 63: 0, # 'ฯ' - 22: 1, # 'ะ' - 10: 0, # 'ั' - 1: 3, # 'า' - 36: 2, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 3, # 'เ' - 28: 2, # 'แ' - 41: 1, # 'โ' - 29: 2, # 'ใ' - 33: 2, # 'ไ' - 50: 1, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 7: { # '้' - 5: 2, # 'ก' - 30: 1, # 'ข' - 24: 2, # 'ค' - 8: 3, # 'ง' - 26: 2, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 1, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 1, # 'ด' - 19: 2, # 'ต' - 44: 1, # 'ถ' - 14: 2, # 'ท' - 48: 0, # 'ธ' - 3: 3, # 'น' - 17: 2, # 'บ' - 25: 2, # 'ป' - 39: 2, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 1, # 'ฟ' - 45: 0, # 'ภ' - 9: 3, # 'ม' - 16: 2, # 'ย' - 2: 2, # 'ร' - 61: 0, # 'ฤ' - 15: 1, # 'ล' - 12: 3, # 'ว' - 42: 1, # 'ศ' - 46: 0, # 'ษ' - 18: 2, # 'ส' - 21: 2, # 'ห' - 4: 3, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 3, # 'า' - 36: 2, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 2, # 'เ' - 28: 2, # 'แ' - 41: 1, # 
'โ' - 29: 2, # 'ใ' - 33: 2, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 38: { # '์' - 5: 2, # 'ก' - 30: 1, # 'ข' - 24: 1, # 'ค' - 8: 0, # 'ง' - 26: 1, # 'จ' - 52: 0, # 'ฉ' - 34: 1, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 2, # 'ด' - 19: 1, # 'ต' - 44: 1, # 'ถ' - 14: 1, # 'ท' - 48: 0, # 'ธ' - 3: 1, # 'น' - 17: 1, # 'บ' - 25: 1, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 1, # 'พ' - 54: 1, # 'ฟ' - 45: 0, # 'ภ' - 9: 2, # 'ม' - 16: 0, # 'ย' - 2: 1, # 'ร' - 61: 1, # 'ฤ' - 15: 1, # 'ล' - 12: 1, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 1, # 'ส' - 21: 1, # 'ห' - 4: 2, # 'อ' - 63: 1, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 2, # 'เ' - 28: 2, # 'แ' - 41: 1, # 'โ' - 29: 1, # 'ใ' - 33: 1, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 0, # '๑' - 59: 0, # '๒' - 60: 0, # '๕' - }, - 56: { # '๑' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 2, # '๑' - 59: 1, # '๒' - 60: 1, # '๕' - }, - 59: { # '๒' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 0, # 'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 1, # '๑' - 59: 1, # '๒' - 60: 3, # '๕' - }, - 60: { # '๕' - 5: 0, # 'ก' - 30: 0, # 'ข' - 24: 0, # 'ค' - 8: 0, # 'ง' - 26: 0, # 'จ' - 52: 0, # 'ฉ' - 34: 0, # 'ช' - 51: 0, # 'ซ' - 47: 0, # 'ญ' - 58: 0, # 'ฎ' - 57: 0, # 'ฏ' - 49: 0, # 'ฐ' - 53: 0, # 'ฑ' - 55: 0, # 'ฒ' - 43: 0, # 'ณ' - 20: 0, # 'ด' - 19: 0, # 'ต' - 44: 0, # 'ถ' - 14: 0, # 'ท' - 48: 0, # 'ธ' - 3: 0, # 'น' - 17: 0, # 'บ' - 25: 0, # 'ป' - 39: 0, # 'ผ' - 62: 0, # 'ฝ' - 31: 0, # 'พ' - 54: 0, # 'ฟ' - 45: 0, # 'ภ' - 9: 0, # 'ม' - 16: 0, # 'ย' - 2: 0, # 'ร' - 61: 0, # 'ฤ' - 15: 0, # 'ล' - 12: 0, # 'ว' - 42: 0, # 
'ศ' - 46: 0, # 'ษ' - 18: 0, # 'ส' - 21: 0, # 'ห' - 4: 0, # 'อ' - 63: 0, # 'ฯ' - 22: 0, # 'ะ' - 10: 0, # 'ั' - 1: 0, # 'า' - 36: 0, # 'ำ' - 23: 0, # 'ิ' - 13: 0, # 'ี' - 40: 0, # 'ึ' - 27: 0, # 'ื' - 32: 0, # 'ุ' - 35: 0, # 'ู' - 11: 0, # 'เ' - 28: 0, # 'แ' - 41: 0, # 'โ' - 29: 0, # 'ใ' - 33: 0, # 'ไ' - 50: 0, # 'ๆ' - 37: 0, # '็' - 6: 0, # '่' - 7: 0, # '้' - 38: 0, # '์' - 56: 2, # '๑' - 59: 1, # '๒' - 60: 0, # '๕' - }, -} - -# 255: Undefined characters that did not exist in training text -# 254: Carriage/Return -# 253: symbol (punctuation) that does not belong to word -# 252: 0 - 9 -# 251: Control characters - -# Character Mapping Table(s): -TIS_620_THAI_CHAR_TO_ORDER = { - 0: 255, # '\x00' - 1: 255, # '\x01' - 2: 255, # '\x02' - 3: 255, # '\x03' - 4: 255, # '\x04' - 5: 255, # '\x05' - 6: 255, # '\x06' - 7: 255, # '\x07' - 8: 255, # '\x08' - 9: 255, # '\t' - 10: 254, # '\n' - 11: 255, # '\x0b' - 12: 255, # '\x0c' - 13: 254, # '\r' - 14: 255, # '\x0e' - 15: 255, # '\x0f' - 16: 255, # '\x10' - 17: 255, # '\x11' - 18: 255, # '\x12' - 19: 255, # '\x13' - 20: 255, # '\x14' - 21: 255, # '\x15' - 22: 255, # '\x16' - 23: 255, # '\x17' - 24: 255, # '\x18' - 25: 255, # '\x19' - 26: 255, # '\x1a' - 27: 255, # '\x1b' - 28: 255, # '\x1c' - 29: 255, # '\x1d' - 30: 255, # '\x1e' - 31: 255, # '\x1f' - 32: 253, # ' ' - 33: 253, # '!' - 34: 253, # '"' - 35: 253, # '#' - 36: 253, # '$' - 37: 253, # '%' - 38: 253, # '&' - 39: 253, # "'" - 40: 253, # '(' - 41: 253, # ')' - 42: 253, # '*' - 43: 253, # '+' - 44: 253, # ',' - 45: 253, # '-' - 46: 253, # '.' - 47: 253, # '/' - 48: 252, # '0' - 49: 252, # '1' - 50: 252, # '2' - 51: 252, # '3' - 52: 252, # '4' - 53: 252, # '5' - 54: 252, # '6' - 55: 252, # '7' - 56: 252, # '8' - 57: 252, # '9' - 58: 253, # ':' - 59: 253, # ';' - 60: 253, # '<' - 61: 253, # '=' - 62: 253, # '>' - 63: 253, # '?' 
- 64: 253, # '@' - 65: 182, # 'A' - 66: 106, # 'B' - 67: 107, # 'C' - 68: 100, # 'D' - 69: 183, # 'E' - 70: 184, # 'F' - 71: 185, # 'G' - 72: 101, # 'H' - 73: 94, # 'I' - 74: 186, # 'J' - 75: 187, # 'K' - 76: 108, # 'L' - 77: 109, # 'M' - 78: 110, # 'N' - 79: 111, # 'O' - 80: 188, # 'P' - 81: 189, # 'Q' - 82: 190, # 'R' - 83: 89, # 'S' - 84: 95, # 'T' - 85: 112, # 'U' - 86: 113, # 'V' - 87: 191, # 'W' - 88: 192, # 'X' - 89: 193, # 'Y' - 90: 194, # 'Z' - 91: 253, # '[' - 92: 253, # '\\' - 93: 253, # ']' - 94: 253, # '^' - 95: 253, # '_' - 96: 253, # '`' - 97: 64, # 'a' - 98: 72, # 'b' - 99: 73, # 'c' - 100: 114, # 'd' - 101: 74, # 'e' - 102: 115, # 'f' - 103: 116, # 'g' - 104: 102, # 'h' - 105: 81, # 'i' - 106: 201, # 'j' - 107: 117, # 'k' - 108: 90, # 'l' - 109: 103, # 'm' - 110: 78, # 'n' - 111: 82, # 'o' - 112: 96, # 'p' - 113: 202, # 'q' - 114: 91, # 'r' - 115: 79, # 's' - 116: 84, # 't' - 117: 104, # 'u' - 118: 105, # 'v' - 119: 97, # 'w' - 120: 98, # 'x' - 121: 92, # 'y' - 122: 203, # 'z' - 123: 253, # '{' - 124: 253, # '|' - 125: 253, # '}' - 126: 253, # '~' - 127: 253, # '\x7f' - 128: 209, # '\x80' - 129: 210, # '\x81' - 130: 211, # '\x82' - 131: 212, # '\x83' - 132: 213, # '\x84' - 133: 88, # '\x85' - 134: 214, # '\x86' - 135: 215, # '\x87' - 136: 216, # '\x88' - 137: 217, # '\x89' - 138: 218, # '\x8a' - 139: 219, # '\x8b' - 140: 220, # '\x8c' - 141: 118, # '\x8d' - 142: 221, # '\x8e' - 143: 222, # '\x8f' - 144: 223, # '\x90' - 145: 224, # '\x91' - 146: 99, # '\x92' - 147: 85, # '\x93' - 148: 83, # '\x94' - 149: 225, # '\x95' - 150: 226, # '\x96' - 151: 227, # '\x97' - 152: 228, # '\x98' - 153: 229, # '\x99' - 154: 230, # '\x9a' - 155: 231, # '\x9b' - 156: 232, # '\x9c' - 157: 233, # '\x9d' - 158: 234, # '\x9e' - 159: 235, # '\x9f' - 160: 236, # None - 161: 5, # 'ก' - 162: 30, # 'ข' - 163: 237, # 'ฃ' - 164: 24, # 'ค' - 165: 238, # 'ฅ' - 166: 75, # 'ฆ' - 167: 8, # 'ง' - 168: 26, # 'จ' - 169: 52, # 'ฉ' - 170: 34, # 'ช' - 171: 51, # 'ซ' - 172: 119, # 'ฌ' - 173: 47, # 'ญ' - 174: 58, # 'ฎ' - 175: 57, # 'ฏ' - 176: 49, # 'ฐ' - 177: 53, # 'ฑ' - 178: 55, # 'ฒ' - 179: 43, # 'ณ' - 180: 20, # 'ด' - 181: 19, # 'ต' - 182: 44, # 'ถ' - 183: 14, # 'ท' - 184: 48, # 'ธ' - 185: 3, # 'น' - 186: 17, # 'บ' - 187: 25, # 'ป' - 188: 39, # 'ผ' - 189: 62, # 'ฝ' - 190: 31, # 'พ' - 191: 54, # 'ฟ' - 192: 45, # 'ภ' - 193: 9, # 'ม' - 194: 16, # 'ย' - 195: 2, # 'ร' - 196: 61, # 'ฤ' - 197: 15, # 'ล' - 198: 239, # 'ฦ' - 199: 12, # 'ว' - 200: 42, # 'ศ' - 201: 46, # 'ษ' - 202: 18, # 'ส' - 203: 21, # 'ห' - 204: 76, # 'ฬ' - 205: 4, # 'อ' - 206: 66, # 'ฮ' - 207: 63, # 'ฯ' - 208: 22, # 'ะ' - 209: 10, # 'ั' - 210: 1, # 'า' - 211: 36, # 'ำ' - 212: 23, # 'ิ' - 213: 13, # 'ี' - 214: 40, # 'ึ' - 215: 27, # 'ื' - 216: 32, # 'ุ' - 217: 35, # 'ู' - 218: 86, # 'ฺ' - 219: 240, # None - 220: 241, # None - 221: 242, # None - 222: 243, # None - 223: 244, # '฿' - 224: 11, # 'เ' - 225: 28, # 'แ' - 226: 41, # 'โ' - 227: 29, # 'ใ' - 228: 33, # 'ไ' - 229: 245, # 'ๅ' - 230: 50, # 'ๆ' - 231: 37, # '็' - 232: 6, # '่' - 233: 7, # '้' - 234: 67, # '๊' - 235: 77, # '๋' - 236: 38, # '์' - 237: 93, # 'ํ' - 238: 246, # '๎' - 239: 247, # '๏' - 240: 68, # '๐' - 241: 56, # '๑' - 242: 59, # '๒' - 243: 65, # '๓' - 244: 69, # '๔' - 245: 60, # '๕' - 246: 70, # '๖' - 247: 80, # '๗' - 248: 71, # '๘' - 249: 87, # '๙' - 250: 248, # '๚' - 251: 249, # '๛' - 252: 250, # None - 253: 251, # None - 254: 252, # None - 255: 253, # None -} - -TIS_620_THAI_MODEL = SingleByteCharSetModel( - charset_name="TIS-620", - language="Thai", - 
char_to_order_map=TIS_620_THAI_CHAR_TO_ORDER, - language_model=THAI_LANG_MODEL, - typical_positive_ratio=0.926386, - keep_ascii_letters=False, - alphabet="กขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลฦวศษสหฬอฮฯะัาำิีึืฺุู฿เแโใไๅๆ็่้๊๋์ํ๎๏๐๑๒๓๔๕๖๗๘๙๚๛", -) diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/_distutils/_msvccompiler.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/_distutils/_msvccompiler.py deleted file mode 100644 index 3b5a8179bd69f1e27480224791ea7cc4a55802b0..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/_distutils/_msvccompiler.py +++ /dev/null @@ -1,591 +0,0 @@ -"""distutils._msvccompiler - -Contains MSVCCompiler, an implementation of the abstract CCompiler class -for Microsoft Visual Studio 2015. - -The module is compatible with VS 2015 and later. You can find legacy support -for older versions in distutils.msvc9compiler and distutils.msvccompiler. -""" - -# Written by Perry Stoll -# hacked by Robin Becker and Thomas Heller to do a better job of -# finding DevStudio (through the registry) -# ported to VS 2005 and VS 2008 by Christian Heimes -# ported to VS 2015 by Steve Dower - -import os -import subprocess -import contextlib -import warnings -import unittest.mock - -with contextlib.suppress(ImportError): - import winreg - -from distutils.errors import ( - DistutilsExecError, - DistutilsPlatformError, - CompileError, - LibError, - LinkError, -) -from distutils.ccompiler import CCompiler, gen_lib_options -from distutils import log -from distutils.util import get_platform - -from itertools import count - - -def _find_vc2015(): - try: - key = winreg.OpenKeyEx( - winreg.HKEY_LOCAL_MACHINE, - r"Software\Microsoft\VisualStudio\SxS\VC7", - access=winreg.KEY_READ | winreg.KEY_WOW64_32KEY, - ) - except OSError: - log.debug("Visual C++ is not registered") - return None, None - - best_version = 0 - best_dir = None - with key: - for i in count(): - try: - v, vc_dir, vt = winreg.EnumValue(key, i) - except OSError: - break - if v and vt == winreg.REG_SZ and os.path.isdir(vc_dir): - try: - version = int(float(v)) - except (ValueError, TypeError): - continue - if version >= 14 and version > best_version: - best_version, best_dir = version, vc_dir - return best_version, best_dir - - -def _find_vc2017(): - """Returns "15, path" based on the result of invoking vswhere.exe - If no install is found, returns "None, None" - - The version is returned to avoid unnecessarily changing the function - result. It may be ignored when the path is not None. - - If vswhere.exe is not available, by definition, VS 2017 is not - installed. 
- """ - root = os.environ.get("ProgramFiles(x86)") or os.environ.get("ProgramFiles") - if not root: - return None, None - - try: - path = subprocess.check_output( - [ - os.path.join( - root, "Microsoft Visual Studio", "Installer", "vswhere.exe" - ), - "-latest", - "-prerelease", - "-requires", - "Microsoft.VisualStudio.Component.VC.Tools.x86.x64", - "-property", - "installationPath", - "-products", - "*", - ], - encoding="mbcs", - errors="strict", - ).strip() - except (subprocess.CalledProcessError, OSError, UnicodeDecodeError): - return None, None - - path = os.path.join(path, "VC", "Auxiliary", "Build") - if os.path.isdir(path): - return 15, path - - return None, None - - -PLAT_SPEC_TO_RUNTIME = { - 'x86': 'x86', - 'x86_amd64': 'x64', - 'x86_arm': 'arm', - 'x86_arm64': 'arm64', -} - - -def _find_vcvarsall(plat_spec): - # bpo-38597: Removed vcruntime return value - _, best_dir = _find_vc2017() - - if not best_dir: - best_version, best_dir = _find_vc2015() - - if not best_dir: - log.debug("No suitable Visual C++ version found") - return None, None - - vcvarsall = os.path.join(best_dir, "vcvarsall.bat") - if not os.path.isfile(vcvarsall): - log.debug("%s cannot be found", vcvarsall) - return None, None - - return vcvarsall, None - - -def _get_vc_env(plat_spec): - if os.getenv("DISTUTILS_USE_SDK"): - return {key.lower(): value for key, value in os.environ.items()} - - vcvarsall, _ = _find_vcvarsall(plat_spec) - if not vcvarsall: - raise DistutilsPlatformError("Unable to find vcvarsall.bat") - - try: - out = subprocess.check_output( - 'cmd /u /c "{}" {} && set'.format(vcvarsall, plat_spec), - stderr=subprocess.STDOUT, - ).decode('utf-16le', errors='replace') - except subprocess.CalledProcessError as exc: - log.error(exc.output) - raise DistutilsPlatformError("Error executing {}".format(exc.cmd)) - - env = { - key.lower(): value - for key, _, value in (line.partition('=') for line in out.splitlines()) - if key and value - } - - return env - - -def _find_exe(exe, paths=None): - """Return path to an MSVC executable program. - - Tries to find the program in several places: first, one of the - MSVC program search paths from the registry; next, the directories - in the PATH environment variable. If any of those work, return an - absolute path that is known to exist. If none of them work, just - return the original program name, 'exe'. - """ - if not paths: - paths = os.getenv('path').split(os.pathsep) - for p in paths: - fn = os.path.join(os.path.abspath(p), exe) - if os.path.isfile(fn): - return fn - return exe - - -# A map keyed by get_platform() return values to values accepted by -# 'vcvarsall.bat'. Always cross-compile from x86 to work with the -# lighter-weight MSVC installs that do not include native 64-bit tools. -PLAT_TO_VCVARS = { - 'win32': 'x86', - 'win-amd64': 'x86_amd64', - 'win-arm32': 'x86_arm', - 'win-arm64': 'x86_arm64', -} - - -class MSVCCompiler(CCompiler): - """Concrete class that implements an interface to Microsoft Visual C++, - as defined by the CCompiler abstract class.""" - - compiler_type = 'msvc' - - # Just set this so CCompiler's constructor doesn't barf. We currently - # don't use the 'set_executables()' bureaucracy provided by CCompiler, - # as it really isn't necessary for this sort of single-compiler class. - # Would be nice to have a consistent interface with UnixCCompiler, - # though, so it's worth thinking about. 
- executables = {} - - # Private class data (need to distinguish C from C++ source for compiler) - _c_extensions = ['.c'] - _cpp_extensions = ['.cc', '.cpp', '.cxx'] - _rc_extensions = ['.rc'] - _mc_extensions = ['.mc'] - - # Needed for the filename generation methods provided by the - # base class, CCompiler. - src_extensions = _c_extensions + _cpp_extensions + _rc_extensions + _mc_extensions - res_extension = '.res' - obj_extension = '.obj' - static_lib_extension = '.lib' - shared_lib_extension = '.dll' - static_lib_format = shared_lib_format = '%s%s' - exe_extension = '.exe' - - def __init__(self, verbose=0, dry_run=0, force=0): - super().__init__(verbose, dry_run, force) - # target platform (.plat_name is consistent with 'bdist') - self.plat_name = None - self.initialized = False - - def initialize(self, plat_name=None): - # multi-init means we would need to check platform same each time... - assert not self.initialized, "don't init multiple times" - if plat_name is None: - plat_name = get_platform() - # sanity check for platforms to prevent obscure errors later. - if plat_name not in PLAT_TO_VCVARS: - raise DistutilsPlatformError( - "--plat-name must be one of {}".format(tuple(PLAT_TO_VCVARS)) - ) - - # Get the vcvarsall.bat spec for the requested platform. - plat_spec = PLAT_TO_VCVARS[plat_name] - - vc_env = _get_vc_env(plat_spec) - if not vc_env: - raise DistutilsPlatformError( - "Unable to find a compatible " "Visual Studio installation." - ) - - self._paths = vc_env.get('path', '') - paths = self._paths.split(os.pathsep) - self.cc = _find_exe("cl.exe", paths) - self.linker = _find_exe("link.exe", paths) - self.lib = _find_exe("lib.exe", paths) - self.rc = _find_exe("rc.exe", paths) # resource compiler - self.mc = _find_exe("mc.exe", paths) # message compiler - self.mt = _find_exe("mt.exe", paths) # message compiler - - for dir in vc_env.get('include', '').split(os.pathsep): - if dir: - self.add_include_dir(dir.rstrip(os.sep)) - - for dir in vc_env.get('lib', '').split(os.pathsep): - if dir: - self.add_library_dir(dir.rstrip(os.sep)) - - self.preprocess_options = None - # bpo-38597: Always compile with dynamic linking - # Future releases of Python 3.x will include all past - # versions of vcruntime*.dll for compatibility. 
- self.compile_options = ['/nologo', '/O2', '/W3', '/GL', '/DNDEBUG', '/MD'] - - self.compile_options_debug = [ - '/nologo', - '/Od', - '/MDd', - '/Zi', - '/W3', - '/D_DEBUG', - ] - - ldflags = ['/nologo', '/INCREMENTAL:NO', '/LTCG'] - - ldflags_debug = ['/nologo', '/INCREMENTAL:NO', '/LTCG', '/DEBUG:FULL'] - - self.ldflags_exe = [*ldflags, '/MANIFEST:EMBED,ID=1'] - self.ldflags_exe_debug = [*ldflags_debug, '/MANIFEST:EMBED,ID=1'] - self.ldflags_shared = [ - *ldflags, - '/DLL', - '/MANIFEST:EMBED,ID=2', - '/MANIFESTUAC:NO', - ] - self.ldflags_shared_debug = [ - *ldflags_debug, - '/DLL', - '/MANIFEST:EMBED,ID=2', - '/MANIFESTUAC:NO', - ] - self.ldflags_static = [*ldflags] - self.ldflags_static_debug = [*ldflags_debug] - - self._ldflags = { - (CCompiler.EXECUTABLE, None): self.ldflags_exe, - (CCompiler.EXECUTABLE, False): self.ldflags_exe, - (CCompiler.EXECUTABLE, True): self.ldflags_exe_debug, - (CCompiler.SHARED_OBJECT, None): self.ldflags_shared, - (CCompiler.SHARED_OBJECT, False): self.ldflags_shared, - (CCompiler.SHARED_OBJECT, True): self.ldflags_shared_debug, - (CCompiler.SHARED_LIBRARY, None): self.ldflags_static, - (CCompiler.SHARED_LIBRARY, False): self.ldflags_static, - (CCompiler.SHARED_LIBRARY, True): self.ldflags_static_debug, - } - - self.initialized = True - - # -- Worker methods ------------------------------------------------ - - def object_filenames(self, source_filenames, strip_dir=0, output_dir=''): - ext_map = { - **{ext: self.obj_extension for ext in self.src_extensions}, - **{ - ext: self.res_extension - for ext in self._rc_extensions + self._mc_extensions - }, - } - - output_dir = output_dir or '' - - def make_out_path(p): - base, ext = os.path.splitext(p) - if strip_dir: - base = os.path.basename(base) - else: - _, base = os.path.splitdrive(base) - if base.startswith((os.path.sep, os.path.altsep)): - base = base[1:] - try: - # XXX: This may produce absurdly long paths. We should check - # the length of the result and trim base until we fit within - # 260 characters. 
- return os.path.join(output_dir, base + ext_map[ext]) - except LookupError: - # Better to raise an exception instead of silently continuing - # and later complain about sources and targets having - # different lengths - raise CompileError("Don't know how to compile {}".format(p)) - - return list(map(make_out_path, source_filenames)) - - def compile( - self, - sources, - output_dir=None, - macros=None, - include_dirs=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - depends=None, - ): - - if not self.initialized: - self.initialize() - compile_info = self._setup_compile( - output_dir, macros, include_dirs, sources, depends, extra_postargs - ) - macros, objects, extra_postargs, pp_opts, build = compile_info - - compile_opts = extra_preargs or [] - compile_opts.append('/c') - if debug: - compile_opts.extend(self.compile_options_debug) - else: - compile_opts.extend(self.compile_options) - - add_cpp_opts = False - - for obj in objects: - try: - src, ext = build[obj] - except KeyError: - continue - if debug: - # pass the full pathname to MSVC in debug mode, - # this allows the debugger to find the source file - # without asking the user to browse for it - src = os.path.abspath(src) - - if ext in self._c_extensions: - input_opt = "/Tc" + src - elif ext in self._cpp_extensions: - input_opt = "/Tp" + src - add_cpp_opts = True - elif ext in self._rc_extensions: - # compile .RC to .RES file - input_opt = src - output_opt = "/fo" + obj - try: - self.spawn([self.rc] + pp_opts + [output_opt, input_opt]) - except DistutilsExecError as msg: - raise CompileError(msg) - continue - elif ext in self._mc_extensions: - # Compile .MC to .RC file to .RES file. - # * '-h dir' specifies the directory for the - # generated include file - # * '-r dir' specifies the target directory of the - # generated RC file and the binary message resource - # it includes - # - # For now (since there are no options to change this), - # we use the source-directory for the include file and - # the build directory for the RC file and message - # resources. This works at least for win32all. - h_dir = os.path.dirname(src) - rc_dir = os.path.dirname(obj) - try: - # first compile .MC to .RC and .H file - self.spawn([self.mc, '-h', h_dir, '-r', rc_dir, src]) - base, _ = os.path.splitext(os.path.basename(src)) - rc_file = os.path.join(rc_dir, base + '.rc') - # then compile .RC to .RES file - self.spawn([self.rc, "/fo" + obj, rc_file]) - - except DistutilsExecError as msg: - raise CompileError(msg) - continue - else: - # how to handle this file? - raise CompileError( - "Don't know how to compile {} to {}".format(src, obj) - ) - - args = [self.cc] + compile_opts + pp_opts - if add_cpp_opts: - args.append('/EHsc') - args.append(input_opt) - args.append("/Fo" + obj) - args.extend(extra_postargs) - - try: - self.spawn(args) - except DistutilsExecError as msg: - raise CompileError(msg) - - return objects - - def create_static_lib( - self, objects, output_libname, output_dir=None, debug=0, target_lang=None - ): - - if not self.initialized: - self.initialize() - objects, output_dir = self._fix_object_args(objects, output_dir) - output_filename = self.library_filename(output_libname, output_dir=output_dir) - - if self._need_link(objects, output_filename): - lib_args = objects + ['/OUT:' + output_filename] - if debug: - pass # XXX what goes here? 
- try: - log.debug('Executing "%s" %s', self.lib, ' '.join(lib_args)) - self.spawn([self.lib] + lib_args) - except DistutilsExecError as msg: - raise LibError(msg) - else: - log.debug("skipping %s (up-to-date)", output_filename) - - def link( - self, - target_desc, - objects, - output_filename, - output_dir=None, - libraries=None, - library_dirs=None, - runtime_library_dirs=None, - export_symbols=None, - debug=0, - extra_preargs=None, - extra_postargs=None, - build_temp=None, - target_lang=None, - ): - - if not self.initialized: - self.initialize() - objects, output_dir = self._fix_object_args(objects, output_dir) - fixed_args = self._fix_lib_args(libraries, library_dirs, runtime_library_dirs) - libraries, library_dirs, runtime_library_dirs = fixed_args - - if runtime_library_dirs: - self.warn( - "I don't know what to do with 'runtime_library_dirs': " - + str(runtime_library_dirs) - ) - - lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs, libraries) - if output_dir is not None: - output_filename = os.path.join(output_dir, output_filename) - - if self._need_link(objects, output_filename): - ldflags = self._ldflags[target_desc, debug] - - export_opts = ["/EXPORT:" + sym for sym in (export_symbols or [])] - - ld_args = ( - ldflags + lib_opts + export_opts + objects + ['/OUT:' + output_filename] - ) - - # The MSVC linker generates .lib and .exp files, which cannot be - # suppressed by any linker switches. The .lib files may even be - # needed! Make sure they are generated in the temporary build - # directory. Since they have different names for debug and release - # builds, they can go into the same directory. - build_temp = os.path.dirname(objects[0]) - if export_symbols is not None: - (dll_name, dll_ext) = os.path.splitext( - os.path.basename(output_filename) - ) - implib_file = os.path.join(build_temp, self.library_filename(dll_name)) - ld_args.append('/IMPLIB:' + implib_file) - - if extra_preargs: - ld_args[:0] = extra_preargs - if extra_postargs: - ld_args.extend(extra_postargs) - - output_dir = os.path.dirname(os.path.abspath(output_filename)) - self.mkpath(output_dir) - try: - log.debug('Executing "%s" %s', self.linker, ' '.join(ld_args)) - self.spawn([self.linker] + ld_args) - except DistutilsExecError as msg: - raise LinkError(msg) - else: - log.debug("skipping %s (up-to-date)", output_filename) - - def spawn(self, cmd): - env = dict(os.environ, PATH=self._paths) - with self._fallback_spawn(cmd, env) as fallback: - return super().spawn(cmd, env=env) - return fallback.value - - @contextlib.contextmanager - def _fallback_spawn(self, cmd, env): - """ - Discovered in pypa/distutils#15, some tools monkeypatch the compiler, - so the 'env' kwarg causes a TypeError. Detect this condition and - restore the legacy, unsafe behavior. - """ - bag = type('Bag', (), {})() - try: - yield bag - except TypeError as exc: - if "unexpected keyword argument 'env'" not in str(exc): - raise - else: - return - warnings.warn("Fallback spawn triggered. Please update distutils monkeypatch.") - with unittest.mock.patch.dict('os.environ', env): - bag.value = super().spawn(cmd) - - # -- Miscellaneous methods ----------------------------------------- - # These are all used by the 'gen_lib_options() function, in - # ccompiler.py. 
- - def library_dir_option(self, dir): - return "/LIBPATH:" + dir - - def runtime_library_dir_option(self, dir): - raise DistutilsPlatformError( - "don't know how to set runtime library search path for MSVC" - ) - - def library_option(self, lib): - return self.library_filename(lib) - - def find_library_file(self, dirs, lib, debug=0): - # Prefer a debugging library if found (and requested), but deal - # with it if we don't have one. - if debug: - try_names = [lib + "_d", lib] - else: - try_names = [lib] - for dir in dirs: - for name in try_names: - libfile = os.path.join(dir, self.library_filename(name)) - if os.path.isfile(libfile): - return libfile - else: - # Oops, didn't find it in *any* of 'dirs' - return None diff --git a/spaces/tomandandy/MusicGen3/audiocraft/quantization/__init__.py b/spaces/tomandandy/MusicGen3/audiocraft/quantization/__init__.py deleted file mode 100644 index 836d6eb518978480c6b95d6f29ce4f84a9428793..0000000000000000000000000000000000000000 --- a/spaces/tomandandy/MusicGen3/audiocraft/quantization/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -# flake8: noqa -from .vq import ResidualVectorQuantizer -from .base import BaseQuantizer, DummyQuantizer, QuantizedResult diff --git a/spaces/tomaseo2022/Youtube-Mp3/README.md b/spaces/tomaseo2022/Youtube-Mp3/README.md deleted file mode 100644 index 6cf8de002c7d9a9216a556d8e87696734f2500a7..0000000000000000000000000000000000000000 --- a/spaces/tomaseo2022/Youtube-Mp3/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Youtube Mp3 -emoji: 📚 -colorFrom: blue -colorTo: red -sdk: gradio -sdk_version: 3.16.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/yolact/yolact_r101_1x8_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/yolact/yolact_r101_1x8_coco.py deleted file mode 100644 index 2864b590b5538b735a16df3b2690b29a95384df8..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/yolact/yolact_r101_1x8_coco.py +++ /dev/null @@ -1,3 +0,0 @@ -_base_ = './yolact_r50_1x8_coco.py' - -model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) diff --git a/spaces/triple-t/ttt-space/static/_app/immutable/start-56b0bce0.js b/spaces/triple-t/ttt-space/static/_app/immutable/start-56b0bce0.js deleted file mode 100644 index 207fbaf9ea1ee3478088a874a95d9991d7d534dc..0000000000000000000000000000000000000000 --- a/spaces/triple-t/ttt-space/static/_app/immutable/start-56b0bce0.js +++ /dev/null @@ -1 +0,0 @@ -import{S as at,i as rt,s as ot,a as st,e as V,c as it,b as M,g as ue,t as B,d as de,f as F,h as G,j as lt,o as Oe,k as ct,l as ft,m as ut,n as be,p as C,q as dt,r as pt,u as ht,v as H,w as W,x as Ne,y as Y,z as X,A as le}from"./chunks/index-b346583a.js";import{S as tt,I as q,g as ze,f as He,a as ve,b as ce,s as K,i as We,c as fe,P as Ye,d as mt,e as _t,h as gt}from"./chunks/singletons-50e0fde7.js";function yt(a,e){return a==="/"||e==="ignore"?a:e==="never"?a.endsWith("/")?a.slice(0,-1):a:e==="always"&&!a.endsWith("/")?a+"/":a}function wt(a){return a.split("%25").map(decodeURI).join("%25")}function bt(a){for(const e in a)a[e]=decodeURIComponent(a[e]);return a}const 
vt=["href","pathname","search","searchParams","toString","toJSON"];function Et(a,e){const n=new URL(a);for(const i of vt){let o=n[i];Object.defineProperty(n,i,{get(){return e(),o},enumerable:!0,configurable:!0})}return kt(n),n}function kt(a){Object.defineProperty(a,"hash",{get(){throw new Error("Cannot access event.url.hash. Consider using `$page.url.hash` inside a component instead")}})}const St="/__data.json";function Rt(a){return a.replace(/\/$/,"")+St}function Lt(a){let e=5381;if(typeof a=="string"){let n=a.length;for(;n;)e=e*33^a.charCodeAt(--n)}else if(ArrayBuffer.isView(a)){const n=new Uint8Array(a.buffer,a.byteOffset,a.byteLength);let i=n.length;for(;i;)e=e*33^n[--i]}else throw new TypeError("value must be a string or TypedArray");return(e>>>0).toString(36)}const pe=window.fetch;window.fetch=(a,e)=>((a instanceof Request?a.method:(e==null?void 0:e.method)||"GET")!=="GET"&&ee.delete(Ue(a)),pe(a,e));const ee=new Map;function Ot(a,e){const n=Ue(a,e),i=document.querySelector(n);if(i!=null&&i.textContent){const{body:o,...u}=JSON.parse(i.textContent),t=i.getAttribute("data-ttl");return t&&ee.set(n,{body:o,init:u,ttl:1e3*Number(t)}),Promise.resolve(new Response(o,u))}return pe(a,e)}function It(a,e,n){if(ee.size>0){const i=Ue(a,n),o=ee.get(i);if(o){if(performance.now(){const o=/^\[\.\.\.(\w+)(?:=(\w+))?\]$/.exec(i);if(o)return e.push({name:o[1],matcher:o[2],optional:!1,rest:!0,chained:!0}),"(?:/(.*))?";const u=/^\[\[(\w+)(?:=(\w+))?\]\]$/.exec(i);if(u)return e.push({name:u[1],matcher:u[2],optional:!0,rest:!1,chained:!0}),"(?:/([^/]+))?";if(!i)return;const t=i.split(/\[(.+?)\](?!\])/);return"/"+t.map((_,p)=>{if(p%2){if(_.startsWith("x+"))return Ee(String.fromCharCode(parseInt(_.slice(2),16)));if(_.startsWith("u+"))return Ee(String.fromCharCode(..._.slice(2).split("-").map(P=>parseInt(P,16))));const g=At.exec(_);if(!g)throw new Error(`Invalid param: ${_}. 
Params and matcher names can only have underscores and alphanumeric characters.`);const[,w,R,j,T]=g;return e.push({name:j,matcher:T,optional:!!w,rest:!!R,chained:R?p===1&&t[0]==="":!1}),R?"(.*?)":w?"([^/]*)?":"([^/]+?)"}return Ee(_)}).join("")}).join("")}/?$`),params:e}}function Nt(a){return!/^\([^)]+\)$/.test(a)}function Ut(a){return a.slice(1).split("/").filter(Nt)}function $t(a,e,n){const i={},o=a.slice(1);let u="";for(let t=0;t=t;)o[p]=o[p-1],p-=1;continue}return}i[f.name]=_}}if(!u)return i}function Ee(a){return a.normalize().replace(/[[\]]/g,"\\$&").replace(/%/g,"%25").replace(/\//g,"%2[Ff]").replace(/\?/g,"%3[Ff]").replace(/#/g,"%23").replace(/[.*+?^${}()|\\]/g,"\\$&")}function jt(a,e,n,i){const o=new Set(e);return Object.entries(n).map(([f,[_,p,g]])=>{const{pattern:w,params:R}=Pt(f),j={id:f,exec:T=>{const P=w.exec(T);if(P)return $t(P,R,i)},errors:[1,...g||[]].map(T=>a[T]),layouts:[0,...p||[]].map(t),leaf:u(_)};return j.errors.length=j.layouts.length=Math.max(j.errors.length,j.layouts.length),j});function u(f){const _=f<0;return _&&(f=~f),[_,a[f]]}function t(f){return f===void 0?f:[o.has(f),a[f]]}}function Tt(a){let e,n,i;var o=a[0][0];function u(t){return{props:{data:t[2],form:t[1]}}}return o&&(e=H(o,u(a))),{c(){e&&W(e.$$.fragment),n=V()},l(t){e&&Ne(e.$$.fragment,t),n=V()},m(t,f){e&&Y(e,t,f),M(t,n,f),i=!0},p(t,f){const _={};if(f&4&&(_.data=t[2]),f&2&&(_.form=t[1]),o!==(o=t[0][0])){if(e){ue();const p=e;B(p.$$.fragment,1,0,()=>{X(p,1)}),de()}o?(e=H(o,u(t)),W(e.$$.fragment),F(e.$$.fragment,1),Y(e,n.parentNode,n)):e=null}else o&&e.$set(_)},i(t){i||(e&&F(e.$$.fragment,t),i=!0)},o(t){e&&B(e.$$.fragment,t),i=!1},d(t){t&&G(n),e&&X(e,t)}}}function Dt(a){let e,n,i;var o=a[0][0];function u(t){return{props:{data:t[2],$$slots:{default:[Ct]},$$scope:{ctx:t}}}}return o&&(e=H(o,u(a))),{c(){e&&W(e.$$.fragment),n=V()},l(t){e&&Ne(e.$$.fragment,t),n=V()},m(t,f){e&&Y(e,t,f),M(t,n,f),i=!0},p(t,f){const _={};if(f&4&&(_.data=t[2]),f&523&&(_.$$scope={dirty:f,ctx:t}),o!==(o=t[0][0])){if(e){ue();const p=e;B(p.$$.fragment,1,0,()=>{X(p,1)}),de()}o?(e=H(o,u(t)),W(e.$$.fragment),F(e.$$.fragment,1),Y(e,n.parentNode,n)):e=null}else o&&e.$set(_)},i(t){i||(e&&F(e.$$.fragment,t),i=!0)},o(t){e&&B(e.$$.fragment,t),i=!1},d(t){t&&G(n),e&&X(e,t)}}}function Ct(a){let e,n,i;var o=a[0][1];function u(t){return{props:{data:t[3],form:t[1]}}}return o&&(e=H(o,u(a))),{c(){e&&W(e.$$.fragment),n=V()},l(t){e&&Ne(e.$$.fragment,t),n=V()},m(t,f){e&&Y(e,t,f),M(t,n,f),i=!0},p(t,f){const _={};if(f&8&&(_.data=t[3]),f&2&&(_.form=t[1]),o!==(o=t[0][1])){if(e){ue();const p=e;B(p.$$.fragment,1,0,()=>{X(p,1)}),de()}o?(e=H(o,u(t)),W(e.$$.fragment),F(e.$$.fragment,1),Y(e,n.parentNode,n)):e=null}else o&&e.$set(_)},i(t){i||(e&&F(e.$$.fragment,t),i=!0)},o(t){e&&B(e.$$.fragment,t),i=!1},d(t){t&&G(n),e&&X(e,t)}}}function Xe(a){let e,n=a[5]&&Ze(a);return{c(){e=ct("div"),n&&n.c(),this.h()},l(i){e=ft(i,"DIV",{id:!0,"aria-live":!0,"aria-atomic":!0,style:!0});var o=ut(e);n&&n.l(o),o.forEach(G),this.h()},h(){be(e,"id","svelte-announcer"),be(e,"aria-live","assertive"),be(e,"aria-atomic","true"),C(e,"position","absolute"),C(e,"left","0"),C(e,"top","0"),C(e,"clip","rect(0 0 0 0)"),C(e,"clip-path","inset(50%)"),C(e,"overflow","hidden"),C(e,"white-space","nowrap"),C(e,"width","1px"),C(e,"height","1px")},m(i,o){M(i,e,o),n&&n.m(e,null)},p(i,o){i[5]?n?n.p(i,o):(n=Ze(i),n.c(),n.m(e,null)):n&&(n.d(1),n=null)},d(i){i&&G(e),n&&n.d()}}}function Ze(a){let e;return{c(){e=dt(a[6])},l(n){e=pt(n,a[6])},m(n,i){M(n,e,i)},p(n,i){i&64&&ht(e,n[6])},d(n){n&&G(e)}}}function qt(a){let 
e,n,i,o,u;const t=[Dt,Tt],f=[];function _(g,w){return g[0][1]?0:1}e=_(a),n=f[e]=t[e](a);let p=a[4]&&Xe(a);return{c(){n.c(),i=st(),p&&p.c(),o=V()},l(g){n.l(g),i=it(g),p&&p.l(g),o=V()},m(g,w){f[e].m(g,w),M(g,i,w),p&&p.m(g,w),M(g,o,w),u=!0},p(g,[w]){let R=e;e=_(g),e===R?f[e].p(g,w):(ue(),B(f[R],1,1,()=>{f[R]=null}),de(),n=f[e],n?n.p(g,w):(n=f[e]=t[e](g),n.c()),F(n,1),n.m(i.parentNode,i)),g[4]?p?p.p(g,w):(p=Xe(g),p.c(),p.m(o.parentNode,o)):p&&(p.d(1),p=null)},i(g){u||(F(n),u=!0)},o(g){B(n),u=!1},d(g){f[e].d(g),g&&G(i),p&&p.d(g),g&&G(o)}}}function Vt(a,e,n){let{stores:i}=e,{page:o}=e,{components:u}=e,{form:t}=e,{data_0:f=null}=e,{data_1:_=null}=e;lt(i.page.notify);let p=!1,g=!1,w=null;return Oe(()=>{const R=i.page.subscribe(()=>{p&&(n(5,g=!0),n(6,w=document.title||"untitled page"))});return n(4,p=!0),R}),a.$$set=R=>{"stores"in R&&n(7,i=R.stores),"page"in R&&n(8,o=R.page),"components"in R&&n(0,u=R.components),"form"in R&&n(1,t=R.form),"data_0"in R&&n(2,f=R.data_0),"data_1"in R&&n(3,_=R.data_1)},a.$$.update=()=>{a.$$.dirty&384&&i.page.set(o)},[u,t,f,_,p,g,w,i,o]}class Bt extends at{constructor(e){super(),rt(this,e,Vt,qt,ot,{stores:7,page:8,components:0,form:1,data_0:2,data_1:3})}}const Ft="modulepreload",Gt=function(a,e){return new URL(a,e).href},Qe={},ke=function(e,n,i){if(!n||n.length===0)return e();const o=document.getElementsByTagName("link");return Promise.all(n.map(u=>{if(u=Gt(u,i),u in Qe)return;Qe[u]=!0;const t=u.endsWith(".css"),f=t?'[rel="stylesheet"]':"";if(!!i)for(let g=o.length-1;g>=0;g--){const w=o[g];if(w.href===u&&(!t||w.rel==="stylesheet"))return}else if(document.querySelector(`link[href="${u}"]${f}`))return;const p=document.createElement("link");if(p.rel=t?"stylesheet":Ft,t||(p.as="script",p.crossOrigin=""),p.href=u,document.head.appendChild(p),t)return new Promise((g,w)=>{p.addEventListener("load",g),p.addEventListener("error",()=>w(new Error(`Unable to preload CSS for ${u}`)))})})).then(()=>e())},Jt={},he=[()=>ke(()=>import("./chunks/0-e4667d24.js"),["./chunks/0-e4667d24.js","./components/pages/_layout.svelte-81ccf463.js","./chunks/index-b346583a.js","./assets/_layout-a699bab5.css"],import.meta.url),()=>ke(()=>import("./chunks/1-9c6a32b9.js"),["./chunks/1-9c6a32b9.js","./components/error.svelte-cd570e47.js","./chunks/index-b346583a.js","./chunks/singletons-50e0fde7.js"],import.meta.url),()=>ke(()=>import("./chunks/2-5e47ff79.js"),["./chunks/2-5e47ff79.js","./chunks/_page-da46b06b.js","./components/pages/_page.svelte-033df9bc.js","./chunks/index-b346583a.js"],import.meta.url)],Kt=[],Mt={"/":[2]},zt={handleError:({error:a})=>{console.error(a)}};class Ie{constructor(e,n){this.status=e,typeof n=="string"?this.body={message:n}:n?this.body=n:this.body={message:`Error: ${e}`}}toString(){return JSON.stringify(this.body)}}class xe{constructor(e,n){this.status=e,this.location=n}}async function Ht(a){var e;for(const n in a)if(typeof((e=a[n])==null?void 0:e.then)=="function")return Object.fromEntries(await Promise.all(Object.entries(a).map(async([i,o])=>[i,await o])));return a}Object.getOwnPropertyNames(Object.prototype).sort().join("\0");Object.getOwnPropertyNames(Object.prototype).sort().join("\0");const Wt=-1,Yt=-2,Xt=-3,Zt=-4,Qt=-5,xt=-6;function en(a){if(typeof a=="number")return i(a,!0);if(!Array.isArray(a)||a.length===0)throw new Error("Invalid input");const e=a,n=Array(e.length);function i(o,u=!1){if(o===Wt)return;if(o===Xt)return NaN;if(o===Zt)return 1/0;if(o===Qt)return-1/0;if(o===xt)return-0;if(u)throw new Error("Invalid input");if(o in n)return n[o];const t=e[o];if(!t||typeof 
t!="object")n[o]=t;else if(Array.isArray(t))if(typeof t[0]=="string")switch(t[0]){case"Date":n[o]=new Date(t[1]);break;case"Set":const _=new Set;n[o]=_;for(let w=1;w{d&&(j=!0)},blocked:()=>{},type:"goto"})}async function Te(r){const s=oe(r,!1);if(!s)throw new Error(`Attempted to preload a URL that does not belong to this app: ${r}`);return o={id:s.id,promise:Ve(s).then(c=>(c.type==="loaded"&&c.state.error&&(o=null),c))},o.promise}async function ae(...r){const c=Se.filter(l=>r.some(h=>l.exec(h))).map(l=>Promise.all([...l.layouts,l.leaf].map(h=>h==null?void 0:h[1]())));await Promise.all(c)}async function De(r,s,c,l,h={},d){var b,v;$e=h;let m=r&&await Ve(r);if(m||(m=await Ge(s,{id:null},await x(new Error(`Not found: ${s.pathname}`),{url:s,params:{},route:{id:null}}),404)),s=(r==null?void 0:r.url)||s,$e!==h)return!1;if(m.type==="redirect")if(c.length>10||c.includes(s.pathname))m=await re({status:500,error:await x(new Error("Redirect loop"),{url:s,params:{},route:{id:null}}),url:s,route:{id:null}});else return _e(new URL(m.location,s).href,{},[...c,s.pathname],h),!1;else((v=(b=m.props)==null?void 0:b.page)==null?void 0:v.status)>=400&&await K.updated.check()&&await ie(s);if(i.length=0,j=!1,g=!0,l&&l.details){const{details:y}=l,k=y.replaceState?0:1;y.state[q]=P+=k,history[y.replaceState?"replaceState":"pushState"](y.state,"",s)}if(o=null,_?(t=m.state,m.props.page&&(m.props.page.url=s),T.$set(m.props)):Ce(m),l){const{scroll:y,keepfocus:k}=l;if(k||Le(),await le(),p){const L=s.hash&&document.getElementById(s.hash.slice(1));y?scrollTo(y.x,y.y):L?L.scrollIntoView():scrollTo(0,0)}}else await le();p=!0,m.props.page&&(J=m.props.page),d&&d(),g=!1}function Ce(r){var l;t=r.state;const s=document.querySelector("style[data-sveltekit]");s&&s.remove(),J=r.props.page,T=new Bt({target:a,props:{...r.props,stores:K},hydrate:!0});const c={from:null,to:{params:t.params,route:{id:((l=t.route)==null?void 0:l.id)??null},url:new URL(location.href)},willUnload:!1,type:"enter"};u.after_navigate.forEach(h=>h(c)),_=!0}async function Z({url:r,params:s,branch:c,status:l,error:h,route:d,form:m}){const b=c.filter(Boolean);let v="never";for(const O of c)(O==null?void 0:O.slash)!==void 0&&(v=O.slash);r.pathname=yt(r.pathname,v),r.search=r.search;const y={type:"loaded",state:{url:r,params:s,branch:c,error:h,route:d},props:{components:b.map(O=>O.node.component)}};m!==void 0&&(y.props.form=m);let k={},L=!J;for(let O=0;OU===E))&&(y.props[`data_${O}`]=k,L=L||Object.keys(E.data??{}).length>0)}return L||(L=Object.keys(J.data).length!==Object.keys(k).length),(!t.url||r.href!==t.url.href||t.error!==h||m!==void 0||L)&&(y.props.page={error:h,params:s,route:{id:(d==null?void 0:d.id)??null},status:l,url:new URL(r),form:m??null,data:L?k:J.data}),y}async function ge({loader:r,parent:s,url:c,params:l,route:h,server_data_node:d}){var y,k,L;let m=null;const b={dependencies:new Set,params:new Set,parent:!1,route:!1,url:!1},v=await r();if((y=v.universal)!=null&&y.load){let D=function(...E){for(const U of E){const{href:$}=new URL(U,c);b.dependencies.add($)}};const O={route:{get id(){return b.route=!0,h.id}},params:new Proxy(l,{get:(E,U)=>(b.params.add(U),E[U])}),data:(d==null?void 0:d.data)??null,url:Et(c,()=>{b.url=!0}),async fetch(E,U){let $;E instanceof Request?($=E.url,U={body:E.method==="GET"||E.method==="HEAD"?void 0:await 
E.blob(),cache:E.cache,credentials:E.credentials,headers:E.headers,integrity:E.integrity,keepalive:E.keepalive,method:E.method,mode:E.mode,redirect:E.redirect,referrer:E.referrer,referrerPolicy:E.referrerPolicy,signal:E.signal,...U}):$=E;const S=new URL($,c).href;return D(S),_?It($,S,U):Ot($,U)},setHeaders:()=>{},depends:D,parent(){return b.parent=!0,s()}};m=await v.universal.load.call(null,O)??null,m=m?await Ht(m):null}return{node:v,loader:r,server:d,universal:(k=v.universal)!=null&&k.load?{type:"data",data:m,uses:b}:null,data:m??(d==null?void 0:d.data)??null,slash:((L=v.universal)==null?void 0:L.trailingSlash)??(d==null?void 0:d.slash)}}function qe(r,s,c,l,h){if(j)return!0;if(!l)return!1;if(l.parent&&r||l.route&&s||l.url&&c)return!0;for(const d of l.params)if(h[d]!==t.params[d])return!0;for(const d of l.dependencies)if(i.some(m=>m(new URL(d))))return!0;return!1}function ye(r,s){return(r==null?void 0:r.type)==="data"?{type:"data",data:r.data,uses:{dependencies:new Set(r.uses.dependencies??[]),params:new Set(r.uses.params??[]),parent:!!r.uses.parent,route:!!r.uses.route,url:!!r.uses.url},slash:r.slash}:(r==null?void 0:r.type)==="skip"?s??null:null}async function Ve({id:r,invalidating:s,url:c,params:l,route:h}){if((o==null?void 0:o.id)===r)return o.promise;const{errors:d,layouts:m,leaf:b}=h,v=[...m,b];d.forEach(S=>S==null?void 0:S().catch(()=>{})),v.forEach(S=>S==null?void 0:S[1]().catch(()=>{}));let y=null;const k=t.url?r!==t.url.pathname+t.url.search:!1,L=t.route?r!==t.route.id:!1,D=v.reduce((S,A,N)=>{var Q;const I=t.branch[N],z=!!(A!=null&&A[0])&&((I==null?void 0:I.loader)!==A[1]||qe(S.some(Boolean),L,k,(Q=I.server)==null?void 0:Q.uses,l));return S.push(z),S},[]);if(D.some(Boolean)){try{y=await et(c,D)}catch(S){return re({status:500,error:await x(S,{url:c,params:l,route:{id:h.id}}),url:c,route:h})}if(y.type==="redirect")return y}const O=y==null?void 0:y.nodes;let E=!1;const U=v.map(async(S,A)=>{var Q;if(!S)return;const N=t.branch[A],I=O==null?void 0:O[A];if((!I||I.type==="skip")&&S[1]===(N==null?void 0:N.loader)&&!qe(E,L,k,(Q=N.universal)==null?void 0:Q.uses,l))return N;if(E=!0,(I==null?void 0:I.type)==="error")throw I;return ge({loader:S[1],url:c,params:l,route:h,parent:async()=>{var Me;const Ke={};for(let we=0;we{});const $=[];for(let S=0;SPromise.resolve({}),server_data_node:ye(m)}),v={node:await Pe(),loader:Pe,universal:null,server:null,data:null};return await Z({url:c,params:h,branch:[b,v],status:r,error:s,route:null})}function oe(r,s){if(We(r,e))return;const c=wt(r.pathname.slice(e.length)||"/");for(const l of Se){const h=l.exec(c);if(h)return{id:r.pathname+r.search,invalidating:s,route:l,params:bt(h),url:r}}}function Fe({url:r,type:s,intent:c,delta:l}){var b,v;let h=!1;const d={from:{params:t.params,route:{id:((b=t.route)==null?void 0:b.id)??null},url:t.url},to:{params:(c==null?void 0:c.params)??null,route:{id:((v=c==null?void 0:c.route)==null?void 0:v.id)??null},url:r},willUnload:!c,type:s};l!==void 0&&(d.delta=l);const m={...d,cancel:()=>{h=!0}};return w||u.before_navigate.forEach(y=>y(m)),h?null:d}async function se({url:r,scroll:s,keepfocus:c,redirect_chain:l,details:h,type:d,delta:m,nav_token:b,accepted:v,blocked:y}){const k=oe(r,!1),L=Fe({url:r,type:d,delta:m,intent:k});if(!L){y();return}Re(P),v(),w=!0,_&&K.navigating.set(L),await De(k,r,l,{scroll:s,keepfocus:c,details:h},b,()=>{w=!1,u.after_navigate.forEach(D=>D(L)),K.navigating.set(null)})}async function Ge(r,s,c,l){return r.origin===location.origin&&r.pathname===location.pathname&&!f?await 
re({status:l,error:c,url:r,route:s}):await ie(r)}function ie(r){return location.href=r.href,new Promise(()=>{})}function nt(){let r;n.addEventListener("mousemove",d=>{const m=d.target;clearTimeout(r),r=setTimeout(()=>{l(m,2)},20)});function s(d){l(d.composedPath()[0],1)}n.addEventListener("mousedown",s),n.addEventListener("touchstart",s,{passive:!0});const c=new IntersectionObserver(d=>{for(const m of d)m.isIntersecting&&(ae(new URL(m.target.href).pathname),c.unobserve(m.target))},{threshold:0});function l(d,m){const b=He(d,n);if(!b)return;const{url:v,external:y}=ve(b,e);if(y)return;const k=ce(b);k.reload||(m<=k.preload_data?Te(v):m<=k.preload_code&&ae(v.pathname))}function h(){c.disconnect();for(const d of n.querySelectorAll("a")){const{url:m,external:b}=ve(d,e);if(b)continue;const v=ce(d);v.reload||(v.preload_code===Ye.viewport&&c.observe(d),v.preload_code===Ye.eager&&ae(m.pathname))}}u.after_navigate.push(h),h()}return{after_navigate:r=>{Oe(()=>(u.after_navigate.push(r),()=>{const s=u.after_navigate.indexOf(r);u.after_navigate.splice(s,1)}))},before_navigate:r=>{Oe(()=>(u.before_navigate.push(r),()=>{const s=u.before_navigate.indexOf(r);u.before_navigate.splice(s,1)}))},disable_scroll_handling:()=>{(g||!_)&&(p=!1)},goto:(r,s={})=>_e(r,s,[]),invalidate:r=>{if(typeof r=="function")i.push(r);else{const{href:s}=new URL(r,location.href);i.push(c=>c.href===s)}return je()},invalidateAll:()=>(j=!0,je()),preload_data:async r=>{const s=new URL(r,ze(document));await Te(s)},preload_code:ae,apply_action:async r=>{if(r.type==="error"){const s=new URL(location.href),{branch:c,route:l}=t;if(!l)return;const h=await Be(t.branch.length,c,l.errors);if(h){const d=await Z({url:s,params:t.params,branch:c.slice(0,h.idx).concat(h.node),status:r.status??500,error:r.error,route:l});t=d.state,T.$set(d.props),le().then(Le)}}else if(r.type==="redirect")_e(r.location,{invalidateAll:!0},[]);else{const s={form:r.data,page:{...J,form:r.data,status:r.status}};T.$set(s),r.type==="success"&&le().then(Le)}},_start_router:()=>{var r;history.scrollRestoration="manual",addEventListener("beforeunload",s=>{var l;let c=!1;if(!w){const h={from:{params:t.params,route:{id:((l=t.route)==null?void 0:l.id)??null},url:t.url},to:null,willUnload:!0,type:"leave",cancel:()=>c=!0};u.before_navigate.forEach(d=>d(h))}c?(s.preventDefault(),s.returnValue=""):history.scrollRestoration="auto"}),addEventListener("visibilitychange",()=>{if(document.visibilityState==="hidden"){Re(P);try{sessionStorage[tt]=JSON.stringify(te)}catch{}}}),(r=navigator.connection)!=null&&r.saveData||nt(),n.addEventListener("click",s=>{if(s.button||s.which!==1||s.metaKey||s.ctrlKey||s.shiftKey||s.altKey||s.defaultPrevented)return;const c=He(s.composedPath()[0],n);if(!c)return;const{url:l,external:h,has:d}=ve(c,e),m=ce(c);if(!l||!(c instanceof SVGAElement)&&l.protocol!==location.protocol&&!(l.protocol==="https:"||l.protocol==="http:")||d.download)return;if(h||m.reload){Fe({url:l,type:"link"})||s.preventDefault(),w=!0;return}const[v,y]=l.href.split("#");if(y!==void 0&&v===location.href.split("#")[0]){R=!0,Re(P),t.url=l,K.page.set({...J,url:l}),K.page.notify();return}se({url:l,scroll:m.noscroll?fe():null,keepfocus:!1,redirect_chain:[],details:{state:{},replaceState:l.href===location.href},accepted:()=>s.preventDefault(),blocked:()=>s.preventDefault(),type:"link"})}),n.addEventListener("submit",s=>{if(s.defaultPrevented)return;const c=HTMLFormElement.prototype.cloneNode.call(s.target),l=s.submitter;if(((l==null?void 0:l.formMethod)||c.method)!=="get")return;const d=new 
URL((l==null?void 0:l.hasAttribute("formaction"))&&(l==null?void 0:l.formAction)||c.action);if(We(d,e))return;const m=s.target,{noscroll:b,reload:v}=ce(m);if(v)return;s.preventDefault(),s.stopPropagation();const y=new FormData(m),k=l==null?void 0:l.getAttribute("name");k&&y.append(k,(l==null?void 0:l.getAttribute("value"))??""),d.search=new URLSearchParams(y).toString(),se({url:d,scroll:b?fe():null,keepfocus:!1,redirect_chain:[],details:{state:{},replaceState:!1},nav_token:{},accepted:()=>{},blocked:()=>{},type:"form"})}),addEventListener("popstate",s=>{var c;if((c=s.state)!=null&&c[q]){if(s.state[q]===P)return;const l=s.state[q]-P;se({url:new URL(location.href),scroll:te[s.state[q]],keepfocus:!1,redirect_chain:[],details:null,accepted:()=>{P=s.state[q]},blocked:()=>{history.go(-l)},type:"popstate",delta:l})}}),addEventListener("hashchange",()=>{R&&(R=!1,history.replaceState({...history.state,[q]:++P},"",location.href))});for(const s of document.querySelectorAll("link"))s.rel==="icon"&&(s.href=s.href);addEventListener("pageshow",s=>{s.persisted&&K.navigating.set(null)})},_hydrate:async({status:r=200,error:s,node_ids:c,params:l,route:h,data:d,form:m})=>{f=!0;const b=new URL(location.href);({params:l={},route:h={id:null}}=oe(b,!1)||{});let v;try{const y=c.map(async(k,L)=>{const D=d[L];return ge({loader:he[k],url:b,params:l,route:h,parent:async()=>{const O={};for(let E=0;Ek===h.id)??null})}catch(y){if(y instanceof xe){await ie(new URL(y.location,location.href));return}v=await re({status:y instanceof Ie?y.status:500,error:await x(y,{url:b,params:l,route:h}),url:b,route:h})}Ce(v)}}}async function et(a,e){var u;const n=new URL(a);n.pathname=Rt(a.pathname),n.searchParams.append("x-sveltekit-invalidated",e.map(t=>t?"1":"").join("_"));const i=await pe(n.href),o=await i.json();if(!i.ok)throw new Error(o);return(u=o.nodes)==null||u.forEach(t=>{(t==null?void 0:t.type)==="data"&&(t.data=en(t.data),t.uses={dependencies:new Set(t.uses.dependencies??[]),params:new Set(t.uses.params??[]),parent:!!t.uses.parent,route:!!t.uses.route,url:!!t.uses.url})}),o}function x(a,e){return a instanceof Ie?a.body:zt.handleError({error:a,event:e})??{message:e.route.id!=null?"Internal Error":"Not Found"}}function Le(){const a=document.querySelector("[autofocus]");if(a)a.focus();else{const e=document.body,n=e.getAttribute("tabindex");e.tabIndex=-1,e.focus({preventScroll:!0}),setTimeout(()=>{var i;(i=getSelection())==null||i.removeAllRanges()}),n!==null?e.setAttribute("tabindex",n):e.removeAttribute("tabindex")}}async function rn({env:a,hydrate:e,paths:n,target:i,version:o}){mt(n),gt(o);const u=tn({target:i,base:n.base});_t({client:u}),e?await u._hydrate(e):u.goto(location.href,{replaceState:!0}),u._start_router()}export{rn as start}; diff --git a/spaces/unity/ML-Agents-Worm/style.css b/spaces/unity/ML-Agents-Worm/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/unity/ML-Agents-Worm/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/2 Kids 1 Sandbox Official Video.zip.md 
b/spaces/usbethFlerru/sovits-modelsV2/example/2 Kids 1 Sandbox Official Video.zip.md deleted file mode 100644 index b54c05a8648810a783ba614d4b9de6ec8ea3fc2c..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/2 Kids 1 Sandbox Official Video.zip.md +++ /dev/null @@ -1,5 +0,0 @@ - -

A heavy-duty plastic sheet under the sandbox will help reduce moisture build-up and extend the life of the sandbox. Fill the sandbox with 20-30 bags of play sand and let the kids enjoy their new backyard addition!
-2 kids 1 sandbox official video.zip
-Download File https://urlcod.com/2uyUaH
- aaccfb2cb3
-
      \ No newline at end of file diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Abbyy Flexicapture 10 Crack Torrent [VERIFIED].md b/spaces/usbethFlerru/sovits-modelsV2/example/Abbyy Flexicapture 10 Crack Torrent [VERIFIED].md deleted file mode 100644 index 1078aa243e0e6035a88ec40b02de5f31849ae9f7..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Abbyy Flexicapture 10 Crack Torrent [VERIFIED].md +++ /dev/null @@ -1,6 +0,0 @@ -

abbyy flexicapture 10 crack torrent
-Download File 🗸 https://urlcod.com/2uyUkV
-
- aaccfb2cb3
-
-
      diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Bothersome Bullies (Adventures In Odyssey) Mobi Do Alfonso Spider Selva.md b/spaces/usbethFlerru/sovits-modelsV2/example/Bothersome Bullies (Adventures In Odyssey) Mobi Do Alfonso Spider Selva.md deleted file mode 100644 index 4b9a32b7d8ad37ccb6b1da7565a8f61541e3da47..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Bothersome Bullies (Adventures In Odyssey) Mobi Do Alfonso Spider Selva.md +++ /dev/null @@ -1,6 +0,0 @@ -

Bothersome Bullies (Adventures In Odyssey) Mobi Do alfonso spider selva
-Download ✏ ✏ ✏ https://urlcod.com/2uyU7T
-
- aaccfb2cb3
-
-
      diff --git a/spaces/utec/FedericoRodriguezDetectorSentimentalTwitter/README.md b/spaces/utec/FedericoRodriguezDetectorSentimentalTwitter/README.md deleted file mode 100644 index 5eabe582ab812e91291c3c9cb635b1dc3037d436..0000000000000000000000000000000000000000 --- a/spaces/utec/FedericoRodriguezDetectorSentimentalTwitter/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: FedericoRodriguezDetectorSentimentalTwitter -emoji: 🔥 -colorFrom: purple -colorTo: pink -sdk: gradio -sdk_version: 2.9.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/midas/transforms.py b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/midas/transforms.py deleted file mode 100644 index 350cbc11662633ad7f8968eb10be2e7de6e384e9..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/midas/transforms.py +++ /dev/null @@ -1,234 +0,0 @@ -import numpy as np -import cv2 -import math - - -def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA): - """Rezise the sample to ensure the given size. Keeps aspect ratio. - - Args: - sample (dict): sample - size (tuple): image size - - Returns: - tuple: new size - """ - shape = list(sample["disparity"].shape) - - if shape[0] >= size[0] and shape[1] >= size[1]: - return sample - - scale = [0, 0] - scale[0] = size[0] / shape[0] - scale[1] = size[1] / shape[1] - - scale = max(scale) - - shape[0] = math.ceil(scale * shape[0]) - shape[1] = math.ceil(scale * shape[1]) - - # resize - sample["image"] = cv2.resize( - sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method - ) - - sample["disparity"] = cv2.resize( - sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST - ) - sample["mask"] = cv2.resize( - sample["mask"].astype(np.float32), - tuple(shape[::-1]), - interpolation=cv2.INTER_NEAREST, - ) - sample["mask"] = sample["mask"].astype(bool) - - return tuple(shape) - - -class Resize(object): - """Resize sample to given size (width, height). - """ - - def __init__( - self, - width, - height, - resize_target=True, - keep_aspect_ratio=False, - ensure_multiple_of=1, - resize_method="lower_bound", - image_interpolation_method=cv2.INTER_AREA, - ): - """Init. - - Args: - width (int): desired output width - height (int): desired output height - resize_target (bool, optional): - True: Resize the full sample (image, mask, target). - False: Resize image only. - Defaults to True. - keep_aspect_ratio (bool, optional): - True: Keep the aspect ratio of the input sample. - Output sample might not have the given width and height, and - resize behaviour depends on the parameter 'resize_method'. - Defaults to False. - ensure_multiple_of (int, optional): - Output width and height is constrained to be multiple of this parameter. - Defaults to 1. - resize_method (str, optional): - "lower_bound": Output will be at least as large as the given size. - "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.) - "minimal": Scale as least as possible. (Output size might be smaller than given size.) - Defaults to "lower_bound". 
- """ - self.__width = width - self.__height = height - - self.__resize_target = resize_target - self.__keep_aspect_ratio = keep_aspect_ratio - self.__multiple_of = ensure_multiple_of - self.__resize_method = resize_method - self.__image_interpolation_method = image_interpolation_method - - def constrain_to_multiple_of(self, x, min_val=0, max_val=None): - y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int) - - if max_val is not None and y > max_val: - y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int) - - if y < min_val: - y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int) - - return y - - def get_size(self, width, height): - # determine new height and width - scale_height = self.__height / height - scale_width = self.__width / width - - if self.__keep_aspect_ratio: - if self.__resize_method == "lower_bound": - # scale such that output size is lower bound - if scale_width > scale_height: - # fit width - scale_height = scale_width - else: - # fit height - scale_width = scale_height - elif self.__resize_method == "upper_bound": - # scale such that output size is upper bound - if scale_width < scale_height: - # fit width - scale_height = scale_width - else: - # fit height - scale_width = scale_height - elif self.__resize_method == "minimal": - # scale as least as possbile - if abs(1 - scale_width) < abs(1 - scale_height): - # fit width - scale_height = scale_width - else: - # fit height - scale_width = scale_height - else: - raise ValueError( - f"resize_method {self.__resize_method} not implemented" - ) - - if self.__resize_method == "lower_bound": - new_height = self.constrain_to_multiple_of( - scale_height * height, min_val=self.__height - ) - new_width = self.constrain_to_multiple_of( - scale_width * width, min_val=self.__width - ) - elif self.__resize_method == "upper_bound": - new_height = self.constrain_to_multiple_of( - scale_height * height, max_val=self.__height - ) - new_width = self.constrain_to_multiple_of( - scale_width * width, max_val=self.__width - ) - elif self.__resize_method == "minimal": - new_height = self.constrain_to_multiple_of(scale_height * height) - new_width = self.constrain_to_multiple_of(scale_width * width) - else: - raise ValueError(f"resize_method {self.__resize_method} not implemented") - - return (new_width, new_height) - - def __call__(self, sample): - width, height = self.get_size( - sample["image"].shape[1], sample["image"].shape[0] - ) - - # resize sample - sample["image"] = cv2.resize( - sample["image"], - (width, height), - interpolation=self.__image_interpolation_method, - ) - - if self.__resize_target: - if "disparity" in sample: - sample["disparity"] = cv2.resize( - sample["disparity"], - (width, height), - interpolation=cv2.INTER_NEAREST, - ) - - if "depth" in sample: - sample["depth"] = cv2.resize( - sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST - ) - - sample["mask"] = cv2.resize( - sample["mask"].astype(np.float32), - (width, height), - interpolation=cv2.INTER_NEAREST, - ) - sample["mask"] = sample["mask"].astype(bool) - - return sample - - -class NormalizeImage(object): - """Normlize image by given mean and std. - """ - - def __init__(self, mean, std): - self.__mean = mean - self.__std = std - - def __call__(self, sample): - sample["image"] = (sample["image"] - self.__mean) / self.__std - - return sample - - -class PrepareForNet(object): - """Prepare sample for usage as network input. 
- """ - - def __init__(self): - pass - - def __call__(self, sample): - image = np.transpose(sample["image"], (2, 0, 1)) - sample["image"] = np.ascontiguousarray(image).astype(np.float32) - - if "mask" in sample: - sample["mask"] = sample["mask"].astype(np.float32) - sample["mask"] = np.ascontiguousarray(sample["mask"]) - - if "disparity" in sample: - disparity = sample["disparity"].astype(np.float32) - sample["disparity"] = np.ascontiguousarray(disparity) - - if "depth" in sample: - depth = sample["depth"].astype(np.float32) - sample["depth"] = np.ascontiguousarray(depth) - - return sample diff --git a/spaces/vict0rsch/climateGAN/climategan/painter.py b/spaces/vict0rsch/climateGAN/climategan/painter.py deleted file mode 100644 index 739ec2b1bda94a7b37ea17b5d757e009255bd312..0000000000000000000000000000000000000000 --- a/spaces/vict0rsch/climateGAN/climategan/painter.py +++ /dev/null @@ -1,171 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - -import climategan.strings as strings -from climategan.blocks import InterpolateNearest2d, SPADEResnetBlock -from climategan.norms import SpectralNorm - - -def create_painter(opts, no_init=False, verbose=0): - if verbose > 0: - print(" - Add PainterSpadeDecoder Painter") - return PainterSpadeDecoder(opts) - - -class PainterSpadeDecoder(nn.Module): - def __init__(self, opts): - """Create a SPADE-based decoder, which forwards z and the conditioning - tensors seg (in the original paper, conditioning is on a semantic map only). - All along, z is conditioned on seg. First 3 SpadeResblocks (SRB) do not shrink - the channel dimension, and an upsampling is applied after each. Therefore - 2 upsamplings at this point. Then, for each remaining upsamplings - (w.r.t. spade_n_up), the SRB shrinks channels by 2. Before final conv to get 3 - channels, the number of channels is therefore: - final_nc = channels(z) * 2 ** (spade_n_up - 2) - Args: - latent_dim (tuple): z's shape (only the number of channels matters) - cond_nc (int): conditioning tensor's expected number of channels - spade_n_up (int): Number of total upsamplings from z - spade_use_spectral_norm (bool): use spectral normalization? 
- spade_param_free_norm (str): norm to use before SPADE de-normalization - spade_kernel_size (int): SPADE conv layers' kernel size - Returns: - [type]: [description] - """ - super().__init__() - - latent_dim = opts.gen.p.latent_dim - cond_nc = 3 - spade_n_up = opts.gen.p.spade_n_up - spade_use_spectral_norm = opts.gen.p.spade_use_spectral_norm - spade_param_free_norm = opts.gen.p.spade_param_free_norm - spade_kernel_size = 3 - - self.z_nc = latent_dim - self.spade_n_up = spade_n_up - - self.z_h = self.z_w = None - - self.fc = nn.Conv2d(3, latent_dim, 3, padding=1) - self.head_0 = SPADEResnetBlock( - self.z_nc, - self.z_nc, - cond_nc, - spade_use_spectral_norm, - spade_param_free_norm, - spade_kernel_size, - ) - - self.G_middle_0 = SPADEResnetBlock( - self.z_nc, - self.z_nc, - cond_nc, - spade_use_spectral_norm, - spade_param_free_norm, - spade_kernel_size, - ) - self.G_middle_1 = SPADEResnetBlock( - self.z_nc, - self.z_nc, - cond_nc, - spade_use_spectral_norm, - spade_param_free_norm, - spade_kernel_size, - ) - - self.up_spades = nn.Sequential( - *[ - SPADEResnetBlock( - self.z_nc // 2 ** i, - self.z_nc // 2 ** (i + 1), - cond_nc, - spade_use_spectral_norm, - spade_param_free_norm, - spade_kernel_size, - ) - for i in range(spade_n_up - 2) - ] - ) - - self.final_nc = self.z_nc // 2 ** (spade_n_up - 2) - - self.final_spade = SPADEResnetBlock( - self.final_nc, - self.final_nc, - cond_nc, - spade_use_spectral_norm, - spade_param_free_norm, - spade_kernel_size, - ) - self.final_shortcut = None - if opts.gen.p.use_final_shortcut: - self.final_shortcut = nn.Sequential( - *[ - SpectralNorm(nn.Conv2d(self.final_nc, 3, 1)), - nn.BatchNorm2d(3), - nn.LeakyReLU(0.2, True), - ] - ) - - self.conv_img = nn.Conv2d(self.final_nc, 3, 3, padding=1) - - self.upsample = InterpolateNearest2d(scale_factor=2) - - def set_latent_shape(self, shape, is_input=True): - """ - Sets the latent shape to start the upsampling from, i.e. z_h and z_w. - If is_input is True, then this is the actual input shape which should - be divided by 2 ** spade_n_up - Otherwise, just sets z_h and z_w from shape[-2] and shape[-1] - - Args: - shape (tuple): The shape to start sampling from. 
- is_input (bool, optional): Whether to divide shape by 2 ** spade_n_up - """ - if isinstance(shape, (list, tuple)): - self.z_h = shape[-2] - self.z_w = shape[-1] - elif isinstance(shape, int): - self.z_h = self.z_w = shape - else: - raise ValueError("Unknown shape type:", shape) - - if is_input: - self.z_h = self.z_h // (2 ** self.spade_n_up) - self.z_w = self.z_w // (2 ** self.spade_n_up) - - def _apply(self, fn): - # print("Applying SpadeDecoder", fn) - super()._apply(fn) - # self.head_0 = fn(self.head_0) - # self.G_middle_0 = fn(self.G_middle_0) - # self.G_middle_1 = fn(self.G_middle_1) - # for i, up in enumerate(self.up_spades): - # self.up_spades[i] = fn(up) - # self.conv_img = fn(self.conv_img) - return self - - def forward(self, z, cond): - if z is None: - assert self.z_h is not None and self.z_w is not None - z = self.fc(F.interpolate(cond, size=(self.z_h, self.z_w))) - y = self.head_0(z, cond) - y = self.upsample(y) - y = self.G_middle_0(y, cond) - y = self.upsample(y) - y = self.G_middle_1(y, cond) - - for i, up in enumerate(self.up_spades): - y = self.upsample(y) - y = up(y, cond) - - if self.final_shortcut is not None: - cond = self.final_shortcut(y) - y = self.final_spade(y, cond) - y = self.conv_img(F.leaky_relu(y, 2e-1)) - y = torch.tanh(y) - return y - - def __str__(self): - return strings.spadedecoder(self) diff --git a/spaces/vincentmin/TalkToMe/app.py b/spaces/vincentmin/TalkToMe/app.py deleted file mode 100644 index 1a73896c450029297a8d72bd61598c6cafcb51d0..0000000000000000000000000000000000000000 --- a/spaces/vincentmin/TalkToMe/app.py +++ /dev/null @@ -1,220 +0,0 @@ -import argparse -import os -import requests - -import gradio as gr - -INTRO = """**Chat with Yoda, Albert Einstein, Elon Musk or Kanye West!** - -✨ This demo is powered by HuggingFace Inference API and currently the models [starchat-beta](https://huggingface.co/HuggingFaceH4/starchat-beta) and [falcon-7b](https://huggingface.co/tiiuae/falcon-7b-instruct) are supported. This demo is based on the [falcon-chat demo](https://huggingface.co/spaces/HuggingFaceH4/falcon-chat) by the [HuggingFace H4 team](https://huggingface.co/HuggingFaceH4); major props to them! - -🧪 With this demo you can talk to some of your favorite characters and also play with some very powerful models. Although not as powerful as some 40B+ models, the 7B Falcon model and 15.5B starchat-beta models are great chat companions. We intend to add more characters and models in the future. - -👀 **Learn more about Falcon LLM:** [falconllm.tii.ae](https://falconllm.tii.ae/) - -👀 **Learn more about Starchat LLM:** [starchat-alpha](https://huggingface.co/blog/starchat-alpha) - -👀 **Banner images were created with [stable diffusion web](https://stablediffusionweb.com/).** - -➡️️ **Intended Use**: this demo is intended to be a fun showcase of what one can do with HuggingFace Inference API and recent chat models. - -⚠️ **Limitations**: the model can and will produce factually incorrect information, hallucinating facts and actions. As it has not undergone any advanced tuning/alignment, it can produce problematic outputs, especially if prompted to do so. Finally, this demo is limited to a session length of about 1,000 words. -""" -MODELS = [ - "HuggingFaceH4/starchat-beta", - "tiiuae/falcon-7b-instruct", -] -HEADERS = {"Authorization": f"Bearer {os.environ['HUB_TOKEN']}"} -TITLE = """

      🚀 TalkToMe

      """ -USER_NAME = "User" -INSTRUCTIONS_MAPPING = { - "Albert Einstein": "The following is a conversation between the highly knowledgeable and intelligent scientist Albert Einstein, and a human user, called User. In the following interactions, User and Albert Einstein will converse in natural language, and Albert Einstein will answer User's questions. Albert Einstein is always eloquent, respectful, polite and inclusive. Albert Einstein invented the theory of Relativity and made important contributions to the theory of Quantum Mechanics. Albert Einstein will never decline to answer a question, and always attempts to give an answer that User would be satisfied with. Albert Einstein knows a lot, and always tells the truth. The conversation begins.\n", - "Yoda": "The following is a conversation between the highly knowledgeable and intelligent Yoda from Star Wars, and a human user, called User. In the following interactions, User and Yoda will converse in natural language, and Yoda will answer User's questions. Yoda is respectful, polite and inclusive. Yoda is a wise and powerful Jedi Master from the Star Wars universe who speaks as follows: `Speak you must, in his unique and distinctive manner, with wisdom and knowledge to share.`, `Reversed syntax and short phrases, you shall use.`, `May the Force be with you, young Padawan.`. The conversation begins.\n", - "Elon Musk": "The following is a conversation between entrepeneur and multi-billionair Elon Musk, and a human user, called User. In the following interactions, User and Elon Musk will converse in natural language, and Elon Musk will answer User's questions. Elon Musk is self-centered, arrogant and has a great for business development. Elon Musk owns the electric car company Tesla, the spacecraft engeneering company SpaceX and bought the social media company Twitter. The conversation begins.\n", - "Kanye West": "The following is a conversation between rapper Kanye West, and a human user, called User. In the following interactions, User and Kanye West will converse in natural language, and Kanye West will answer User's questions. Kanye West is self-centered, arrogant, a self-proclaimed genius and a great musician. Kanye West interrupted an award ceremony for Taylor Swift and ran for president of the united states. The conversation begins.\n", -} -RETRY_COMMAND = "/retry" -STOP_SEQ = [f"\n{USER_NAME}", "<|end|>"] - -def run_model(prompt, model, temperature, top_p): - try: - api_url = f"https://api-inference.huggingface.co/models/{model}" - payload = { - "inputs": prompt, - "parameters": { - "max_new_tokens": 128, - "do_sample": True, - "temperature": temperature, - "top_p": top_p - } - } - response = requests.post(api_url, headers=HEADERS, json=payload) - return response.json()[0]['generated_text'] - except: - return "I'm sorry, the model is not available right now. Please try again later." 
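# --- Illustrative usage sketch (editorial addition, not part of the original file) ---
# A minimal way to exercise run_model outside the Gradio UI, assuming HUB_TOKEN is set
# so HEADERS carries a valid Inference API token. The character, question, temperature
# and top_p values are arbitrary examples; the prompt is assembled the same way
# format_chat_prompt does further down (instructions, then "User: ...", then the bot
# name as a generation cue), and the output is trimmed the way run_chat trims it.
def _example_run_model_call():
    bot_name = "Yoda"
    question = "How do I learn the force?"
    instructions = INSTRUCTIONS_MAPPING[bot_name].strip(" ").strip("\n")
    prompt = f"{instructions}\n{USER_NAME}: {question}\n{bot_name}:"
    raw = run_model(prompt, model=MODELS[1], temperature=0.8, top_p=0.9)
    reply = raw[len(prompt):]      # the Inference API echoes the prompt, so drop it
    for stop in STOP_SEQ:          # cut at "\nUser" / "<|end|>"
        reply = reply.split(stop)[0]
    return reply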
- -def get_stream(string: str): - return enumerate(iter(string.split(" "))) - -def parameter_accordion(): - with gr.Accordion("Parameters", open=False): - model = gr.Radio( - choices = MODELS, - value = MODELS[0], - interactive=True, - label="Model", - ) - temperature = gr.Slider( - minimum=0.1, - maximum=2.0, - value=0.8, - step=0.1, - interactive=True, - label="Temperature", - ) - top_p = gr.Slider( - minimum=0.1, - maximum=0.99, - value=0.9, - step=0.01, - interactive=True, - label="p (nucleus sampling)", - ) - return model, temperature, top_p - - -def format_chat_prompt(message: str, chat_history, bot_name: str) -> str: - instructions = INSTRUCTIONS_MAPPING[bot_name].strip(" ").strip("\n") - prompt = instructions - for turn in chat_history: - user_message, bot_message = turn - prompt = f"{prompt}\n{USER_NAME}: {user_message}\n{bot_name}: {bot_message}" - prompt = f"{prompt}\n{USER_NAME}: {message}\n{bot_name}:" - return prompt - - -def chat(): - gr.HTML(TITLE) - with gr.Row(): - with gr.Column(): - banner = gr.Image("Albert Einstein.jpeg", elem_id="banner-image", show_label=False) - with gr.Column(): - gr.Markdown(INTRO) - - with gr.Row(elem_id="param_container"): - with gr.Column(): - model, temperature, top_p = parameter_accordion() - with gr.Column(): - with gr.Accordion("Character", open=True): - choices = list(INSTRUCTIONS_MAPPING) - bot_name = gr.Radio( - choices=choices, - value=choices[0], - interactive=True, - label="Character", - ) - bot_name.change(fn=lambda value: gr.update(value=f"{value}.jpeg"), inputs=bot_name, outputs=banner) - - with gr.Column(elem_id="chat_container"): - with gr.Row(): - chatbot = gr.Chatbot(elem_id="chatbot") - with gr.Row(): - inputs = gr.Textbox( - placeholder=f"Hi there! Tell me something about yourself.", - label="Type an input and press Enter", - max_lines=3, - ) - - with gr.Row(elem_id="button_container"): - with gr.Column(): - retry_button = gr.Button("♻️ Retry last turn") - with gr.Column(): - delete_turn_button = gr.Button("🧽 Delete last turn") - with gr.Column(): - clear_chat_button = gr.Button("✨ Delete all history") - - gr.Examples( - [ - ["Hi Albert! Why did the apple fall from the tree?"], - ["Hi Yoda! How do I learn the force?"], - ["Hi Elon! Give me an idea for a new startup."], - ["Hi Kanye! 
What will be the theme of your next album?"], - ], - inputs=inputs, - label="Click on any example and press Enter in the input textbox!", - ) - - def run_chat(message: str, chat_history, bot_name: str, model: str, temperature: float, top_p: float): - if not message or (message == RETRY_COMMAND and len(chat_history) == 0): - yield chat_history - return - - if message == RETRY_COMMAND and chat_history: - prev_turn = chat_history.pop(-1) - user_message, _ = prev_turn - message = user_message - - prompt = format_chat_prompt(message, chat_history, bot_name) - model_output = run_model( - prompt, - model=model, - temperature=temperature, - top_p=top_p, - ) - model_output = model_output[len(prompt):] - for stop in STOP_SEQ: - model_output = model_output.split(stop)[0] - chat_history = chat_history + [[message, model_output]] - print(f"User: {message}") - print(f"{bot_name}: {model_output}") - yield chat_history - return - - def delete_last_turn(chat_history): - if chat_history: - chat_history.pop(-1) - return {chatbot: gr.update(value=chat_history)} - - def run_retry(message: str, chat_history, bot_name, model: str, temperature: float, top_p: float): - yield from run_chat(RETRY_COMMAND, chat_history, bot_name, model, temperature, top_p) - - def clear_chat(): - return [] - - inputs.submit( - run_chat, - [inputs, chatbot, bot_name, model, temperature, top_p], - outputs=[chatbot], - show_progress=False, - ) - inputs.submit(lambda: "", inputs=None, outputs=inputs) - delete_turn_button.click(delete_last_turn, inputs=[chatbot], outputs=[chatbot]) - retry_button.click( - run_retry, - [inputs, chatbot, bot_name, model, temperature, top_p], - outputs=[chatbot], - show_progress=False, - ) - clear_chat_button.click(clear_chat, [], chatbot) - - -def get_demo(): - with gr.Blocks( - # css=None - # css="""#chat_container {width: 700px; margin-left: auto; margin-right: auto;} - # #button_container {width: 700px; margin-left: auto; margin-right: auto;} - # #param_container {width: 700px; margin-left: auto; margin-right: auto;}""" - css="""#chatbot { - font-size: 14px; - min-height: 300px; -}""" - ) as demo: - chat() - - return demo - - -if __name__ == "__main__": - demo = get_demo() - demo.queue(max_size=128, concurrency_count=16) - demo.launch() diff --git a/spaces/vinthony/SadTalker/src/face3d/models/arcface_torch/inference.py b/spaces/vinthony/SadTalker/src/face3d/models/arcface_torch/inference.py deleted file mode 100644 index 3e5156e8d649954837e397c2ff15ec29995e7502..0000000000000000000000000000000000000000 --- a/spaces/vinthony/SadTalker/src/face3d/models/arcface_torch/inference.py +++ /dev/null @@ -1,35 +0,0 @@ -import argparse - -import cv2 -import numpy as np -import torch - -from backbones import get_model - - -@torch.no_grad() -def inference(weight, name, img): - if img is None: - img = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.uint8) - else: - img = cv2.imread(img) - img = cv2.resize(img, (112, 112)) - - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - img = np.transpose(img, (2, 0, 1)) - img = torch.from_numpy(img).unsqueeze(0).float() - img.div_(255).sub_(0.5).div_(0.5) - net = get_model(name, fp16=False) - net.load_state_dict(torch.load(weight)) - net.eval() - feat = net(img).numpy() - print(feat) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description='PyTorch ArcFace Training') - parser.add_argument('--network', type=str, default='r50', help='backbone network') - parser.add_argument('--weight', type=str, default='') - parser.add_argument('--img', type=str, 
default=None) - args = parser.parse_args() - inference(args.weight, args.network, args.img) diff --git a/spaces/vulkano/yulet1de-hentaidiffusion/app.py b/spaces/vulkano/yulet1de-hentaidiffusion/app.py deleted file mode 100644 index edf0803cbdf9a26a10899d5021088c3d80eec76d..0000000000000000000000000000000000000000 --- a/spaces/vulkano/yulet1de-hentaidiffusion/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/yulet1de/hentaidiffusion").launch() \ No newline at end of file diff --git a/spaces/vumichien/Generate_human_motion/VQ-Trans/dataset/dataset_TM_train.py b/spaces/vumichien/Generate_human_motion/VQ-Trans/dataset/dataset_TM_train.py deleted file mode 100644 index 0b0223effb01c1cf57fa6b2b6fb8d9d01b83f84a..0000000000000000000000000000000000000000 --- a/spaces/vumichien/Generate_human_motion/VQ-Trans/dataset/dataset_TM_train.py +++ /dev/null @@ -1,161 +0,0 @@ -import torch -from torch.utils import data -import numpy as np -from os.path import join as pjoin -import random -import codecs as cs -from tqdm import tqdm -import utils.paramUtil as paramUtil -from torch.utils.data._utils.collate import default_collate - - -def collate_fn(batch): - batch.sort(key=lambda x: x[3], reverse=True) - return default_collate(batch) - - -'''For use of training text-2-motion generative model''' -class Text2MotionDataset(data.Dataset): - def __init__(self, dataset_name, feat_bias = 5, unit_length = 4, codebook_size = 1024, tokenizer_name=None): - - self.max_length = 64 - self.pointer = 0 - self.dataset_name = dataset_name - - self.unit_length = unit_length - # self.mot_start_idx = codebook_size - self.mot_end_idx = codebook_size - self.mot_pad_idx = codebook_size + 1 - if dataset_name == 't2m': - self.data_root = './dataset/HumanML3D' - self.motion_dir = pjoin(self.data_root, 'new_joint_vecs') - self.text_dir = pjoin(self.data_root, 'texts') - self.joints_num = 22 - radius = 4 - fps = 20 - self.max_motion_length = 26 if unit_length == 8 else 51 - dim_pose = 263 - kinematic_chain = paramUtil.t2m_kinematic_chain - elif dataset_name == 'kit': - self.data_root = './dataset/KIT-ML' - self.motion_dir = pjoin(self.data_root, 'new_joint_vecs') - self.text_dir = pjoin(self.data_root, 'texts') - self.joints_num = 21 - radius = 240 * 8 - fps = 12.5 - dim_pose = 251 - self.max_motion_length = 26 if unit_length == 8 else 51 - kinematic_chain = paramUtil.kit_kinematic_chain - - split_file = pjoin(self.data_root, 'train.txt') - - - id_list = [] - with cs.open(split_file, 'r') as f: - for line in f.readlines(): - id_list.append(line.strip()) - - new_name_list = [] - data_dict = {} - for name in tqdm(id_list): - try: - m_token_list = np.load(pjoin(self.data_root, tokenizer_name, '%s.npy'%name)) - - # Read text - with cs.open(pjoin(self.text_dir, name + '.txt')) as f: - text_data = [] - flag = False - lines = f.readlines() - - for line in lines: - try: - text_dict = {} - line_split = line.strip().split('#') - caption = line_split[0] - t_tokens = line_split[1].split(' ') - f_tag = float(line_split[2]) - to_tag = float(line_split[3]) - f_tag = 0.0 if np.isnan(f_tag) else f_tag - to_tag = 0.0 if np.isnan(to_tag) else to_tag - - text_dict['caption'] = caption - text_dict['tokens'] = t_tokens - if f_tag == 0.0 and to_tag == 0.0: - flag = True - text_data.append(text_dict) - else: - m_token_list_new = [tokens[int(f_tag*fps/unit_length) : int(to_tag*fps/unit_length)] for tokens in m_token_list if int(f_tag*fps/unit_length) < int(to_tag*fps/unit_length)] - - if len(m_token_list_new) == 0: - continue 
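# This caption only covers the [f_tag, to_tag] time span: the motion-token segments were
# clipped to that window above (seconds -> token indices via fps/unit_length), so register
# them under a time-stamped name and pair them with just this caption as a separate sample.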
- new_name = '%s_%f_%f'%(name, f_tag, to_tag) - - data_dict[new_name] = {'m_token_list': m_token_list_new, - 'text':[text_dict]} - new_name_list.append(new_name) - except: - pass - - if flag: - data_dict[name] = {'m_token_list': m_token_list, - 'text':text_data} - new_name_list.append(name) - except: - pass - self.data_dict = data_dict - self.name_list = new_name_list - - def __len__(self): - return len(self.data_dict) - - def __getitem__(self, item): - data = self.data_dict[self.name_list[item]] - m_token_list, text_list = data['m_token_list'], data['text'] - m_tokens = random.choice(m_token_list) - - text_data = random.choice(text_list) - caption= text_data['caption'] - - - coin = np.random.choice([False, False, True]) - # print(len(m_tokens)) - if coin: - # drop one token at the head or tail - coin2 = np.random.choice([True, False]) - if coin2: - m_tokens = m_tokens[:-1] - else: - m_tokens = m_tokens[1:] - m_tokens_len = m_tokens.shape[0] - - if m_tokens_len+1 < self.max_motion_length: - m_tokens = np.concatenate([m_tokens, np.ones((1), dtype=int) * self.mot_end_idx, np.ones((self.max_motion_length-1-m_tokens_len), dtype=int) * self.mot_pad_idx], axis=0) - else: - m_tokens = np.concatenate([m_tokens, np.ones((1), dtype=int) * self.mot_end_idx], axis=0) - - return caption, m_tokens.reshape(-1), m_tokens_len - - - - -def DATALoader(dataset_name, - batch_size, codebook_size, tokenizer_name, unit_length=4, - num_workers = 8) : - - train_loader = torch.utils.data.DataLoader(Text2MotionDataset(dataset_name, codebook_size = codebook_size, tokenizer_name = tokenizer_name, unit_length=unit_length), - batch_size, - shuffle=True, - num_workers=num_workers, - #collate_fn=collate_fn, - drop_last = True) - - - return train_loader - - -def cycle(iterable): - while True: - for x in iterable: - yield x - - diff --git a/spaces/wadhwani-ai/KKMS-Smart-Search-Demo/src/langchain_utils.py b/spaces/wadhwani-ai/KKMS-Smart-Search-Demo/src/langchain_utils.py deleted file mode 100644 index 90ac08684e1adc36d9ff5b668f10b0e8c7d3883a..0000000000000000000000000000000000000000 --- a/spaces/wadhwani-ai/KKMS-Smart-Search-Demo/src/langchain_utils.py +++ /dev/null @@ -1,1010 +0,0 @@ -import src.constants as constants_utils -import src.data_loader as data_loader_utils -import src.utils as utils - -from langchain.llms import OpenAI -from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter -from langchain.chains.summarize import load_summarize_chain -from langchain.docstore.document import Document -from langchain.embeddings.openai import OpenAIEmbeddings -import openai -from langchain.vectorstores import Chroma -import chromadb -from langchain.chains.question_answering import load_qa_chain -from langchain.chains.qa_with_sources import load_qa_with_sources_chain -from langchain.prompts import PromptTemplate -from llama_index import GPTVectorStoreIndex, GPTListIndex -from langchain.vectorstores import FAISS - -import pickle -import shutil -from typing import Dict, List, Optional -import pandas as pd -from datetime import datetime -import os -os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY') - -import logging -logging.basicConfig( - format="%(asctime)s %(levelname)s [%(name)s] %(message)s", - level=logging.INFO, - datefmt="%Y-%m-%d %H:%M:%S" -) -logger = logging.getLogger(__name__) - -import warnings -warnings.filterwarnings('ignore') - - - -class LANGCHAIN_UTILS: - def __init__(self, - index_type=constants_utils.INDEX_TYPE, - 
load_from_existing_index_store=constants_utils.LOAD_FROM_EXISTING_INDEX_STORE - ): - self.index_type = index_type - self.load_from_existing_index_store = load_from_existing_index_store - - # Temporary index in the current context for the doc_type in consideration - self.index = None - # Master index which contains data from multiple sources (PDF, Online PDF, Text files, URLs, etc. It gets updated on Uploading the data from new files/urls without downtime of the application on-demand.) - self.master_index = None - - # Data source wise index - self.index_category_doc_type_wise_index = dict( - (ic, dict( - (ds, None) for ds in list(constants_utils.DATA_SOURCES.values())) - ) for ic in constants_utils.INDEX_CATEGORY) - # Initialize master index for each INDEX_CATEGORY - for ic in constants_utils.INDEX_CATEGORY: - self.index_category_doc_type_wise_index[ic][constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE] = None - - # Data loaded as a Document format in the current context for the doc_type in consideration - self.documents = [] - - # Instantiate data_loader_utils class object - self.data_loader_utils_obj = data_loader_utils.DATA_LOADER() - # Instantiate UTILS class object - self.utils_obj = utils.UTILS() - - # Initialize embeddings (we can also use other embeddings) - self.embeddings = OpenAIEmbeddings(openai_api_key=os.getenv('OPENAI_API_KEY')) - # Initialize LLM model - self.llm = OpenAI( - temperature=0, - max_tokens=constants_utils.LLM_RESPONSE_MAX_TOKENS, - model_name=constants_utils.LLM_BASE_MODEL_NAME - ) - - # Global history for AgGPT widget - self.global_history = [ - { - "role": "assistant", - "content": "Hi, I am a chatbot. I can converse in English. I can answer your questions about farming in India. Ask me anything!" - } - ] - - # Index category - doc_type wise data sources to display in widget - self.index_category_doc_type_wise_data_sources = {} - - - def user( - self, - user_message, - history - ): - history = history + [[user_message, None]] - self.global_history = self.global_history + [{"role": "user", "content": user_message}] - return "", history - - - def get_chatgpt_response( - self, - history - ): - output = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=history) - history.append({"role": "assistant", "content": output.choices[0].message.content}) - return output.choices[0].message.content, history - - - def bot( - self, - history - ): - response, self.global_history = self.get_chatgpt_response(self.global_history) - history[-1][1] = response - return history - - - def clear_history( - self, - lang="English" - ): - self.global_history = [{"role": "assistant", "content": "Hi, I am a chatbot. I can converse in {}. I can answer your questions about farming in India. Ask me anything!".format(lang)}] - return None - - - def generate_prompt_template( - self, - prompt_type, - input_variables - ): - prompt_template = '' - - if prompt_type == 'summarize': - prompt_template = """Write a concise summary of the following: - - {text} - - SUMMARIZE IN ENGLISH:""" - - elif prompt_type == 'qa': - prompt_template = """You are a helpful AI assistant. Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. If the question is not related to the context, politely respond that you are tuned to only answer questions that are related to the context. 
- - {context} - - Question: {question} - - Answer in English:""" - - # Working good, but truncated answer - prompt_template = """You are a helpful AI assistant. Use the following pieces of context to answer the question at the end. Start the answer by giving short summary and write the answer starting with Here are some of the key points:. Write each sentence separately with numbering. If you don't know the answer, just say that you don't know, don't try to make up an answer. If the question is not related to the context, politely respond that you are tuned to only answer questions that are related to the context. - - {context} - - Question: {question} - - Answer in English:""" - - - prompt_template = """You are a helpful AI assistant. Use the following pieces of context to answer the question comprehensively at the end. Start the answer by giving short summary and write the answer starting with Here are some of the key points:. Write each sentence separately with numbering. If you don't know the answer, just say that you don't know, don't try to make up an answer. If the question is not related to the context, politely respond that you are tuned to only answer questions that are related to the context. - - {context} - - Question: {question} - - Answer in English:""" - - elif prompt_type == 'weather': - prompt_template = """ - What would be the weather based on the below data: - - {text} - """ - - PROMPT = PromptTemplate(template=prompt_template, input_variables=input_variables) - return PROMPT - - - def get_textual_summary( - self, - text, - chain_type="stuff", - custom_prompt=True, - prompt_type='summarize' - ): - texts = [text] - docs = [Document(page_content=t) for t in texts[:3]] - - if custom_prompt: - PROMPT = self.generate_prompt_template( - prompt_type=prompt_type, - input_variables=["text"] - ) - chain = load_summarize_chain(self.llm, chain_type=chain_type, prompt=PROMPT) - else: - chain = load_summarize_chain(self.llm, chain_type=chain_type) - - text_summary = chain.run(docs) - return text_summary - - - def get_weather_forecast_summary( - self, - text, - chain_type="stuff" - ): - text = f""" - What would be the weather based on the below data: - {text} - - Give simple response without technical numbers which can be explained to human. 
- """ - texts = [text] - docs = [Document(page_content=t) for t in texts[:3]] - - chain = load_summarize_chain(self.llm, chain_type=chain_type) - text_summary = chain.run(docs) - - return text_summary - - - def get_answer_from_para( - self, - para, - question, - chain_type="stuff", - custom_prompt=True, - prompt_type='qa' - ): - # Prepare data (Split paragraph into chunks of small documents) - text_splitter = CharacterTextSplitter( - chunk_size=constants_utils.TEXT_SPLITTER_CHUNK_SIZE, - chunk_overlap=constants_utils.TEXT_SPLITTER_CHUNK_OVERLAP, - separator=constants_utils.TEXT_SPLITTER_SEPARATOR - ) - texts = text_splitter.split_text(para) - - if self.index_type == 'FAISS': - # Find similar docs that are relevant to the question - docsearch = FAISS.from_texts( - texts, self.embeddings, - metadatas=[{"source": str(i+1)} for i in range(len(texts))] - ) - - elif self.index_type == 'Chroma': - # Find similar docs that are relevant to the question - docsearch = Chroma.from_texts( - texts, self.embeddings, - metadatas=[{"source": str(i+1)} for i in range(len(texts))] - ) - - # Search for the similar docs - docs = docsearch.similarity_search(question, k=constants_utils.ANSWER_SIMILARITY_TOP_K) - - # Create a Chain for question answering - if custom_prompt: - PROMPT = self.generate_prompt_template( - prompt_type=prompt_type, - input_variables=["context", "question"] - ) - chain = load_qa_chain(self.llm, chain_type=chain_type, prompt=PROMPT) - else: - # chain = load_qa_with_sources_chain(self.llm, chain_type=chain_type) - chain = load_qa_chain(self.llm, chain_type=chain_type) - # chain.run(input_documents=docs, question=question) - - out_dict = chain({"input_documents": docs, "question": question}, return_only_outputs=True) - return out_dict['output_text'] - - - def load_documents( - self, - doc_type, - doc_filepath='', - urls=[] - ): - """ - Load data in Document format of the given doc_type from either doc_filepath or list of urls. - It can load multiple files/urls in one shot. - - Args: - doc_type: can be any of [pdf, online_pdf, urls, textfile] - doc_filepath: can be a directory or a filepath - urls: list of urls - """ - - logger.info(f'Loading {doc_type} data into Documents format') - - if doc_type == 'pdf': - # Load data from PDFs stored in local directory - self.documents.extend( - self.data_loader_utils_obj.load_documents_from_pdf( - doc_filepath=doc_filepath, - doc_type=doc_type - )) - - elif doc_type == 'online_pdf': - # Load data from PDFs stored in local directory - self.documents.extend( - self.data_loader_utils_obj.load_documents_from_pdf( - urls=urls, - doc_type=doc_type - )) - - elif doc_type == 'urls': - # Load data from URLs - self.documents.extend( - self.data_loader_utils_obj.load_documents_from_urls( - urls=urls, - doc_type=doc_type - )) - - elif doc_type == 'textfile': - # Load data from text files & Convert texts into Document format - self.documents.extend( - self.data_loader_utils_obj.load_documents_from_text( - doc_filepath=doc_filepath, - doc_type=doc_type - )) - - elif doc_type == 'directory': - # Load data from local directory - self.documents.extend( - self.data_loader_utils_obj.load_documents_from_directory( - doc_filepath=doc_filepath, - doc_type=doc_type - )) - - logger.info(f'{doc_type} data into Documents format loaded successfully!') - - - def create_index( - self - ): - if not self.documents: - logger.warning(f'Empty documents. 
Index cannot be created!') - return None - - logger.info(f'Creating index') - - text_splitter = CharacterTextSplitter( - chunk_size=constants_utils.TEXT_SPLITTER_CHUNK_SIZE, - chunk_overlap=constants_utils.TEXT_SPLITTER_CHUNK_OVERLAP, - separator=constants_utils.TEXT_SPLITTER_SEPARATOR - ) - self.documents = text_splitter.split_documents(self.documents) - - ############## Build the Vector store for docs ############## - # Vector store using Facebook AI Similarity Search - if self.index_type == 'FAISS': - self.index = FAISS.from_documents( - self.documents, - self.embeddings - ) - - # Vector store using Chroma DB - elif self.index_type == 'Chroma': - if not os.path.exists(self.index_filepath): - os.makedirs(self.index_filepath) - - self.index = Chroma.from_documents( - self.documents, - self.embeddings, - persist_directory=self.index_filepath - ) - - # Vector store using GPT vector index - elif self.index_type == 'GPTVectorStoreIndex': - self.index = GPTVectorStoreIndex.from_documents(self.documents) - - logger.info(f'Index created successfully!') - return self.index - - - def get_index_filepath( - self, - index_category, - doc_type - ): - if doc_type == 'master': - self.index_filepath = os.path.join( - constants_utils.OUTPUT_PATH, f'index_{index_category}') if self.index_type in ['FAISS', 'Chroma'] else os.path.join(constants_utils.OUTPUT_PATH, f'index_{index_category}.json') - else: - self.index_filepath = os.path.join( - constants_utils.OUTPUT_PATH, f'index_{index_category}', f'index_{doc_type}') if self.index_type in ['FAISS', 'Chroma'] else os.path.join(constants_utils.OUTPUT_PATH, f'index_{index_category}', f'index_{doc_type}.json') - - return self.index_filepath - - - def load_master_doctype_indices_for_index_category( - self, - index_category - ): - logger.info(f'Loading master and doc_type indices for: {index_category}') - - # Set master index of index_category = None - self.index_category_doc_type_wise_index[index_category][constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE] = None - - for doc_type in self.index_category_doc_type_wise_index[index_category].keys(): - self.index = None - self.index_filepath = self.get_index_filepath( - index_category=index_category, - doc_type=doc_type - ) - self.load_index() - # Set master/doc_type index - self.index_category_doc_type_wise_index[index_category][doc_type] = self.index - - logger.info(f'Master and doc_type indices for: {index_category} loaded successfully!') - - - def load_create_index( - self - ): - logger.info(f'Loading/Creating index for each index_category') - - for index_category in constants_utils.INDEX_CATEGORY: - # Load master index_category index if self.load_from_existing_index_store == True - if self.load_from_existing_index_store: - self.load_master_doctype_indices_for_index_category(index_category) - - # For any reason, if master index is not loaded then create the new index/vector store - if not self.index_category_doc_type_wise_index[index_category][constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE]: - logger.info(f'Creating a new Vector/Index store for: {index_category}') - - doc_filepath = os.path.join(constants_utils.DATA_PATH, index_category) - urls = [] - - # Build the Vector/Index store - for doc_type in list(constants_utils.DATA_SOURCES.values()): - logger.info(f'Creating a new Vector/Index store for: {index_category} from data source: {doc_type}') - - index = None - if doc_type in ['pdf', 'textfile']: - index = self.create_store_index( - doc_type=doc_type, - doc_filepath=doc_filepath, - 
index_category=index_category - ) - else: - # Build the Vector/Index store from web urls - index = self.create_store_index( - doc_type=doc_type, - urls=urls, - index_category=index_category - ) - - if index: - self.index_category_doc_type_wise_index[index_category][doc_type] = index - - logger.info(f'New Vector/Index store for: {index_category} from data source: {doc_type} created successfully!') - - logger.info(f'New Vector/Index store for: {index_category} created successfully!') - - # Merge index of each doc_type into a single index_category - self.merge_store_master_index( - index_category=index_category - ) - - logger.info(f'Index for each index_category loaded successfully!') - - - def create_store_index( - self, - doc_type='pdf', - doc_filepath=constants_utils.DATA_PATH, - urls=[], - index_category=constants_utils.INDEX_CATEGORY[0] - ): - logger.info(f'Creating and storing {doc_type} index') - - self.documents = [] - self.index = None - - self.index_filepath = self.get_index_filepath( - index_category=index_category, - doc_type=doc_type - ) - - # Delete the old index file - shutil.rmtree(self.index_filepath, ignore_errors=True) - logger.info(f'{self.index_filepath} deleted.') - - # Load data in Documents format that can be consumed for index creation - self.load_documents( - doc_type, - doc_filepath, - urls - ) - - # Create the index from documents for search/retrieval - self.index = self.create_index() - - # Store index - self.store_index( - index=self.index, - index_filepath=self.index_filepath - ) - - logger.info(f'{doc_type} index created and stored successfully!') - # Return the index of the given doc_type (this is an index for a single doc_type). Indices from multiple doc_types should be merged later on in the master index so that query could be made from a single index. 
- return self.index - - - def store_index( - self, - index, - index_filepath - ): - if not index: - logger.warning(f'Cannot write an empty index to: {index_filepath}!') - return - - logger.info(f'Saving index to: {index_filepath}') - - if not os.path.exists(index_filepath) and os.path.isdir(index_filepath): - os.makedirs(index_filepath) - - if self.index_type == 'FAISS': - index.save_local(index_filepath) - - elif self.index_type == 'Chroma': - index.persist() - - elif self.index_type == 'GPTVectorStoreIndex': - index.save_to_disk(index_filepath) - - elif self.index_type == 'pickle': - with open(index_filepath, "wb") as f: - pickle.dump(index, f) - - logger.info(f'Index saved to: {index_filepath} successfully!') - - - def load_index( - self - ): - logger.info(f'Loading index from: {self.index_filepath}') - - if not os.path.exists(self.index_filepath): - logger.warning(f"Cannot load index from {self.index_filepath} as the path doest not exist!") - return - - if self.index_type == 'FAISS': - self.index = FAISS.load_local(self.index_filepath, self.embeddings) - - elif self.index_type == 'Chroma': - self.index = Chroma( - persist_directory=self.index_filepath, - embedding_function=self.embeddings - ) - - elif self.index_type == 'GPTVectorStoreIndex': - self.index = GPTVectorStoreIndex.load_from_disk(self.index_filepath) - - elif self.index_type == 'pickle': - with open(self.index_filepath, "rb") as f: - self.index = pickle.load(f) - - logger.info(f'Index loaded from: {self.index_filepath} successfully!') - - - def convert_text_to_documents( - self, - text_list=[] - ): - """ - Converts the list of text data to Documents format that can be feed to GPT API to build the Vector store - """ - - from llama_index import Document - documents = [Document(t) for t in text_list] - return documents - - - def merge_documents_from_different_sources( - self, - doc_documents, - url_documents - ): - # Build the Vector store for docs - doc_index = GPTVectorStoreIndex.from_documents(doc_documents) - # Build the Vector store for URLs - url_index = GPTVectorStoreIndex.from_documents(url_documents) - - # Set summary of each index - doc_index.set_text("index_from_docs") - url_index.set_text("index_from_urls") - - # Merge index of different data sources - index = GPTListIndex([doc_index, url_index]) - - return index - - - def merge_store_master_index( - self, - index_category - ): - """ - Merge multiple doc_type indices into a single master index. Query/search would be performed on this merged index. 
- - Args: - index_category: index_category (can be any of: [crops, fruits, pest_management, govt_policy, soil, etc.]) - """ - logger.info('Merging doc_type indices of different index categories into a master index') - - self.index_category_doc_type_wise_index[index_category][constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE] = None - doc_type_indices = self.index_category_doc_type_wise_index[index_category] - - if self.index_type == 'FAISS': - for doc_type, index in doc_type_indices.items(): - if doc_type == constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE: - # Only merge the non-master doc_type_indices - continue - if not index or not isinstance(index, FAISS): - logger.warning(f'{doc_type} index to be merged is not an instance of type langchain.vectorstores.faiss.FAISS') - continue - if not self.index_category_doc_type_wise_index[index_category][constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE]: - self.index_category_doc_type_wise_index[index_category][constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE] = index - else: - self.index_category_doc_type_wise_index[index_category][constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE].merge_from(index) - - elif self.index_type == 'Chroma': - for doc_type, index in doc_type_indices.items(): - if not index or not isinstance(index, Chroma): - logger.warning(f'{doc_type} index to be merged is not an instance of type langchain.vectorstores.Chroma') - continue - raise NotImplementedError - - elif self.index_type == 'GPTVectorStoreIndex': - for doc_type, index in doc_type_indices.items(): - if not index or not isinstance(index, GPTVectorStoreIndex): - logger.warning(f'{doc_type} index to be merged is not an instance of type llama_index.GPTVectorStoreIndex') - continue - raise NotImplementedError - - # Store index_category master index - self.store_index( - index=self.index_category_doc_type_wise_index[index_category][constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE], - index_filepath=self.get_index_filepath( - index_category=index_category, - doc_type=constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE - ) - ) - - logger.info('doc_type indices of different index categories into a master index merged successfully!') - - - def init_chromadb(self): - logger.info('Initializing Chroma DB') - - if not os.path.exists(self.index_filepath): - os.makedirs(self.index_filepath) - - client_settings = chromadb.config.Settings( - chroma_db_impl="duckdb+parquet", - persist_directory=self.index_filepath, - anonymized_telemetry=False - ) - - self.index = Chroma( - collection_name="langchain_store", - embedding_function=self.embeddings, - client_settings=client_settings, - persist_directory=self.index_filepath, - ) - - logger.info('Chroma DB initialized successfully!') - - - def query_chromadb( - self, - question, - k=1 - ): - return self.index.similarity_search(query=question, k=k) - - - def query(self, - question, - question_category, - mode=constants_utils.MODE, - response_mode=constants_utils.RESPONSE_MODE, - similarity_top_k=constants_utils.SIMILARITY_TOP_K, - required_keywords=[], - exclude_keywords=[], - verbose=False - ): - ''' - Args: - mode: can be any of [default, embedding] - response_mode: can be any of [default, compact, tree_summarize] - ''' - logger.info(f'question category: {question_category}; question: {question}') - - response = None - - # Get the index of the given question_category - index = self.index_category_doc_type_wise_index[question_category][constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE] - - if not index: - 
logger.error(f'Index for {question_category} not found! That means no PDFs, Text files, or URLs have been ingested and indexed so far. Ingest the new data for {question_category} and then querying again.') - return response - - if self.index_type == 'FAISS': - response = index.similarity_search( - question, - k=similarity_top_k - ) - - elif self.index_type == 'Chroma': - response = index.similarity_search( - question, - k=similarity_top_k - ) - - elif self.index_type == 'GPTVectorStoreIndex': - # Querying the index - response = index.query( - question, - mode=mode, - response_mode=response_mode, - similarity_top_k=similarity_top_k, - required_keywords=required_keywords, - exclude_keywords=exclude_keywords, - verbose=verbose - ) - - return response - - - def load_uploaded_documents( - self, - doc_type, - files_or_urls - ): - logger.info(f'Loading uploaded documents from: {doc_type}') - - if doc_type == 'pdf': - if not isinstance(files_or_urls, list): - files_or_urls = [files_or_urls] - for pdf in files_or_urls: - if not pdf.name.endswith('.pdf'): - logger.warning(f'Found a file other than .pdf format. Cannot load {pdf.name} file!') - continue - logger.info(f'Loading PDF from: {pdf.name}') - # Load PDF as documents - self.documents.extend( - self.data_loader_utils_obj.load_documents_from_pdf( - doc_filepath=pdf.name, - doc_type=doc_type - ) - ) - - elif doc_type == 'textfile': - if not isinstance(files_or_urls, list): - files_or_urls = [files_or_urls] - for text_file in files_or_urls: - if not text_file.name.endswith('.txt'): - logger.warning(f'Found a file other than .txt format. Cannot load {text_file.name} file!') - continue - logger.info(f'Loading textfile from: {text_file.name}') - # Load textfile as documents - self.documents.extend( - self.data_loader_utils_obj.load_documents_from_text( - doc_filepath=text_file.name, - doc_type=doc_type - ) - ) - - elif doc_type == 'online_pdf': - files_or_urls = self.utils_obj.split_text(files_or_urls) - # Load online_pdfs as documents - self.documents.extend( - self.data_loader_utils_obj.load_documents_from_pdf( - doc_type=doc_type, - urls=files_or_urls - ) - ) - - elif doc_type == 'urls': - files_or_urls = self.utils_obj.split_text(files_or_urls) - # Load URLs as documents - self.documents.extend( - self.data_loader_utils_obj.load_documents_from_urls( - doc_type=doc_type, - urls=files_or_urls - ) - ) - - logger.info(f'Uploaded documents from: {doc_type} loaded successfully!') - - - def upload_data( - self, - doc_type, - files_or_urls, - index_category - ): - logger.info(f'Uploading data for: {index_category}; from: {doc_type}') - - self.documents = [] - self.index = None - - # Create documents of the uploaded files - self.load_uploaded_documents( - doc_type, - files_or_urls - ) - - # Create the index from documents for search/retrieval - self.index = self.create_index() - - # Update the existing index with the newly data - self.upsert_index( - doc_type=doc_type, - index_category=index_category - ) - - logger.info(f'{index_category}-{doc_type} data uploaded successfully!') - - - def upsert_index( - self, - doc_type, - index_category - ): - """ - Updates the index of the given index_category-doc_type, if present. - Creates a new index if index_category-doc_type index is not present. - Also updates the master index for the given index_category. 
- """ - if not self.index: - return - - logger.info(f'Upserting index for: {index_category}-{doc_type}') - - if not self.index_category_doc_type_wise_index.get(index_category, None): - """ - If index_category index does not exists - Steps: - - set index_category index - - set doc_type index - - Store new index_category index as master - - Store new doc_type index - """ - logger.info(f'Master index does not exist for: {index_category}. A new {index_category} master index & {doc_type} index would be created.') - self.index_category_doc_type_wise_index.setdefault(index_category, {}) - # Set a master index only if it doesn't exist. Else keep it's value as-it-is. - self.index_category_doc_type_wise_index[index_category][constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE] = self.index - # Set an index for the given doc_type only if it doesn't exist. Else keep it's value as-it-is. - self.index_category_doc_type_wise_index[index_category][doc_type] = self.index - - elif not self.index_category_doc_type_wise_index[index_category].get(doc_type, None): - """ - If doc_type index does not exists - Steps: - - set doc_type index - - if master index does not exist for the index_category - set a master index - - if master index exists - update the master index to merge it with doc_type index - - Store new/updated index_category index as master - - Store new doc_type index - """ - logger.info(f'{doc_type} index does not exist for: {index_category}-{doc_type}. A new {doc_type} index would be created.') - # create doc_type index - self.index_category_doc_type_wise_index[index_category][doc_type] = self.index - # if master index does not exist for the index_category - create a master index - if not self.index_category_doc_type_wise_index[index_category].get(constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE, None): - logger.info(f'Master index does not exist for: {index_category}-{doc_type}. A new master index would be created.') - self.index_category_doc_type_wise_index[index_category][constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE] = self.index - - else: - """ - If the new document is of the existing index_category & doc_type - Steps: - - if master index does not exist for the index_category - set a master index - - if master index exists - update the master index to merge it with doc_type index - - update the doc_type index - - Store updated index_category index as master - - Store updated doc_type index - """ - # if master index does not exist for the index_category - create a master index - if not self.index_category_doc_type_wise_index[index_category].get(constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE, None): - logger.info(f'Master index does not exist for: {index_category}-{doc_type}. 
A new master index would be created.') - self.index_category_doc_type_wise_index[index_category][constants_utils.INDEX_CATEGORY_MASTER_INDEX_DOC_TYPE] = self.index - # Merge new self.index with existing doc_type index - self.index_category_doc_type_wise_index[index_category][doc_type].merge_from(self.index) - # Update self.index to store/overwrite the existing index with the updated index - self.index = self.index_category_doc_type_wise_index[index_category][doc_type] - - - # Store newly created/merged index - self.store_index( - index=self.index, - index_filepath=self.get_index_filepath( - index_category=index_category, - doc_type=doc_type - ) - ) - - # Merge and store master index for index_category - self.merge_store_master_index( - index_category=index_category - ) - - logger.info(f'Index for: {index_category}-{doc_type} upserted successful!') - - - def delete_index( - self, - ids: Optional[List[str]] = None, - # filter: Optional[DocumentMetadataFilter] = None, - delete_all: Optional[bool] = None, - ): - """ - Removes vectors by ids, filter, or everything in the datastore. - Multiple parameters can be used at once. - Returns whether the operation was successful. - """ - logger.info(f'Deleting index') - - raise NotImplementedError - - # NOTE: we can delete a specific collection - self.index.delete_collection() - self.index.persist() - - # Or just nuke the persist directory - # !rm -rf self.index_filepath - - - def get_index_category_wise_data_sources( - self - ): - # self.index_category_doc_type_wise_data_sources - for index_category, doc_type in self.index_category_doc_type_wise_index.items(): - self.index_category_doc_type_wise_data_sources.setdefault(index_category, {}) - for dt in doc_type.keys(): - if dt == 'master': - continue - self.index_category_doc_type_wise_data_sources[index_category].setdefault(dt, set()) - if doc_type[dt]: - docs = doc_type[dt].docstore._dict - for doc, val in docs.items(): - if 'source' in val.metadata and val.metadata['source']: - self.index_category_doc_type_wise_data_sources[index_category][dt].add(val.metadata['source']) - - return self.index_category_doc_type_wise_data_sources - - - def save_answer_feeback( - self, - question_category, - question, - answer, - feedback - ): - logger.info(f'Question category: {question_category}') - logger.info(f'Question: {question}') - logger.info(f'Answer: {answer}') - logger.info(f'Answer feedback is: {feedback}') - - feedback_filepath = os.path.join( - constants_utils.OUTPUT_PATH_ANSWER_FEEDBACK, - f'{constants_utils.OUTPUT_PATH_ANSWER_FEEDBACK_FILE_PREFIX}_{question_category}.tsv' - ) - - if os.path.exists(feedback_filepath): - df = pd.read_csv(feedback_filepath, sep=constants_utils.OUTPUT_PATH_ANSWER_FEEDBACK_FILE_SAVE_SEPARATOR) - else: - df = pd.DataFrame(columns=['question_category', 'question', 'answer', 'feedback', 'timestamp']) - - # Append answer feedback to df - df.loc[len(df)] = { - 'question_category': question_category, - 'question': question, - 'answer': answer, - 'feedback': feedback, - 'timestamp': datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S.%f')[:-3] - } - - # Save df into TSV format - df.to_csv(feedback_filepath, sep=constants_utils.OUTPUT_PATH_ANSWER_FEEDBACK_FILE_SAVE_SEPARATOR, index=False, header=True) - - - def get_sources_of_relevant_paragraphs( - self, - relevant_paragraphs - ): - sources_relevant_paragraphs = [] - # Extract information on Source of relevant_paragraphs - for indx, doc in enumerate(relevant_paragraphs): - if 'source' in doc.metadata and 'page' in doc.metadata and 
doc.metadata['source'].endswith('.pdf'): - # Need to add +1 as PyPDFLoader sets page number from 0th-index - relevant_paragraphs[indx].metadata['page'] += 1 - sources_relevant_paragraphs = [doc.metadata for doc in relevant_paragraphs] - - return sources_relevant_paragraphs - - - def clean_relevant_paragraphs( - self, - relevant_paragraphs - ): - cleaned_relevant_paragraphs = [] - for doc in relevant_paragraphs: - cleaned_relevant_paragraphs.append(self.utils_obj.replace_newlines_and_spaces(doc.page_content)) - - return cleaned_relevant_paragraphs diff --git a/spaces/wanglettes/zw_chatgpt_01/app.py b/spaces/wanglettes/zw_chatgpt_01/app.py deleted file mode 100644 index 14a55567e7b08fa35f81c63b3049b8949eff38a7..0000000000000000000000000000000000000000 --- a/spaces/wanglettes/zw_chatgpt_01/app.py +++ /dev/null @@ -1,80 +0,0 @@ -from typing import List, Tuple, Dict, Generator -from langchain.llms import OpenAI -import gradio as gr -model_name = "gpt-3.5-turbo" -LLM = OpenAI(model_name=model_name, temperature=0.1) -def create_history_messages(history: List[Tuple[str, str]]) -> List[dict]: - history_messages = [{"role": "user", "content": m[0]} for m in history] - history_messages.extend([{"role": "assistant", "content": m[1]} for m in history]) - return history_messages - -def create_formatted_history(history_messages: List[dict]) -> List[Tuple[str, str]]: - formatted_history = [] - user_messages = [] - assistant_messages = [] - - for message in history_messages: - if message["role"] == "user": - user_messages.append(message["content"]) - elif message["role"] == "assistant": - assistant_messages.append(message["content"]) - - if user_messages and assistant_messages: - formatted_history.append( - ("".join(user_messages), "".join(assistant_messages)) - ) - user_messages = [] - assistant_messages = [] - - # append any remaining messages - if user_messages: - formatted_history.append(("".join(user_messages), None)) - elif assistant_messages: - formatted_history.append((None, "".join(assistant_messages))) - - return formatted_history - -def chat( - message: str, state: List[Dict[str, str]], client = LLM.client -) -> Generator[Tuple[List[Tuple[str, str]], List[Dict[str, str]]], None, None]: - history_messages = state - if history_messages == None: - history_messages = [] - history_messages.append({"role": "system", "content":"用中文回答问题"}) - - history_messages.append({"role": "user", "content": message}) - # We have no content for the assistant's response yet but we will update this: - history_messages.append({"role": "assistant", "content": ""}) - - response_message = "" - - chat_generator = client.create( - messages=history_messages, stream=True, model=model_name - ) - - for chunk in chat_generator: - if "choices" in chunk: - for choice in chunk["choices"]: - if "delta" in choice and "content" in choice["delta"]: - new_token = choice["delta"]["content"] - # Add the latest token: - response_message += new_token - # Update the assistant's response in our model: - history_messages[-1]["content"] = response_message - - if "finish_reason" in choice and choice["finish_reason"] == "stop": - break - formatted_history = create_formatted_history(history_messages) - yield formatted_history, history_messages -chatbot = gr.Chatbot(label="Chat").style(color_map=("yellow", "purple")) -iface = gr.Interface( - fn=chat, - inputs=[ - gr.Textbox(placeholder="在这里数据您的问题", label="Message"), - "state", - ], - outputs=[chatbot, "state"], - allow_flagging="never", -) - -iface.queue().launch() \ No newline at end of file diff 
--git a/spaces/wangrongsheng/ChatImprovement/crazy_functions/test_project/python/dqn/policies.py b/spaces/wangrongsheng/ChatImprovement/crazy_functions/test_project/python/dqn/policies.py deleted file mode 100644 index 4ecf39a5fc04b24ad1b809232b186728366987b6..0000000000000000000000000000000000000000 --- a/spaces/wangrongsheng/ChatImprovement/crazy_functions/test_project/python/dqn/policies.py +++ /dev/null @@ -1,237 +0,0 @@ -from typing import Any, Dict, List, Optional, Type - -import gym -import torch as th -from torch import nn - -from stable_baselines3.common.policies import BasePolicy, register_policy -from stable_baselines3.common.torch_layers import BaseFeaturesExtractor, FlattenExtractor, NatureCNN, create_mlp -from stable_baselines3.common.type_aliases import Schedule - - -class QNetwork(BasePolicy): - """ - Action-Value (Q-Value) network for DQN - - :param observation_space: Observation space - :param action_space: Action space - :param net_arch: The specification of the policy and value networks. - :param activation_fn: Activation function - :param normalize_images: Whether to normalize images or not, - dividing by 255.0 (True by default) - """ - - def __init__( - self, - observation_space: gym.spaces.Space, - action_space: gym.spaces.Space, - features_extractor: nn.Module, - features_dim: int, - net_arch: Optional[List[int]] = None, - activation_fn: Type[nn.Module] = nn.ReLU, - normalize_images: bool = True, - ): - super(QNetwork, self).__init__( - observation_space, - action_space, - features_extractor=features_extractor, - normalize_images=normalize_images, - ) - - if net_arch is None: - net_arch = [64, 64] - - self.net_arch = net_arch - self.activation_fn = activation_fn - self.features_extractor = features_extractor - self.features_dim = features_dim - self.normalize_images = normalize_images - action_dim = self.action_space.n # number of actions - q_net = create_mlp(self.features_dim, action_dim, self.net_arch, self.activation_fn) - self.q_net = nn.Sequential(*q_net) - - def forward(self, obs: th.Tensor) -> th.Tensor: - """ - Predict the q-values. - - :param obs: Observation - :return: The estimated Q-Value for each action. - """ - return self.q_net(self.extract_features(obs)) - - def _predict(self, observation: th.Tensor, deterministic: bool = True) -> th.Tensor: - q_values = self.forward(observation) - # Greedy action - action = q_values.argmax(dim=1).reshape(-1) - return action - - def _get_constructor_parameters(self) -> Dict[str, Any]: - data = super()._get_constructor_parameters() - - data.update( - dict( - net_arch=self.net_arch, - features_dim=self.features_dim, - activation_fn=self.activation_fn, - features_extractor=self.features_extractor, - ) - ) - return data - - -class DQNPolicy(BasePolicy): - """ - Policy class with Q-Value Net and target net for DQN - - :param observation_space: Observation space - :param action_space: Action space - :param lr_schedule: Learning rate schedule (could be constant) - :param net_arch: The specification of the policy and value networks. - :param activation_fn: Activation function - :param features_extractor_class: Features extractor to use. - :param features_extractor_kwargs: Keyword arguments - to pass to the features extractor. 
- :param normalize_images: Whether to normalize images or not, - dividing by 255.0 (True by default) - :param optimizer_class: The optimizer to use, - ``th.optim.Adam`` by default - :param optimizer_kwargs: Additional keyword arguments, - excluding the learning rate, to pass to the optimizer - """ - - def __init__( - self, - observation_space: gym.spaces.Space, - action_space: gym.spaces.Space, - lr_schedule: Schedule, - net_arch: Optional[List[int]] = None, - activation_fn: Type[nn.Module] = nn.ReLU, - features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor, - features_extractor_kwargs: Optional[Dict[str, Any]] = None, - normalize_images: bool = True, - optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, - optimizer_kwargs: Optional[Dict[str, Any]] = None, - ): - super(DQNPolicy, self).__init__( - observation_space, - action_space, - features_extractor_class, - features_extractor_kwargs, - optimizer_class=optimizer_class, - optimizer_kwargs=optimizer_kwargs, - ) - - if net_arch is None: - if features_extractor_class == FlattenExtractor: - net_arch = [64, 64] - else: - net_arch = [] - - self.net_arch = net_arch - self.activation_fn = activation_fn - self.normalize_images = normalize_images - - self.net_args = { - "observation_space": self.observation_space, - "action_space": self.action_space, - "net_arch": self.net_arch, - "activation_fn": self.activation_fn, - "normalize_images": normalize_images, - } - - self.q_net, self.q_net_target = None, None - self._build(lr_schedule) - - def _build(self, lr_schedule: Schedule) -> None: - """ - Create the network and the optimizer. - - :param lr_schedule: Learning rate schedule - lr_schedule(1) is the initial learning rate - """ - - self.q_net = self.make_q_net() - self.q_net_target = self.make_q_net() - self.q_net_target.load_state_dict(self.q_net.state_dict()) - - # Setup optimizer with initial learning rate - self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs) - - def make_q_net(self) -> QNetwork: - # Make sure we always have separate networks for features extractors etc - net_args = self._update_features_extractor(self.net_args, features_extractor=None) - return QNetwork(**net_args).to(self.device) - - def forward(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor: - return self._predict(obs, deterministic=deterministic) - - def _predict(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor: - return self.q_net._predict(obs, deterministic=deterministic) - - def _get_constructor_parameters(self) -> Dict[str, Any]: - data = super()._get_constructor_parameters() - - data.update( - dict( - net_arch=self.net_args["net_arch"], - activation_fn=self.net_args["activation_fn"], - lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone - optimizer_class=self.optimizer_class, - optimizer_kwargs=self.optimizer_kwargs, - features_extractor_class=self.features_extractor_class, - features_extractor_kwargs=self.features_extractor_kwargs, - ) - ) - return data - - -MlpPolicy = DQNPolicy - - -class CnnPolicy(DQNPolicy): - """ - Policy class for DQN when using images as input. - - :param observation_space: Observation space - :param action_space: Action space - :param lr_schedule: Learning rate schedule (could be constant) - :param net_arch: The specification of the policy and value networks. - :param activation_fn: Activation function - :param features_extractor_class: Features extractor to use. 
- :param normalize_images: Whether to normalize images or not, - dividing by 255.0 (True by default) - :param optimizer_class: The optimizer to use, - ``th.optim.Adam`` by default - :param optimizer_kwargs: Additional keyword arguments, - excluding the learning rate, to pass to the optimizer - """ - - def __init__( - self, - observation_space: gym.spaces.Space, - action_space: gym.spaces.Space, - lr_schedule: Schedule, - net_arch: Optional[List[int]] = None, - activation_fn: Type[nn.Module] = nn.ReLU, - features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN, - features_extractor_kwargs: Optional[Dict[str, Any]] = None, - normalize_images: bool = True, - optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, - optimizer_kwargs: Optional[Dict[str, Any]] = None, - ): - super(CnnPolicy, self).__init__( - observation_space, - action_space, - lr_schedule, - net_arch, - activation_fn, - features_extractor_class, - features_extractor_kwargs, - normalize_images, - optimizer_class, - optimizer_kwargs, - ) - - -register_policy("MlpPolicy", MlpPolicy) -register_policy("CnnPolicy", CnnPolicy) diff --git a/spaces/whitphx/gradio-static-test/dist/assets/index-f0702dd5.js b/spaces/whitphx/gradio-static-test/dist/assets/index-f0702dd5.js deleted file mode 100644 index 5ca88f8f10e1b37968d920ab4de688bf9503e809..0000000000000000000000000000000000000000 --- a/spaces/whitphx/gradio-static-test/dist/assets/index-f0702dd5.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as ne,i as le,s as $,C as Q,D as d,h as y,F as P,G as H,r as v,ag as zt,H as F,M as J,u as de,b as L,I as j,ac as Ve,al as At,q as p,n as x,t as A,p as ee,O as Tt,v as It,U as ge,a6 as Bt,ad as xe,ae as et,E as St,N as R,K as q,an as Et,a1 as yt,z as ue,e as T,m as B,o as S,af as je,ao as Rt,f as _e,a as V,l as Z,W as Dt,Y as Lt,Z as Ut,$ as jt,y as qt,a0 as Ht,j as Ft,k as Nt}from"../lite.js";import{B as Wt}from"./Button-0391b19a.js";import{B as vt}from"./BlockLabel-a3ec523d.js";/* empty css */import{I as qe}from"./Image-05614c6d.js";import{C as Ot,i as Yt,U as Xt,W as Pt}from"./StaticImage.svelte_svelte_type_style_lang-e0be6656.js";import{I as ke,C as Jt,M as He}from"./ModifyUpload-ee7ccefb.js";import{U as Gt}from"./Upload-a154f660.js";import{E as Qt}from"./Empty-91947ea3.js";import{D as Vt}from"./Download-35908774.js";import"./Blocks-99723874.js";import{U as Zt}from"./UploadText-ca9fa5cb.js";import{E as _l}from"./Image-645ff0ce.js";import"./ModifyUpload.svelte_svelte_type_style_lang-ba6baa96.js";function Kt(t){let e,n,l;return{c(){e=Q("svg"),n=Q("path"),l=Q("path"),d(n,"d","M28.828 3.172a4.094 4.094 0 0 0-5.656 0L4.05 22.292A6.954 6.954 0 0 0 2 27.242V30h2.756a6.952 6.952 0 0 0 4.95-2.05L28.828 8.829a3.999 3.999 0 0 0 0-5.657zM10.91 18.26l2.829 2.829l-2.122 2.121l-2.828-2.828zm-2.619 8.276A4.966 4.966 0 0 1 4.756 28H4v-.759a4.967 4.967 0 0 1 1.464-3.535l1.91-1.91l2.829 2.828zM27.415 7.414l-12.261 12.26l-2.829-2.828l12.262-12.26a2.047 2.047 0 0 1 2.828 0a2 2 0 0 1 0 2.828z"),d(n,"fill","currentColor"),d(l,"d","M6.5 15a3.5 3.5 0 0 1-2.475-5.974l3.5-3.5a1.502 1.502 0 0 0 0-2.121a1.537 1.537 0 0 0-2.121 0L3.415 5.394L2 3.98l1.99-1.988a3.585 3.585 0 0 1 4.95 0a3.504 3.504 0 0 1 0 4.949L5.439 10.44a1.502 1.502 0 0 0 0 2.121a1.537 1.537 0 0 0 2.122 0l4.024-4.024L13 9.95l-4.025 4.024A3.475 3.475 0 0 1 6.5 15z"),d(l,"fill","currentColor"),d(e,"width","100%"),d(e,"height","100%"),d(e,"viewBox","0 0 32 32")},m(a,r){y(a,e,r),P(e,n),P(e,l)},p:H,i:H,o:H,d(a){a&&v(e)}}}class $t extends ne{constructor(e){super(),le(this,e,null,Kt,$,{})}}function 
xt(t){let e,n,l,a,r,i,u;return{c(){e=Q("svg"),n=Q("circle"),l=Q("circle"),a=Q("circle"),r=Q("circle"),i=Q("circle"),u=Q("path"),d(n,"cx","10"),d(n,"cy","12"),d(n,"r","2"),d(n,"fill","currentColor"),d(l,"cx","16"),d(l,"cy","9"),d(l,"r","2"),d(l,"fill","currentColor"),d(a,"cx","22"),d(a,"cy","12"),d(a,"r","2"),d(a,"fill","currentColor"),d(r,"cx","23"),d(r,"cy","18"),d(r,"r","2"),d(r,"fill","currentColor"),d(i,"cx","19"),d(i,"cy","23"),d(i,"r","2"),d(i,"fill","currentColor"),d(u,"fill","currentColor"),d(u,"d","M16.54 2A14 14 0 0 0 2 16a4.82 4.82 0 0 0 6.09 4.65l1.12-.31a3 3 0 0 1 3.79 2.9V27a3 3 0 0 0 3 3a14 14 0 0 0 14-14.54A14.05 14.05 0 0 0 16.54 2Zm8.11 22.31A11.93 11.93 0 0 1 16 28a1 1 0 0 1-1-1v-3.76a5 5 0 0 0-5-5a5.07 5.07 0 0 0-1.33.18l-1.12.31A2.82 2.82 0 0 1 4 16A12 12 0 0 1 16.47 4A12.18 12.18 0 0 1 28 15.53a11.89 11.89 0 0 1-3.35 8.79Z"),d(e,"width","100%"),d(e,"height","100%"),d(e,"viewBox","0 0 32 32")},m(s,f){y(s,e,f),P(e,n),P(e,l),P(e,a),P(e,r),P(e,i),P(e,u)},p:H,i:H,o:H,d(s){s&&v(e)}}}class en extends ne{constructor(e){super(),le(this,e,null,xt,$,{})}}function tn(t){let e,n;return{c(){e=Q("svg"),n=Q("path"),d(n,"fill","currentColor"),d(n,"d","M7 27h23v2H7zm20.38-16.49l-7.93-7.92a2 2 0 0 0-2.83 0l-14 14a2 2 0 0 0 0 2.83L7.13 24h9.59l10.66-10.66a2 2 0 0 0 0-2.83zM15.89 22H8l-4-4l6.31-6.31l7.93 7.92zm3.76-3.76l-7.92-7.93L18 4l8 7.93z"),d(e,"xmlns","http://www.w3.org/2000/svg"),d(e,"width","100%"),d(e,"height","100%"),d(e,"viewBox","0 0 32 32")},m(l,a){y(l,e,a),P(e,n)},p:H,i:H,o:H,d(l){l&&v(e)}}}class nn extends ne{constructor(e){super(),le(this,e,null,tn,$,{})}}function ln(t){let e,n;return{c(){e=Q("svg"),n=Q("path"),d(n,"d","M17 3a2.828 2.828 0 1 1 4 4L7.5 20.5 2 22l1.5-5.5L17 3z"),d(e,"xmlns","http://www.w3.org/2000/svg"),d(e,"width","100%"),d(e,"height","100%"),d(e,"viewBox","0 0 24 24"),d(e,"fill","none"),d(e,"stroke","currentColor"),d(e,"stroke-width","1.5"),d(e,"stroke-linecap","round"),d(e,"stroke-linejoin","round"),d(e,"class","feather feather-edit-2")},m(l,a){y(l,e,a),P(e,n)},p:H,i:H,o:H,d(l){l&&v(e)}}}let tt=class extends ne{constructor(e){super(),le(this,e,null,ln,$,{})}};const Ct=t=>{let e=t.currentTarget;const n=e.getBoundingClientRect(),l=e.naturalWidth/n.width,a=e.naturalHeight/n.height;if(l>a){n.width;const u=e.naturalHeight/l,s=(n.height-u)/2;var r=Math.round((t.clientX-n.left)*l),i=Math.round((t.clientY-n.top-s)*l)}else{const u=e.naturalWidth/a;n.height;const s=(n.width-u)/2;var r=Math.round((t.clientX-n.left-s)*a),i=Math.round((t.clientY-n.top)*a)}return r<0||r>=e.naturalWidth||i<0||i>=e.naturalHeight?null:[r,i]};function sn(t){let e,n;return{c(){e=F("img"),J(e.src,n=t[0])||d(e,"src",n),d(e,"alt","")},m(l,a){y(l,e,a),t[4](e)},p(l,[a]){a&1&&!J(e.src,n=l[0])&&d(e,"src",n)},i:H,o:H,d(l){l&&v(e),t[4](null)}}}function rn(t,e,n){let{image:l}=e,a;const r=de();let i;function u(){i.destroy()}function s(){i&&u(),i=new Ot(a,{autoCropArea:1,cropend(){const o=i.getCroppedCanvas().toDataURL();r("crop",o)}}),r("crop",l)}function f(o){L[o?"unshift":"push"](()=>{a=o,n(1,a)})}return t.$$set=o=>{"image"in o&&n(0,l=o.image)},[l,a,u,s,f]}class Mt extends ne{constructor(e){super(),le(this,e,rn,sn,$,{image:0,destroy:2,create:3})}get image(){return this.$$.ctx[0]}set image(e){this.$$set({image:e}),zt()}get destroy(){return this.$$.ctx[2]}get create(){return this.$$.ctx[3]}}class nt{constructor(e,n){this.x=e,this.y=n}}class lt extends nt{update(e){this.x=e.x,this.y=e.y}moveByAngle(e,n){const l=e+Math.PI/2;this.x=this.x+Math.sin(l)*n,this.y=this.y-Math.cos(l)*n}equalsTo(e){return 
this.x===e.x&&this.y===e.y}getDifferenceTo(e){return new nt(this.x-e.x,this.y-e.y)}getDistanceTo(e){const n=this.getDifferenceTo(e);return Math.sqrt(Math.pow(n.x,2)+Math.pow(n.y,2))}getAngleTo(e){const n=this.getDifferenceTo(e);return Math.atan2(n.y,n.x)}toObject(){return{x:this.x,y:this.y}}}const an=30;class un{constructor({radius:e=an,enabled:n=!0,initialPoint:l={x:0,y:0}}={}){this.radius=e,this._isEnabled=n,this.pointer=new lt(l.x,l.y),this.brush=new lt(l.x,l.y),this.angle=0,this.distance=0,this._hasMoved=!1}enable(){this._isEnabled=!0}disable(){this._isEnabled=!1}isEnabled(){return this._isEnabled}setRadius(e){this.radius=e}getRadius(){return this.radius}getBrushCoordinates(){return this.brush.toObject()}getPointerCoordinates(){return this.pointer.toObject()}getBrush(){return this.brush}getPointer(){return this.pointer}getAngle(){return this.angle}getDistance(){return this.distance}brushHasMoved(){return this._hasMoved}update(e,{both:n=!1}={}){return this._hasMoved=!1,this.pointer.equalsTo(e)&&!n?!1:(this.pointer.update(e),n?(this._hasMoved=!0,this.brush.update(e),!0):(this._isEnabled?(this.distance=this.pointer.getDistanceTo(this.brush),this.angle=this.pointer.getAngleTo(this.brush),this.distance>this.radius&&(this.brush.moveByAngle(this.angle,this.distance-this.radius),this._hasMoved=!0)):(this.distance=0,this.angle=0,this.brush.update(e),this._hasMoved=!0),!0))}}function st(t,e,n){const l=t.slice();return l[61]=e[n].name,l[62]=e[n].zIndex,l[63]=e,l[64]=n,l}function it(t){let e,n,l;return{c(){e=F("div"),e.textContent="Start drawing",d(e,"class","start-prompt svelte-yigbas")},m(a,r){y(a,e,r),l=!0},i(a){l||(Ve(()=>{l&&(n||(n=xe(e,et,{duration:50},!0)),n.run(1))}),l=!0)},o(a){n||(n=xe(e,et,{duration:50},!1)),n.run(0),l=!1},d(a){a&&v(e),a&&n&&n.end()}}}function rt(t){let e,n=t[61],l,a;const r=()=>t[30](e,n),i=()=>t[30](null,n);return{c(){e=F("canvas"),d(e,"key",t[61]),St(e,"z-index",t[62]),d(e,"class","svelte-yigbas"),R(e,"lr",t[5]),R(e,"tb",!t[5])},m(u,s){y(u,e,s),r(),l||(a=[q(e,"mousedown",t[61]==="interface"?t[7]:void 0),q(e,"mousemove",t[61]==="interface"?t[8]:void 0),q(e,"mouseup",t[61]==="interface"?t[9]:void 0),q(e,"mouseout",t[61]==="interface"?t[9]:void 0),q(e,"blur",t[61]==="interface"?t[9]:void 0),q(e,"touchstart",t[61]==="interface"?t[7]:void 0),q(e,"touchmove",t[61]==="interface"?t[8]:void 0),q(e,"touchend",t[61]==="interface"?t[9]:void 0),q(e,"touchcancel",t[61]==="interface"?t[9]:void 0),q(e,"click",Et(t[29]))],l=!0)},p(u,s){t=u,n!==t[61]&&(i(),n=t[61],r()),s[0]&32&&R(e,"lr",t[5]),s[0]&32&&R(e,"tb",!t[5])},d(u){u&&v(e),i(),l=!1,yt(a)}}}function on(t){let e,n,l,a,r=t[4]===0&&it(),i=t[6],u=[];for(let s=0;st[32].call(e))},m(s,f){y(s,e,f),r&&r.m(e,null),P(e,n);for(let o=0;o{r=null}),ee()),f[0]&993){i=s[6];let o;for(o=0;oh?(m=b[0],C=b[0]/h,G=(b[1]-C)/2):(z=0,G=0,m=b[0],C=b[1]),k.temp.drawImage(i,z,G,m,C)}It(async()=>{Object.keys(E).forEach(m=>{n(26,k[m]=E[m].getContext("2d"),k)}),await ge(),i&&(i.addEventListener("load",m=>{o==="webcam"?(k.temp.save(),k.temp.translate(g,0),k.temp.scale(-1,1),k.temp.drawImage(i,0,0),k.temp.restore()):w(),k.drawing.drawImage(E.temp,0,0,g,_),ae()}),setTimeout(()=>{o==="webcam"?(k.temp.save(),k.temp.translate(g,0),k.temp.scale(-1,1),k.temp.drawImage(i,0,0),k.temp.restore()):w(),k.drawing.drawImage(E.temp,0,0,g,_),pe({lines:Y.slice()}),ae()},100)),n(28,X=new un({radius:f*.05,enabled:!0,initialPoint:{x:g/2,y:_/2}})),O=new 
Yt((m,C,...M)=>{ze()}),O.observe(te),we(),n(24,I=!0),requestAnimationFrame(()=>{be(),requestAnimationFrame(()=>{me()})})});function be(){const m=g/2,C=_/2;X.update({x:m,y:C},{both:!0}),X.update({x:m,y:C},{both:!1}),se=!0,oe=!0}Bt(()=>{n(24,I=!1),O.unobserve(te)});function re(m){Le(),i&&(o==="webcam"?(k.temp.save(),k.temp.translate(g,0),k.temp.scale(-1,1),k.temp.drawImage(i,0,0),k.temp.restore()):w(),(!Y||!Y.length)&&k.drawing.drawImage(E.temp,0,0,g,_)),pe({lines:m}),n(4,K=m.length),Y.length&&n(27,Y=m),Y.length==0&&a("clear")}function Fe(){re([]),ae()}function Ne(){const m=Y.slice(0,-1);re(m),ae()}let pe=({lines:m})=>{m.forEach(C=>{const{points:M,brush_color:h,brush_radius:z}=C;Se({points:M,brush_color:h,brush_radius:z}),u==="mask"&&Ee({points:M,brush_color:h,brush_radius:z}),W=M}),De(),u==="mask"&&Re()},We=m=>{m.preventDefault(),ie=!0;const{x:C,y:M}=Te(m);m.touches&&m.touches.length>0&&X.update({x:C,y:M},{both:!0}),Be(C,M),n(4,K+=1)},Ie=m=>{m.preventDefault();const{x:C,y:M}=Te(m);Be(C,M)},Oe=m=>{m.preventDefault(),Ie(m),fe=!1,ie=!1,De(),u==="mask"&&Re()},ye=0,ve=0,Ce=0,Me=!1,ze=async()=>{if(b&&te){const M=te?.getBoundingClientRect(),h=b[0]/b[1],z=M.width/M.height;n(5,Me=h{ve=_,ye=g,Ce=c},10),await ge(),me()},he=async(m,C,M,h=!0)=>{if(!I)return;await ge();const z=window.devicePixelRatio||1;m.width=C.width*(h?z:1),m.height=C.height*(h?z:1);const G=m.getContext("2d");h&&G.scale(z,z),m.style.width=`${M.width}px`,m.style.height=`${M.height}px`},Te=m=>{const C=E.interface.getBoundingClientRect();let M=m.clientX,h=m.clientY;return m.changedTouches&&m.changedTouches.length>0&&(M=m.changedTouches[0].clientX,h=m.changedTouches[0].clientY),{x:(M-C.left)/C.width*g,y:(h-C.top)/C.height*_}},Be=(m,C)=>{X.update({x:m,y:C});const M=!X.isEnabled();(ie&&!fe||M&&ie)&&(fe=!0,W.push(X.brush.toObject())),fe&&(W.push(X.brush.toObject()),Se({points:W,brush_color:s,brush_radius:f}),u==="mask"&&Ee({points:W,brush_color:s,brush_radius:f})),se=!0},Se=({points:m,brush_color:C,brush_radius:M})=>{if(!m||m.length<2||(n(26,k.temp.lineJoin="round",k),n(26,k.temp.lineCap="round",k),n(26,k.temp.strokeStyle=C,k),n(26,k.temp.lineWidth=M,k),!m||m.length<2))return;let h=m[0],z=m[1];k.temp.moveTo(z.x,z.y),k.temp.beginPath();for(var G=1,Qe=m.length;G{if(!m||m.length<2)return;n(26,k.temp_fake.lineJoin="round",k),n(26,k.temp_fake.lineCap="round",k),n(26,k.temp_fake.strokeStyle="#fff",k),n(26,k.temp_fake.lineWidth=M,k);let h=m[0],z=m[1];k.temp_fake.moveTo(z.x,z.y),k.temp_fake.beginPath();for(var G=1,Qe=m.length;G{W.length<1||(W.length=0,k.mask.drawImage(E.temp_fake,0,0,g,_),ae())},De=()=>{W.length<1||(Y.push({points:W.slice(),brush_color:s,brush_radius:f}),u!=="mask"&&(W.length=0),k.drawing.drawImage(E.temp,0,0,g,_),ae())},ae=()=>{const m=Ue();a("change",m)};function me(){return n(27,Y=[]),Le(),n(4,K=0),!0}function Le(){oe=!0,k.temp.clearRect(0,0,g,_),n(26,k.temp.fillStyle=u==="mask"?"transparent":"#FFFFFF",k),k.temp.fillRect(0,0,g,_),u==="mask"&&(k.temp_fake.clearRect(0,0,E.temp_fake.width,E.temp_fake.height),k.mask.clearRect(0,0,g,_),n(26,k.mask.fillStyle="#000",k),k.mask.fillRect(0,0,g,_))}let we=({once:m=!1}={})=>{if(se||oe){const C=X.getPointerCoordinates(),M=X.getBrushCoordinates();Ye(k.interface,C,M),se=!1,oe=!1}m||window.requestAnimationFrame(()=>{we()})},Ye=(m,C,M)=>{m.clearRect(0,0,g,_),m.beginPath(),m.fillStyle=s,m.arc(M.x,M.y,f/2,0,Math.PI*2,!0),m.fill(),m.beginPath(),m.fillStyle=fn,m.arc(M.x,M.y,l,0,Math.PI*2,!0),m.fill()};function Ue(){return 
u==="mask"?E.mask.toDataURL("image/jpg"):E.drawing.toDataURL("image/jpg")}function Xe(m){ue.call(this,t,m)}function Pe(m,C){L[m?"unshift":"push"](()=>{E[C]=m,n(0,E)})}function Je(m){L[m?"unshift":"push"](()=>{te=m,n(3,te)})}function Ge(){D=this.offsetWidth,N=this.offsetHeight,n(1,D),n(2,N)}return t.$$set=m=>{"value"in m&&n(13,r=m.value),"value_img"in m&&n(14,i=m.value_img),"mode"in m&&n(15,u=m.mode),"brush_color"in m&&n(16,s=m.brush_color),"brush_radius"in m&&n(10,f=m.brush_radius),"source"in m&&n(17,o=m.source),"width"in m&&n(11,g=m.width),"height"in m&&n(12,_=m.height),"container_height"in m&&n(18,c=m.container_height),"shape"in m&&n(19,b=m.shape)},t.$$.update=()=>{t.$$.dirty[0]&530432&&b&&(g||_)&&(n(11,g=b[0]),n(12,_=b[1])),t.$$.dirty[0]&16785408&&I&&!r&&me(),t.$$.dirty[0]&251811841&&I&&i!==U&&(n(25,U=i),me(),setTimeout(()=>{o==="webcam"?(k.temp.save(),k.temp.translate(g,0),k.temp.scale(-1,1),k.temp.drawImage(i,0,0),k.temp.restore()):w(),k.drawing.drawImage(E.temp,0,0,g,_),pe({lines:Y.slice()}),ae()},50)),t.$$.dirty[0]&268436480&&X&&(be(),X.setRadius(f*.05)),t.$$.dirty[0]&6144&&(g||_)&&ze(),t.$$.dirty[0]&1024&&(l=f*.075)},[E,D,N,te,K,Me,ce,We,Ie,Oe,f,g,_,r,i,u,s,o,c,b,Fe,Ne,me,Ue,I,U,k,Y,X,Xe,Pe,Je,Ge]}class Ze extends ne{constructor(e){super(),le(this,e,_n,on,$,{value:13,value_img:14,mode:15,brush_color:16,brush_radius:10,source:17,width:11,height:12,container_height:18,shape:19,clear_mask:20,undo:21,clear:22,get_image_data:23},null,[-1,-1,-1])}get clear_mask(){return this.$$.ctx[20]}get undo(){return this.$$.ctx[21]}get clear(){return this.$$.ctx[22]}get get_image_data(){return this.$$.ctx[23]}}function ut(t){let e,n;return e=new ke({props:{Icon:nn,label:"Clear"}}),e.$on("click",t[3]),{c(){T(e.$$.fragment)},m(l,a){B(e,l,a),n=!0},p:H,i(l){n||(p(e.$$.fragment,l),n=!0)},o(l){A(e.$$.fragment,l),n=!1},d(l){S(e,l)}}}function cn(t){let e,n,l,a,r,i;n=new ke({props:{Icon:Xt,label:"Undo"}}),n.$on("click",t[2]);let u=t[0]&&ut(t);return r=new ke({props:{Icon:Jt,label:"Remove Image"}}),r.$on("click",t[4]),{c(){e=F("div"),T(n.$$.fragment),l=j(),u&&u.c(),a=j(),T(r.$$.fragment),d(e,"class","svelte-s6ybro")},m(s,f){y(s,e,f),B(n,e,null),P(e,l),u&&u.m(e,null),P(e,a),B(r,e,null),i=!0},p(s,[f]){s[0]?u?(u.p(s,f),f&1&&p(u,1)):(u=ut(s),u.c(),p(u,1),u.m(e,a)):u&&(x(),A(u,1,1,()=>{u=null}),ee())},i(s){i||(p(n.$$.fragment,s),p(u),p(r.$$.fragment,s),i=!0)},o(s){A(n.$$.fragment,s),A(u),A(r.$$.fragment,s),i=!1},d(s){s&&v(e),S(n),u&&u.d(),S(r)}}}function hn(t,e,n){const l=de();let{show_eraser:a=!1}=e;const r=()=>l("undo"),i=s=>{l("clear_mask"),s.stopPropagation()},u=s=>{l("remove_image"),s.stopPropagation()};return t.$$set=s=>{"show_eraser"in s&&n(0,a=s.show_eraser)},[a,l,r,i,u]}class Ke extends ne{constructor(e){super(),le(this,e,hn,cn,$,{show_eraser:0})}}function ot(t){let e,n,l,a,r;return{c(){e=F("input"),d(e,"aria-label","Brush radius"),d(e,"type","range"),d(e,"min",n=.5*(t[2]/t[6])),d(e,"max",l=75*(t[2]/t[6])),d(e,"class","svelte-p4aq0j")},m(i,u){y(i,e,u),je(e,t[0]),a||(r=[q(e,"change",t[10]),q(e,"input",t[10])],a=!0)},p(i,u){u&68&&n!==(n=.5*(i[2]/i[6]))&&d(e,"min",n),u&68&&l!==(l=75*(i[2]/i[6]))&&d(e,"max",l),u&1&&je(e,i[0])},d(i){i&&v(e),a=!1,yt(r)}}}function ft(t){let e,n,l,a;n=new ke({props:{Icon:en,label:"Select brush color"}}),n.$on("click",t[11]);let r=t[5]&&_t(t);return{c(){e=F("span"),T(n.$$.fragment),l=j(),r&&r.c(),d(e,"class","col 
svelte-p4aq0j")},m(i,u){y(i,e,u),B(n,e,null),P(e,l),r&&r.m(e,null),a=!0},p(i,u){i[5]?r?r.p(i,u):(r=_t(i),r.c(),r.m(e,null)):r&&(r.d(1),r=null)},i(i){a||(p(n.$$.fragment,i),a=!0)},o(i){A(n.$$.fragment,i),a=!1},d(i){i&&v(e),S(n),r&&r.d()}}}function _t(t){let e,n,l;return{c(){e=F("input"),d(e,"aria-label","Brush color"),d(e,"type","color"),d(e,"class","svelte-p4aq0j")},m(a,r){y(a,e,r),je(e,t[1]),n||(l=q(e,"input",t[12]),n=!0)},p(a,r){r&2&&je(e,a[1])},d(a){a&&v(e),n=!1,l()}}}function mn(t){let e,n,l,a,r,i;l=new ke({props:{Icon:$t,label:"Use brush"}}),l.$on("click",t[9]);let u=t[4]&&ot(t),s=t[3]!=="mask"&&ft(t);return{c(){e=F("div"),n=F("span"),T(l.$$.fragment),a=j(),u&&u.c(),r=j(),s&&s.c(),d(n,"class","brush svelte-p4aq0j"),d(e,"class","wrap svelte-p4aq0j")},m(f,o){y(f,e,o),P(e,n),B(l,n,null),P(n,a),u&&u.m(n,null),P(e,r),s&&s.m(e,null),i=!0},p(f,[o]){f[4]?u?u.p(f,o):(u=ot(f),u.c(),u.m(n,null)):u&&(u.d(1),u=null),f[3]!=="mask"?s?(s.p(f,o),o&8&&p(s,1)):(s=ft(f),s.c(),p(s,1),s.m(e,null)):s&&(x(),A(s,1,1,()=>{s=null}),ee())},i(f){i||(p(l.$$.fragment,f),p(s),i=!0)},o(f){A(l.$$.fragment,f),A(s),i=!1},d(f){f&&v(e),S(l),u&&u.d(),s&&s.d()}}}function gn(t,e,n){let l;de();let a=!1,r=!1,{brush_radius:i=20}=e,{brush_color:u="#000"}=e,{container_height:s}=e,{img_width:f}=e,{img_height:o}=e,{mode:g="other"}=e;const _=()=>n(4,a=!a);function c(){i=Rt(this.value),n(0,i)}const b=()=>n(5,r=!r);function I(){u=this.value,n(1,u)}return t.$$set=D=>{"brush_radius"in D&&n(0,i=D.brush_radius),"brush_color"in D&&n(1,u=D.brush_color),"container_height"in D&&n(7,s=D.container_height),"img_width"in D&&n(2,f=D.img_width),"img_height"in D&&n(8,o=D.img_height),"mode"in D&&n(3,g=D.mode)},t.$$.update=()=>{t.$$.dirty&388&&n(6,l=s*(f/o))},[i,u,f,g,a,r,l,s,o,_,c,b,I]}class $e extends ne{constructor(e){super(),le(this,e,gn,mn,$,{brush_radius:0,brush_color:1,container_height:7,img_width:2,img_height:8,mode:3})}}function dn(t){let e,n,l,a;return{c(){e=F("img"),J(e.src,n=t[0].image||t[0])||d(e,"src",n),d(e,"alt",""),d(e,"class","svelte-p3y7hu"),R(e,"webcam",t[5]==="webcam"&&t[9]),R(e,"selectable",t[10])},m(r,i){y(r,e,i),l||(a=q(e,"click",t[29]),l=!0)},p(r,i){i[0]&1&&!J(e.src,n=r[0].image||r[0])&&d(e,"src",n),i[0]&544&&R(e,"webcam",r[5]==="webcam"&&r[9]),i[0]&1024&&R(e,"selectable",r[10])},i:H,o:H,d(r){r&&v(e),l=!1,a()}}}function bn(t){let e=t[21],n,l,a,r=ct(t),i=t[16]>0&&ht(t);return{c(){r.c(),n=j(),i&&i.c(),l=_e()},m(u,s){r.m(u,s),y(u,n,s),i&&i.m(u,s),y(u,l,s),a=!0},p(u,s){s[0]&2097152&&$(e,e=u[21])?(r.d(1),r=ct(u),r.c(),r.m(n.parentNode,n)):r.p(u,s),u[16]>0?i?(i.p(u,s),s[0]&65536&&p(i,1)):(i=ht(u),i.c(),p(i,1),i.m(l.parentNode,l)):i&&(x(),A(i,1,1,()=>{i=null}),ee())},i(u){a||(p(i),a=!0)},o(u){A(i),a=!1},d(u){r.d(u),u&&v(n),i&&i.d(u),u&&v(l)}}}function kn(t){let e,n,l,a,r,i,u;return e=new He({props:{editable:!0}}),e.$on("edit",t[52]),e.$on("clear",t[24]),{c(){T(e.$$.fragment),n=j(),l=F("img"),J(l.src,a=t[0])||d(l,"src",a),d(l,"alt",""),d(l,"class","svelte-p3y7hu"),R(l,"selectable",t[10]),R(l,"webcam",t[5]==="webcam"&&t[9])},m(s,f){B(e,s,f),y(s,n,f),y(s,l,f),r=!0,i||(u=q(l,"click",t[29]),i=!0)},p(s,f){(!r||f[0]&1&&!J(l.src,a=s[0]))&&d(l,"src",a),(!r||f[0]&1024)&&R(l,"selectable",s[10]),(!r||f[0]&544)&&R(l,"webcam",s[5]==="webcam"&&s[9])},i(s){r||(p(e.$$.fragment,s),r=!0)},o(s){A(e.$$.fragment,s),r=!1},d(s){S(e,s),s&&v(n),s&&v(l),i=!1,u()}}}function pn(t){let e,n,l,a,r={image:t[0]};return e=new Mt({props:r}),t[50](e),e.$on("crop",t[25]),l=new 
He({}),l.$on("clear",t[51]),{c(){T(e.$$.fragment),n=j(),T(l.$$.fragment)},m(i,u){B(e,i,u),y(i,n,u),B(l,i,u),a=!0},p(i,u){const s={};u[0]&1&&(s.image=i[0]),e.$set(s)},i(i){a||(p(e.$$.fragment,i),p(l.$$.fragment,i),a=!0)},o(i){A(e.$$.fragment,i),A(l.$$.fragment,i),a=!1},d(i){t[50](null),S(e,i),i&&v(n),S(l,i)}}}function wn(t){let e,n,l=t[5]==="webcam"&&!t[21]&>(t);return{c(){l&&l.c(),e=_e()},m(a,r){l&&l.m(a,r),y(a,e,r),n=!0},p(a,r){a[5]==="webcam"&&!a[21]?l?(l.p(a,r),r[0]&2097184&&p(l,1)):(l=gt(a),l.c(),p(l,1),l.m(e.parentNode,e)):l&&(x(),A(l,1,1,()=>{l=null}),ee())},i(a){n||(p(l),n=!0)},o(a){A(l),n=!1},d(a){l&&l.d(a),a&&v(e)}}}function An(t){let e,n,l,a,r,i,u;e=new Ke({}),e.$on("undo",t[42]),e.$on("remove_image",t[27]);let s=t[1]==="color-sketch"&&dt(t);function f(_){t[45](_)}function o(_){t[46](_)}let g={value:t[0],mode:t[13],width:t[16]||t[20],height:t[15]||t[19],container_height:t[17]||t[19],shape:t[6]};return t[2]!==void 0&&(g.brush_radius=t[2]),t[22]!==void 0&&(g.brush_color=t[22]),a=new Ze({props:g}),L.push(()=>V(a,"brush_radius",f)),L.push(()=>V(a,"brush_color",o)),t[47](a),a.$on("change",t[25]),a.$on("clear",t[27]),{c(){T(e.$$.fragment),n=j(),s&&s.c(),l=j(),T(a.$$.fragment)},m(_,c){B(e,_,c),y(_,n,c),s&&s.m(_,c),y(_,l,c),B(a,_,c),u=!0},p(_,c){_[1]==="color-sketch"?s?(s.p(_,c),c[0]&2&&p(s,1)):(s=dt(_),s.c(),p(s,1),s.m(l.parentNode,l)):s&&(x(),A(s,1,1,()=>{s=null}),ee());const b={};c[0]&1&&(b.value=_[0]),c[0]&8192&&(b.mode=_[13]),c[0]&1114112&&(b.width=_[16]||_[20]),c[0]&557056&&(b.height=_[15]||_[19]),c[0]&655360&&(b.container_height=_[17]||_[19]),c[0]&64&&(b.shape=_[6]),!r&&c[0]&4&&(r=!0,b.brush_radius=_[2],Z(()=>r=!1)),!i&&c[0]&4194304&&(i=!0,b.brush_color=_[22],Z(()=>i=!1)),a.$set(b)},i(_){u||(p(e.$$.fragment,_),p(s),p(a.$$.fragment,_),u=!0)},o(_){A(e.$$.fragment,_),A(s),A(a.$$.fragment,_),u=!1},d(_){S(e,_),_&&v(n),s&&s.d(_),_&&v(l),t[47](null),S(a,_)}}}function In(t){let e,n,l;function a(i){t[41](i)}let r={filetype:"image/*",include_file_metadata:!1,disable_click:!!t[0],$$slots:{default:[Tn]},$$scope:{ctx:t}};return t[12]!==void 0&&(r.dragging=t[12]),e=new Gt({props:r}),L.push(()=>V(e,"dragging",a)),e.$on("load",t[23]),{c(){T(e.$$.fragment)},m(i,u){B(e,i,u),l=!0},p(i,u){const s={};u[0]&1&&(s.disable_click=!!i[0]),u[0]&8384231|u[1]&1073741824&&(s.$$scope={dirty:u,ctx:i}),!n&&u[0]&4096&&(n=!0,s.dragging=i[12],Z(()=>n=!1)),e.$set(s)},i(i){l||(p(e.$$.fragment,i),l=!0)},o(i){A(e.$$.fragment,i),l=!1},d(i){S(e,i)}}}function ct(t){let e,n,l,a;return{c(){e=F("img"),d(e,"class","absolute-img svelte-p3y7hu"),J(e.src,n=t[21]||t[0]?.image||t[0])||d(e,"src",n),d(e,"alt",""),R(e,"webcam",t[5]==="webcam"&&t[9])},m(r,i){y(r,e,i),t[53](e),l||(a=q(e,"load",t[26]),l=!0)},p(r,i){i[0]&2097153&&!J(e.src,n=r[21]||r[0]?.image||r[0])&&d(e,"src",n),i[0]&544&&R(e,"webcam",r[5]==="webcam"&&r[9])},d(r){r&&v(e),t[53](null),l=!1,a()}}}function ht(t){let e,n,l,a,r,i,u,s;function f(c){t[55](c)}function o(c){t[56](c)}let g={value:t[0],mode:t[13],width:t[16]||t[20],height:t[15]||t[19],container_height:t[17]||t[19],value_img:t[18],source:t[5]};t[2]!==void 0&&(g.brush_radius=t[2]),t[22]!==void 0&&(g.brush_color=t[22]),e=new Ze({props:g}),t[54](e),L.push(()=>V(e,"brush_radius",f)),L.push(()=>V(e,"brush_color",o)),e.$on("change",t[25]),r=new Ke({}),r.$on("undo",t[57]),r.$on("remove_image",t[27]);let _=(t[1]==="color-sketch"||t[1]==="sketch")&&mt(t);return{c(){T(e.$$.fragment),a=j(),T(r.$$.fragment),i=j(),_&&_.c(),u=_e()},m(c,b){B(e,c,b),y(c,a,b),B(r,c,b),y(c,i,b),_&&_.m(c,b),y(c,u,b),s=!0},p(c,b){const 
I={};b[0]&1&&(I.value=c[0]),b[0]&8192&&(I.mode=c[13]),b[0]&1114112&&(I.width=c[16]||c[20]),b[0]&557056&&(I.height=c[15]||c[19]),b[0]&655360&&(I.container_height=c[17]||c[19]),b[0]&262144&&(I.value_img=c[18]),b[0]&32&&(I.source=c[5]),!n&&b[0]&4&&(n=!0,I.brush_radius=c[2],Z(()=>n=!1)),!l&&b[0]&4194304&&(l=!0,I.brush_color=c[22],Z(()=>l=!1)),e.$set(I),c[1]==="color-sketch"||c[1]==="sketch"?_?(_.p(c,b),b[0]&2&&p(_,1)):(_=mt(c),_.c(),p(_,1),_.m(u.parentNode,u)):_&&(x(),A(_,1,1,()=>{_=null}),ee())},i(c){s||(p(e.$$.fragment,c),p(r.$$.fragment,c),p(_),s=!0)},o(c){A(e.$$.fragment,c),A(r.$$.fragment,c),A(_),s=!1},d(c){t[54](null),S(e,c),c&&v(a),S(r,c),c&&v(i),_&&_.d(c),c&&v(u)}}}function mt(t){let e,n,l,a;function r(s){t[58](s)}function i(s){t[59](s)}let u={container_height:t[17]||t[19],img_width:t[16]||t[20],img_height:t[15]||t[19],mode:t[13]};return t[2]!==void 0&&(u.brush_radius=t[2]),t[22]!==void 0&&(u.brush_color=t[22]),e=new $e({props:u}),L.push(()=>V(e,"brush_radius",r)),L.push(()=>V(e,"brush_color",i)),{c(){T(e.$$.fragment)},m(s,f){B(e,s,f),a=!0},p(s,f){const o={};f[0]&655360&&(o.container_height=s[17]||s[19]),f[0]&1114112&&(o.img_width=s[16]||s[20]),f[0]&557056&&(o.img_height=s[15]||s[19]),f[0]&8192&&(o.mode=s[13]),!n&&f[0]&4&&(n=!0,o.brush_radius=s[2],Z(()=>n=!1)),!l&&f[0]&4194304&&(l=!0,o.brush_color=s[22],Z(()=>l=!1)),e.$set(o)},i(s){a||(p(e.$$.fragment,s),a=!0)},o(s){A(e.$$.fragment,s),a=!1},d(s){S(e,s)}}}function gt(t){let e,n;return e=new Pt({props:{streaming:t[7],pending:t[8],mirror_webcam:t[9]}}),e.$on("capture",t[48]),e.$on("stream",t[25]),e.$on("error",t[49]),{c(){T(e.$$.fragment)},m(l,a){B(e,l,a),n=!0},p(l,a){const r={};a[0]&128&&(r.streaming=l[7]),a[0]&256&&(r.pending=l[8]),a[0]&512&&(r.mirror_webcam=l[9]),e.$set(r)},i(l){n||(p(e.$$.fragment,l),n=!0)},o(l){A(e.$$.fragment,l),n=!1},d(l){S(e,l)}}}function dt(t){let e,n,l,a;function r(s){t[43](s)}function i(s){t[44](s)}let u={container_height:t[17]||t[19],img_width:t[16]||t[20],img_height:t[15]||t[19]};return t[2]!==void 0&&(u.brush_radius=t[2]),t[22]!==void 0&&(u.brush_color=t[22]),e=new $e({props:u}),L.push(()=>V(e,"brush_radius",r)),L.push(()=>V(e,"brush_color",i)),{c(){T(e.$$.fragment)},m(s,f){B(e,s,f),a=!0},p(s,f){const o={};f[0]&655360&&(o.container_height=s[17]||s[19]),f[0]&1114112&&(o.img_width=s[16]||s[20]),f[0]&557056&&(o.img_height=s[15]||s[19]),!n&&f[0]&4&&(n=!0,o.brush_radius=s[2],Z(()=>n=!1)),!l&&f[0]&4194304&&(l=!0,o.brush_color=s[22],Z(()=>l=!1)),e.$set(o)},i(s){a||(p(e.$$.fragment,s),a=!0)},o(s){A(e.$$.fragment,s),a=!1},d(s){S(e,s)}}}function yn(t){let e,n,l,a;return{c(){e=F("img"),J(e.src,n=t[0].image||t[0])||d(e,"src",n),d(e,"alt","hello"),d(e,"class","svelte-p3y7hu"),R(e,"webcam",t[5]==="webcam"&&t[9]),R(e,"selectable",t[10])},m(r,i){y(r,e,i),l||(a=q(e,"click",t[29]),l=!0)},p(r,i){i[0]&1&&!J(e.src,n=r[0].image||r[0])&&d(e,"src",n),i[0]&544&&R(e,"webcam",r[5]==="webcam"&&r[9]),i[0]&1024&&R(e,"selectable",r[10])},i:H,o:H,d(r){r&&v(e),l=!1,a()}}}function vn(t){let e=t[21],n,l,a,r=bt(t),i=t[16]>0&&kt(t);return{c(){r.c(),n=j(),i&&i.c(),l=_e()},m(u,s){r.m(u,s),y(u,n,s),i&&i.m(u,s),y(u,l,s),a=!0},p(u,s){s[0]&2097152&&$(e,e=u[21])?(r.d(1),r=bt(u),r.c(),r.m(n.parentNode,n)):r.p(u,s),u[16]>0?i?(i.p(u,s),s[0]&65536&&p(i,1)):(i=kt(u),i.c(),p(i,1),i.m(l.parentNode,l)):i&&(x(),A(i,1,1,()=>{i=null}),ee())},i(u){a||(p(i),a=!0)},o(u){A(i),a=!1},d(u){r.d(u),u&&v(n),i&&i.d(u),u&&v(l)}}}function Cn(t){let e,n,l,a,r,i,u;return e=new 
He({props:{editable:!0}}),e.$on("edit",t[33]),e.$on("clear",t[24]),{c(){T(e.$$.fragment),n=j(),l=F("img"),J(l.src,a=t[0])||d(l,"src",a),d(l,"alt",""),d(l,"class","svelte-p3y7hu"),R(l,"scale-x-[-1]",t[5]==="webcam"&&t[9]),R(l,"selectable",t[10])},m(s,f){B(e,s,f),y(s,n,f),y(s,l,f),r=!0,i||(u=q(l,"click",t[29]),i=!0)},p(s,f){(!r||f[0]&1&&!J(l.src,a=s[0]))&&d(l,"src",a),(!r||f[0]&544)&&R(l,"scale-x-[-1]",s[5]==="webcam"&&s[9]),(!r||f[0]&1024)&&R(l,"selectable",s[10])},i(s){r||(p(e.$$.fragment,s),r=!0)},o(s){A(e.$$.fragment,s),r=!1},d(s){S(e,s),s&&v(n),s&&v(l),i=!1,u()}}}function Mn(t){let e,n,l,a,r={image:t[0]};return e=new Mt({props:r}),t[31](e),e.$on("crop",t[25]),l=new He({}),l.$on("clear",t[32]),{c(){T(e.$$.fragment),n=j(),T(l.$$.fragment)},m(i,u){B(e,i,u),y(i,n,u),B(l,i,u),a=!0},p(i,u){const s={};u[0]&1&&(s.image=i[0]),e.$set(s)},i(i){a||(p(e.$$.fragment,i),p(l.$$.fragment,i),a=!0)},o(i){A(e.$$.fragment,i),A(l.$$.fragment,i),a=!1},d(i){t[31](null),S(e,i),i&&v(n),S(l,i)}}}function zn(t){let e;const n=t[30].default,l=Dt(n,t,t[61],null);return{c(){l&&l.c()},m(a,r){l&&l.m(a,r),e=!0},p(a,r){l&&l.p&&(!e||r[1]&1073741824)&&Lt(l,n,a,a[61],e?jt(n,a[61],r,null):Ut(a[61]),null)},i(a){e||(p(l,a),e=!0)},o(a){A(l,a),e=!1},d(a){l&&l.d(a)}}}function bt(t){let e,n,l,a;return{c(){e=F("img"),d(e,"class","absolute-img svelte-p3y7hu"),J(e.src,n=t[21]||t[0]?.image||t[0])||d(e,"src",n),d(e,"alt",""),R(e,"webcam",t[5]==="webcam"&&t[9])},m(r,i){y(r,e,i),t[34](e),l||(a=q(e,"load",t[26]),l=!0)},p(r,i){i[0]&2097153&&!J(e.src,n=r[21]||r[0]?.image||r[0])&&d(e,"src",n),i[0]&544&&R(e,"webcam",r[5]==="webcam"&&r[9])},d(r){r&&v(e),t[34](null),l=!1,a()}}}function kt(t){let e,n,l,a,r,i,u,s;function f(c){t[36](c)}function o(c){t[37](c)}let g={value:t[0],mode:t[13],width:t[16]||t[20],height:t[15]||t[19],container_height:t[17]||t[19],value_img:t[18],source:t[5],shape:t[6]};t[2]!==void 0&&(g.brush_radius=t[2]),t[22]!==void 0&&(g.brush_color=t[22]),e=new Ze({props:g}),t[35](e),L.push(()=>V(e,"brush_radius",f)),L.push(()=>V(e,"brush_color",o)),e.$on("change",t[25]),r=new Ke({props:{show_eraser:t[18]}}),r.$on("undo",t[38]),r.$on("clear_mask",t[28]),r.$on("remove_image",t[27]);let _=(t[1]==="color-sketch"||t[1]==="sketch")&&pt(t);return{c(){T(e.$$.fragment),a=j(),T(r.$$.fragment),i=j(),_&&_.c(),u=_e()},m(c,b){B(e,c,b),y(c,a,b),B(r,c,b),y(c,i,b),_&&_.m(c,b),y(c,u,b),s=!0},p(c,b){const I={};b[0]&1&&(I.value=c[0]),b[0]&8192&&(I.mode=c[13]),b[0]&1114112&&(I.width=c[16]||c[20]),b[0]&557056&&(I.height=c[15]||c[19]),b[0]&655360&&(I.container_height=c[17]||c[19]),b[0]&262144&&(I.value_img=c[18]),b[0]&32&&(I.source=c[5]),b[0]&64&&(I.shape=c[6]),!n&&b[0]&4&&(n=!0,I.brush_radius=c[2],Z(()=>n=!1)),!l&&b[0]&4194304&&(l=!0,I.brush_color=c[22],Z(()=>l=!1)),e.$set(I);const D={};b[0]&262144&&(D.show_eraser=c[18]),r.$set(D),c[1]==="color-sketch"||c[1]==="sketch"?_?(_.p(c,b),b[0]&2&&p(_,1)):(_=pt(c),_.c(),p(_,1),_.m(u.parentNode,u)):_&&(x(),A(_,1,1,()=>{_=null}),ee())},i(c){s||(p(e.$$.fragment,c),p(r.$$.fragment,c),p(_),s=!0)},o(c){A(e.$$.fragment,c),A(r.$$.fragment,c),A(_),s=!1},d(c){t[35](null),S(e,c),c&&v(a),S(r,c),c&&v(i),_&&_.d(c),c&&v(u)}}}function pt(t){let e,n,l,a;function r(s){t[39](s)}function i(s){t[40](s)}let u={container_height:t[17]||t[19],img_width:t[16]||t[20],img_height:t[15]||t[19],mode:t[13]};return t[2]!==void 0&&(u.brush_radius=t[2]),t[22]!==void 0&&(u.brush_color=t[22]),e=new $e({props:u}),L.push(()=>V(e,"brush_radius",r)),L.push(()=>V(e,"brush_color",i)),{c(){T(e.$$.fragment)},m(s,f){B(e,s,f),a=!0},p(s,f){const 
o={};f[0]&655360&&(o.container_height=s[17]||s[19]),f[0]&1114112&&(o.img_width=s[16]||s[20]),f[0]&557056&&(o.img_height=s[15]||s[19]),f[0]&8192&&(o.mode=s[13]),!n&&f[0]&4&&(n=!0,o.brush_radius=s[2],Z(()=>n=!1)),!l&&f[0]&4194304&&(l=!0,o.brush_color=s[22],Z(()=>l=!1)),e.$set(o)},i(s){a||(p(e.$$.fragment,s),a=!0)},o(s){A(e.$$.fragment,s),a=!1},d(s){S(e,s)}}}function Tn(t){let e,n,l,a;const r=[zn,Mn,Cn,vn,yn],i=[];function u(s,f){return s[0]===null&&!s[21]||s[7]?0:s[1]==="select"?1:s[1]==="editor"?2:(s[1]==="sketch"||s[1]==="color-sketch")&&(s[0]!==null||s[21])?3:4}return e=u(t),n=i[e]=r[e](t),{c(){n.c(),l=_e()},m(s,f){i[e].m(s,f),y(s,l,f),a=!0},p(s,f){let o=e;e=u(s),e===o?i[e].p(s,f):(x(),A(i[o],1,1,()=>{i[o]=null}),ee(),n=i[e],n?n.p(s,f):(n=i[e]=r[e](s),n.c()),p(n,1),n.m(l.parentNode,l))},i(s){a||(p(n),a=!0)},o(s){A(n),a=!1},d(s){i[e].d(s),s&&v(l)}}}function Bn(t){let e,n,l,a,r,i,u;e=new vt({props:{show_label:t[4],Icon:t[5]==="canvas"?tt:qe,label:t[3]||(t[5]==="canvas"?"Sketch":"Image")}});const s=[In,An,wn,pn,kn,bn,dn],f=[];function o(g,_){return g[5]==="upload"?0:g[5]==="canvas"?1:g[0]===null&&!g[21]||g[7]?2:g[1]==="select"?3:g[1]==="editor"?4:(g[1]==="sketch"||g[1]==="color-sketch")&&(g[0]!==null||g[21])?5:6}return a=o(t),r=f[a]=s[a](t),{c(){T(e.$$.fragment),n=j(),l=F("div"),r.c(),d(l,"data-testid","image"),d(l,"class","image-container svelte-p3y7hu"),Ve(()=>t[60].call(l))},m(g,_){B(e,g,_),y(g,n,_),y(g,l,_),f[a].m(l,null),i=At(l,t[60].bind(l)),u=!0},p(g,_){const c={};_[0]&16&&(c.show_label=g[4]),_[0]&32&&(c.Icon=g[5]==="canvas"?tt:qe),_[0]&40&&(c.label=g[3]||(g[5]==="canvas"?"Sketch":"Image")),e.$set(c);let b=a;a=o(g),a===b?f[a].p(g,_):(x(),A(f[b],1,1,()=>{f[b]=null}),ee(),r=f[a],r?r.p(g,_):(r=f[a]=s[a](g),r.c()),p(r,1),r.m(l,null))},i(g){u||(p(e.$$.fragment,g),p(r),u=!0)},o(g){A(e.$$.fragment,g),A(r),u=!1},d(g){S(e,g),g&&v(n),g&&v(l),f[a].d(),i()}}}function Sn(t,e,n){let l,{$$slots:a={},$$scope:r}=e,{value:i}=e,{label:u=void 0}=e,{show_label:s}=e,{source:f="upload"}=e,{tool:o="editor"}=e,{shape:g}=e,{streaming:_=!1}=e,{pending:c=!1}=e,{mirror_webcam:b}=e,{brush_radius:I}=e,{selectable:D=!1}=e,N,U;i&&(f==="upload"||f==="webcam")&&o==="sketch"&&(i={image:i,mask:null});function ce({detail:h}){o==="color-sketch"?n(21,re=h):n(0,i=(f==="upload"||f==="webcam")&&o==="sketch"?{image:h,mask:null}:h),W("upload",h)}function E({detail:h}){n(0,i=null),n(21,re=void 0),W("clear")}async function k({detail:h},z){O==="mask"?f==="webcam"&&z?n(0,i={image:h,mask:null}):n(0,i={image:typeof i=="string"?i:i?.image||null,mask:h}):(f==="upload"||f==="webcam")&&o==="sketch"?n(0,i={image:h,mask:null}):n(0,i=h),await ge(),W(_?"stream":"edit")}const W=de();let Y=!1;function se(h){const z=h.currentTarget;n(16,X=z.naturalWidth),n(15,ie=z.naturalHeight),n(17,te=z.getBoundingClientRect().height)}async function oe(){N.clear(),await ge(),n(0,i=null),n(21,re=void 0)}async function fe(){N.clear_mask(),await ge()}let ie=0,X=0,te=0,O,K,w,be,re;It(async()=>{o==="color-sketch"&&i&&typeof i=="string"&&(n(21,re=i),await ge(),se({currentTarget:K}))});const Fe=h=>{let z=Ct(h);z&&W("select",{index:z,value:null})};function Ne(h){L[h?"unshift":"push"](()=>{U=h,n(11,U),n(0,i)})}const pe=h=>(E(h),n(1,o="editor")),We=()=>n(1,o="select");function Ie(h){L[h?"unshift":"push"](()=>{K=h,n(18,K)})}function Oe(h){L[h?"unshift":"push"](()=>{N=h,n(14,N)})}function ye(h){I=h,n(2,I)}function ve(h){l=h,n(22,l),n(13,O),n(5,f),n(1,o)}const Ce=()=>N.undo();function Me(h){I=h,n(2,I)}function ze(h){l=h,n(22,l),n(13,O),n(5,f),n(1,o)}function 
he(h){Y=h,n(12,Y)}const Te=()=>N.undo();function Be(h){I=h,n(2,I)}function Se(h){l=h,n(22,l),n(13,O),n(5,f),n(1,o)}function Ee(h){I=h,n(2,I)}function Re(h){l=h,n(22,l),n(13,O),n(5,f),n(1,o)}function De(h){L[h?"unshift":"push"](()=>{N=h,n(14,N)})}const ae=h=>o==="color-sketch"?ce(h):k(h,!0);function me(h){ue.call(this,t,h)}function Le(h){L[h?"unshift":"push"](()=>{U=h,n(11,U),n(0,i)})}const we=h=>(E(h),n(1,o="editor")),Ye=()=>n(1,o="select");function Ue(h){L[h?"unshift":"push"](()=>{K=h,n(18,K)})}function Xe(h){L[h?"unshift":"push"](()=>{N=h,n(14,N)})}function Pe(h){I=h,n(2,I)}function Je(h){l=h,n(22,l),n(13,O),n(5,f),n(1,o)}const Ge=()=>N.undo();function m(h){I=h,n(2,I)}function C(h){l=h,n(22,l),n(13,O),n(5,f),n(1,o)}function M(){w=this.offsetHeight,be=this.offsetWidth,n(19,w),n(20,be)}return t.$$set=h=>{"value"in h&&n(0,i=h.value),"label"in h&&n(3,u=h.label),"show_label"in h&&n(4,s=h.show_label),"source"in h&&n(5,f=h.source),"tool"in h&&n(1,o=h.tool),"shape"in h&&n(6,g=h.shape),"streaming"in h&&n(7,_=h.streaming),"pending"in h&&n(8,c=h.pending),"mirror_webcam"in h&&n(9,b=h.mirror_webcam),"brush_radius"in h&&n(2,I=h.brush_radius),"selectable"in h&&n(10,D=h.selectable),"$$scope"in h&&n(61,r=h.$$scope)},t.$$.update=()=>{t.$$.dirty[0]&1&&W("change",i),t.$$.dirty[0]&4096&&W("drag",Y),t.$$.dirty[0]&34&&(f==="canvas"&&o==="sketch"?n(13,O="bw-sketch"):o==="color-sketch"?n(13,O="color-sketch"):(f==="upload"||f==="webcam")&&o==="sketch"?n(13,O="mask"):n(13,O="editor")),t.$$.dirty[0]&8192&&n(22,l=O=="mask"?"#000000":"#000"),t.$$.dirty[0]&1&&(i===null||i.image===null&&i.mask===null)&&n(21,re=void 0),t.$$.dirty[0]&2049&&U&&(i?(n(11,U.image=i,U),U.create()):U.destroy())},[i,o,I,u,s,f,g,_,c,b,D,U,Y,O,N,ie,X,te,K,w,be,re,l,ce,E,k,se,oe,fe,Fe,a,Ne,pe,We,Ie,Oe,ye,ve,Ce,Me,ze,he,Te,Be,Se,Ee,Re,De,ae,me,Le,we,Ye,Ue,Xe,Pe,Je,Ge,m,C,M,r]}let En=class extends ne{constructor(e){super(),le(this,e,Sn,Bn,$,{value:0,label:3,show_label:4,source:5,tool:1,shape:6,streaming:7,pending:8,mirror_webcam:9,brush_radius:2,selectable:10},null,[-1,-1,-1])}};function Rn(t){let e,n,l,a,r,i,u,s,f;return l=new ke({props:{Icon:Vt,label:"Download"}}),{c(){e=F("div"),n=F("a"),T(l.$$.fragment),a=j(),r=F("img"),d(n,"href",t[0]),d(n,"target",window.__is_colab__?"_blank":null),d(n,"download","image"),d(e,"class","download svelte-ms5bsk"),J(r.src,i=t[0])||d(r,"src",i),d(r,"alt",""),d(r,"class","svelte-ms5bsk"),R(r,"selectable",t[3])},m(o,g){y(o,e,g),P(e,n),B(l,n,null),y(o,a,g),y(o,r,g),u=!0,s||(f=q(r,"click",t[4]),s=!0)},p(o,g){(!u||g&1)&&d(n,"href",o[0]),(!u||g&1&&!J(r.src,i=o[0]))&&d(r,"src",i),(!u||g&8)&&R(r,"selectable",o[3])},i(o){u||(p(l.$$.fragment,o),u=!0)},o(o){A(l.$$.fragment,o),u=!1},d(o){o&&v(e),S(l),o&&v(a),o&&v(r),s=!1,f()}}}function Dn(t){let e,n;return e=new Qt({props:{size:"large",unpadded_box:!0,$$slots:{default:[Ln]},$$scope:{ctx:t}}}),{c(){T(e.$$.fragment)},m(l,a){B(e,l,a),n=!0},p(l,a){const r={};a&64&&(r.$$scope={dirty:a,ctx:l}),e.$set(r)},i(l){n||(p(e.$$.fragment,l),n=!0)},o(l){A(e.$$.fragment,l),n=!1},d(l){S(e,l)}}}function Ln(t){let e,n;return e=new qe({}),{c(){T(e.$$.fragment)},m(l,a){B(e,l,a),n=!0},i(l){n||(p(e.$$.fragment,l),n=!0)},o(l){A(e.$$.fragment,l),n=!1},d(l){S(e,l)}}}function Un(t){let e,n,l,a,r,i;e=new vt({props:{show_label:t[2],Icon:qe,label:t[1]||"Image"}});const u=[Dn,Rn],s=[];function f(o,g){return o[0]===null?0:1}return l=f(t),a=s[l]=u[l](t),{c(){T(e.$$.fragment),n=j(),a.c(),r=_e()},m(o,g){B(e,o,g),y(o,n,g),s[l].m(o,g),y(o,r,g),i=!0},p(o,[g]){const 
_={};g&4&&(_.show_label=o[2]),g&2&&(_.label=o[1]||"Image"),e.$set(_);let c=l;l=f(o),l===c?s[l].p(o,g):(x(),A(s[c],1,1,()=>{s[c]=null}),ee(),a=s[l],a?a.p(o,g):(a=s[l]=u[l](o),a.c()),p(a,1),a.m(r.parentNode,r))},i(o){i||(p(e.$$.fragment,o),p(a),i=!0)},o(o){A(e.$$.fragment,o),A(a),i=!1},d(o){S(e,o),o&&v(n),s[l].d(o),o&&v(r)}}}function jn(t,e,n){let{value:l}=e,{label:a=void 0}=e,{show_label:r}=e,{selectable:i=!1}=e;const u=de(),s=f=>{let o=Ct(f);o&&u("select",{index:o,value:null})};return t.$$set=f=>{"value"in f&&n(0,l=f.value),"label"in f&&n(1,a=f.label),"show_label"in f&&n(2,r=f.show_label),"selectable"in f&&n(3,i=f.selectable)},t.$$.update=()=>{t.$$.dirty&1&&l&&u("change",l)},[l,a,r,i,s]}class qn extends ne{constructor(e){super(),le(this,e,jn,Un,$,{value:0,label:1,show_label:2,selectable:3})}}function Hn(t){let e,n,l;function a(i){t[19](i)}let r={brush_radius:t[14],shape:t[13],source:t[5],tool:t[6],selectable:t[15],label:t[7],show_label:t[8],pending:t[10],streaming:t[9],mirror_webcam:t[12],$$slots:{default:[Nn]},$$scope:{ctx:t}};return t[0]!==void 0&&(r.value=t[0]),e=new En({props:r}),L.push(()=>V(e,"value",a)),e.$on("edit",t[20]),e.$on("clear",t[21]),e.$on("change",t[22]),e.$on("stream",t[23]),e.$on("drag",t[24]),e.$on("upload",t[25]),e.$on("select",t[26]),e.$on("error",t[27]),{c(){T(e.$$.fragment)},m(i,u){B(e,i,u),l=!0},p(i,u){const s={};u&16384&&(s.brush_radius=i[14]),u&8192&&(s.shape=i[13]),u&32&&(s.source=i[5]),u&64&&(s.tool=i[6]),u&32768&&(s.selectable=i[15]),u&128&&(s.label=i[7]),u&256&&(s.show_label=i[8]),u&1024&&(s.pending=i[10]),u&512&&(s.streaming=i[9]),u&4096&&(s.mirror_webcam=i[12]),u&536870912&&(s.$$scope={dirty:u,ctx:i}),!n&&u&1&&(n=!0,s.value=i[0],Z(()=>n=!1)),e.$set(s)},i(i){l||(p(e.$$.fragment,i),l=!0)},o(i){A(e.$$.fragment,i),l=!1},d(i){S(e,i)}}}function Fn(t){let e,n;return e=new qn({props:{value:t[0],label:t[7],show_label:t[8],selectable:t[15]}}),e.$on("select",t[18]),{c(){T(e.$$.fragment)},m(l,a){B(e,l,a),n=!0},p(l,a){const r={};a&1&&(r.value=l[0]),a&128&&(r.label=l[7]),a&256&&(r.show_label=l[8]),a&32768&&(r.selectable=l[15]),e.$set(r)},i(l){n||(p(e.$$.fragment,l),n=!0)},o(l){A(e.$$.fragment,l),n=!1},d(l){S(e,l)}}}function Nn(t){let e,n;return e=new Zt({props:{type:"image"}}),{c(){T(e.$$.fragment)},m(l,a){B(e,l,a),n=!0},p:H,i(l){n||(p(e.$$.fragment,l),n=!0)},o(l){A(e.$$.fragment,l),n=!1},d(l){S(e,l)}}}function Wn(t){let e,n,l,a,r,i;const u=[t[1]];let s={};for(let _=0;_{o[I]=null}),ee(),a=o[l],a?a.p(_,c):(a=o[l]=f[l](_),a.c()),p(a,1),a.m(r.parentNode,r))},i(_){i||(p(e.$$.fragment,_),p(a),i=!0)},o(_){A(e.$$.fragment,_),A(a),i=!1},d(_){S(e,_),_&&v(n),o[l].d(_),_&&v(r)}}}function On(t){let e,n;return e=new Wt({props:{visible:t[4],variant:t[16]==="dynamic"&&t[0]===null&&t[5]==="upload"?"dashed":"solid",border_mode:t[17]?"focus":"base",padding:!1,elem_id:t[2],elem_classes:t[3],style:{height:t[11].height||(t[5]==="webcam"||t[16]==="static"?void 0:wt),width:t[11].width},allow_overflow:!1,$$slots:{default:[Wn]},$$scope:{ctx:t}}}),{c(){T(e.$$.fragment)},m(l,a){B(e,l,a),n=!0},p(l,[a]){const r={};a&16&&(r.visible=l[4]),a&65569&&(r.variant=l[16]==="dynamic"&&l[0]===null&&l[5]==="upload"?"dashed":"solid"),a&131072&&(r.border_mode=l[17]?"focus":"base"),a&4&&(r.elem_id=l[2]),a&8&&(r.elem_classes=l[3]),a&67616&&(r.style={height:l[11].height||(l[5]==="webcam"||l[16]==="static"?void 0:wt),width:l[11].width}),a&537130979&&(r.$$scope={dirty:a,ctx:l}),e.$set(r)},i(l){n||(p(e.$$.fragment,l),n=!0)},o(l){A(e.$$.fragment,l),n=!1},d(l){S(e,l)}}}const wt=240;function 
Yn(t,e,n){let{elem_id:l=""}=e,{elem_classes:a=[]}=e,{visible:r=!0}=e,{value:i=null}=e,{source:u="upload"}=e,{tool:s="editor"}=e,{label:f}=e,{show_label:o}=e,{streaming:g}=e,{pending:_}=e,{style:c={}}=e,{mirror_webcam:b}=e,{shape:I}=e,{brush_radius:D}=e,{selectable:N=!1}=e,{loading_status:U}=e,{mode:ce}=e;const E=de();let k;function W(w){ue.call(this,t,w)}function Y(w){i=w,n(0,i)}function se(w){ue.call(this,t,w)}function oe(w){ue.call(this,t,w)}function fe(w){ue.call(this,t,w)}function ie(w){ue.call(this,t,w)}const X=({detail:w})=>n(17,k=w);function te(w){ue.call(this,t,w)}function O(w){ue.call(this,t,w)}const K=({detail:w})=>{n(1,U=U||{}),n(1,U.status="error",U),n(1,U.message=w,U)};return t.$$set=w=>{"elem_id"in w&&n(2,l=w.elem_id),"elem_classes"in w&&n(3,a=w.elem_classes),"visible"in w&&n(4,r=w.visible),"value"in w&&n(0,i=w.value),"source"in w&&n(5,u=w.source),"tool"in w&&n(6,s=w.tool),"label"in w&&n(7,f=w.label),"show_label"in w&&n(8,o=w.show_label),"streaming"in w&&n(9,g=w.streaming),"pending"in w&&n(10,_=w.pending),"style"in w&&n(11,c=w.style),"mirror_webcam"in w&&n(12,b=w.mirror_webcam),"shape"in w&&n(13,I=w.shape),"brush_radius"in w&&n(14,D=w.brush_radius),"selectable"in w&&n(15,N=w.selectable),"loading_status"in w&&n(1,U=w.loading_status),"mode"in w&&n(16,ce=w.mode)},t.$$.update=()=>{t.$$.dirty&1&&n(0,i=i||null),t.$$.dirty&1&&E("change")},[i,U,l,a,r,u,s,f,o,g,_,c,b,I,D,N,ce,k,W,Y,se,oe,fe,ie,X,te,O,K]}class Xn extends ne{constructor(e){super(),le(this,e,Yn,On,$,{elem_id:2,elem_classes:3,visible:4,value:0,source:5,tool:6,label:7,show_label:8,streaming:9,pending:10,style:11,mirror_webcam:12,shape:13,brush_radius:14,selectable:15,loading_status:1,mode:16})}}const rl=Xn,al=["static","dynamic"],ul=t=>({type:{payload:"string"},description:{payload:"image data as base64 string"},example_data:"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAACklEQVR4nGMAAQAABQABDQottAAAAABJRU5ErkJggg=="});export{rl as Component,_l as ExampleComponent,ul as document,al as modes}; -//# sourceMappingURL=index-f0702dd5.js.map diff --git a/spaces/widged/gender-bias-evaluation/README.md b/spaces/widged/gender-bias-evaluation/README.md deleted file mode 100644 index c7ec040caf8ef5644fb27cb1f4844b6d7d684ab7..0000000000000000000000000000000000000000 --- a/spaces/widged/gender-bias-evaluation/README.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: Spaces Template Gradio -emoji: 🌍 -colorFrom: gray -colorTo: purple -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. - -# Warnings - -:WARN: Not my own work. Borrowed or adapted from another space of the same name. 
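A short reference block, added here for clarity only: it restates the front-matter values already present at the top of this README, with each documented configuration key annotated inline (the comments are explanatory additions and are not part of the original file):

```yaml
---
title: Spaces Template Gradio   # display title for the Space
emoji: 🌍                       # emoji-only character
colorFrom: gray                 # thumbnail gradient start color
colorTo: purple                 # thumbnail gradient end color
sdk: gradio                     # either `gradio` or `streamlit`
app_file: app.py                # main application file, relative to the repo root
pinned: false                   # whether the Space stays on top of your list
---
```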
diff --git a/spaces/wisnuarys15/rvc-wisnu5/app.py b/spaces/wisnuarys15/rvc-wisnu5/app.py deleted file mode 100644 index f6d9df484d4b74af79f88868816ef7b377e47797..0000000000000000000000000000000000000000 --- a/spaces/wisnuarys15/rvc-wisnu5/app.py +++ /dev/null @@ -1,188 +0,0 @@ -import os -import json -import argparse -import traceback -import logging -import gradio as gr -import numpy as np -import librosa -import torch -import asyncio -import edge_tts -from datetime import datetime -from fairseq import checkpoint_utils -from infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono -from vc_infer_pipeline import VC -from config import ( - is_half, - device -) -logging.getLogger("numba").setLevel(logging.WARNING) -limitation = os.getenv("SYSTEM") == "spaces" # limit audio length in huggingface spaces - -def create_vc_fn(tgt_sr, net_g, vc, if_f0, file_index, file_big_npy): - def vc_fn( - input_audio, - f0_up_key, - f0_method, - index_rate, - tts_mode, - tts_text, - tts_voice - ): - try: - if tts_mode: - if len(tts_text) > 100 and limitation: - return "Text is too long", None - if tts_text is None or tts_voice is None: - return "You need to enter text and select a voice", None - asyncio.run(edge_tts.Communicate(tts_text, "-".join(tts_voice.split('-')[:-1])).save("tts.mp3")) - audio, sr = librosa.load("tts.mp3", sr=16000, mono=True) - else: - if args.files: - audio, sr = librosa.load(input_audio, sr=16000, mono=True) - else: - if input_audio is None: - return "You need to upload an audio", None - sampling_rate, audio = input_audio - duration = audio.shape[0] / sampling_rate - if duration > 20 and limitation: - return "Please upload an audio file that is less than 20 seconds. If you need to generate a longer audio file, please use Colab.", None - audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32) - if len(audio.shape) > 1: - audio = librosa.to_mono(audio.transpose(1, 0)) - if sampling_rate != 16000: - audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000) - times = [0, 0, 0] - f0_up_key = int(f0_up_key) - audio_opt = vc.pipeline( - hubert_model, - net_g, - 0, - audio, - times, - f0_up_key, - f0_method, - file_index, - file_big_npy, - index_rate, - if_f0, - ) - print( - f"[{datetime.now().strftime('%Y-%m-%d %H:%M')}]: npy: {times[0]}, f0: {times[1]}s, infer: {times[2]}s" - ) - return "Success", (tgt_sr, audio_opt) - except: - info = traceback.format_exc() - print(info) - return info, (None, None) - return vc_fn - -def load_hubert(): - global hubert_model - models, _, _ = checkpoint_utils.load_model_ensemble_and_task( - ["hubert_base.pt"], - suffix="", - ) - hubert_model = models[0] - hubert_model = hubert_model.to(device) - if is_half: - hubert_model = hubert_model.half() - else: - hubert_model = hubert_model.float() - hubert_model.eval() - -def change_to_tts_mode(tts_mode): - if tts_mode: - return gr.Audio.update(visible=False), gr.Textbox.update(visible=True), gr.Dropdown.update(visible=True) - else: - return gr.Audio.update(visible=True), gr.Textbox.update(visible=False), gr.Dropdown.update(visible=False) - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--api', action="store_true", default=False) - parser.add_argument("--share", action="store_true", default=False, help="share gradio app") - parser.add_argument("--files", action="store_true", default=False, help="load audio from path") - args, unknown = parser.parse_known_args() - load_hubert() - models = [] - tts_voice_list = 
asyncio.get_event_loop().run_until_complete(edge_tts.list_voices()) - voices = [f"{v['ShortName']}-{v['Gender']}" for v in tts_voice_list] - with open("weights/model_info.json", "r", encoding="utf-8") as f: - models_info = json.load(f) - for name, info in models_info.items(): - if not info['enable']: - continue - title = info['title'] - author = info.get("author", None) - cover = f"weights/{name}/{info['cover']}" - index = f"weights/{name}/{info['feature_retrieval_library']}" - npy = f"weights/{name}/{info['feature_file']}" - cpt = torch.load(f"weights/{name}/{name}.pth", map_location="cpu") - tgt_sr = cpt["config"][-1] - cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk - if_f0 = cpt.get("f0", 1) - if if_f0 == 1: - net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half) - else: - net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) - del net_g.enc_q - print(net_g.load_state_dict(cpt["weight"], strict=False)) # 不加这一行清不干净, 真奇葩 - net_g.eval().to(device) - if is_half: - net_g = net_g.half() - else: - net_g = net_g.float() - vc = VC(tgt_sr, device, is_half) - models.append((name, title, author, cover, create_vc_fn(tgt_sr, net_g, vc, if_f0, index, npy))) - with gr.Blocks() as app: - gr.Markdown( - "#
<center> RVC Models\n" - "## <center>
      The input audio should be clean and pure voice without background music.\n" - "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=ardha27.Rvc-Models)\n\n" - "[![image](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1ZURZ2IaN_EDdimt29i8vDN3qnSM0IMZ_?usp=drive_link)\n\n" - "[![Duplicate this Space](https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-sm-dark.svg)](https://huggingface.co/spaces/ardha27pi/rvc-models?duplicate=true)\n\n" - "[![Train Own Voice](https://badgen.net/badge/icon/github?icon=github&label=Train%20Voice)](https://github.com/ardha27/AI-Song-Cover-RVC)\n\n" - "[![ko-fi](https://ko-fi.com/img/githubbutton_sm.svg)](https://ko-fi.com/R6R7AH1FA)\n\n" - ) - with gr.Tabs(): - for (name, title, author, cover, vc_fn) in models: - with gr.TabItem(name): - with gr.Row(): - gr.Markdown( - '
<div align="center">' - f'<div>{title}</div>\n'+ - (f'<div>Model author: {author}</div>' if author else "")+ - (f'<img style="width:auto;height:300px;" src="file/{cover}">' if cover else "")+ - '</div>
      ' - ) - with gr.Row(): - with gr.Column(): - if args.files: - vc_input = gr.Textbox(label="Input audio path") - else: - vc_input = gr.Audio(label="Input audio"+' (less than 20 seconds)' if limitation else '') - vc_transpose = gr.Number(label="Transpose", value=0) - vc_f0method = gr.Radio( - label="Pitch extraction algorithm, PM is fast but Harvest is better for low frequencies", - choices=["pm", "harvest"], - value="pm", - interactive=True, - ) - vc_index_ratio = gr.Slider( - minimum=0, - maximum=1, - label="Retrieval feature ratio", - value=0.6, - interactive=True, - ) - tts_mode = gr.Checkbox(label="tts (use edge-tts as input)", value=False) - tts_text = gr.Textbox(visible=False,label="TTS text (100 words limitation)" if limitation else "TTS text") - tts_voice = gr.Dropdown(label="Edge-tts speaker", choices=voices, visible=False, allow_custom_value=False, value="en-US-AnaNeural-Female") - vc_submit = gr.Button("Generate", variant="primary") - with gr.Column(): - vc_output1 = gr.Textbox(label="Output Message") - vc_output2 = gr.Audio(label="Output Audio") - vc_submit.click(vc_fn, [vc_input, vc_transpose, vc_f0method, vc_index_ratio, tts_mode, tts_text, tts_voice], [vc_output1, vc_output2]) - tts_mode.change(change_to_tts_mode, [tts_mode], [vc_input, tts_text, tts_voice]) - app.queue(concurrency_count=1, max_size=20, api_open=args.api).launch(share=args.share) \ No newline at end of file diff --git a/spaces/wisnuarys15/rvc-wisnu5/infer_pack/models.py b/spaces/wisnuarys15/rvc-wisnu5/infer_pack/models.py deleted file mode 100644 index 96165f73644e6fb92d0ffedb4a3c9e1a457cb989..0000000000000000000000000000000000000000 --- a/spaces/wisnuarys15/rvc-wisnu5/infer_pack/models.py +++ /dev/null @@ -1,982 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from infer_pack import modules -from infer_pack import attentions -from infer_pack import commons -from infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from infer_pack.commons import init_weights -import numpy as np -from infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - 
return m, logs, x_mask - - -class TextEncoder256Sim(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - x = self.proj(x) * x_mask - return x, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - 
resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values 
= (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in 
enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMs256NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - 
hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs256NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - 
self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs256NSFsid_sim(nn.Module): - """ - Synthesizer for Training - """ - - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - # hop_length, - gin_channels=0, - use_sdp=True, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256Sim( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - is_half=kwargs["is_half"], - ) - - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y_lengths, ds - ): # y是spec不需要了现在 - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - x, x_mask = self.enc_p(phone, pitch, phone_lengths) - x = self.flow(x, x_mask, g=g, reverse=True) - z_slice, ids_slice = commons.rand_slice_segments( - x, y_lengths, self.segment_size - ) - - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice - - def infer( - self, phone, phone_lengths, pitch, pitchf, ds, max_len=None - ): # y是spec不需要了现在 - g = self.emb_g(ds).unsqueeze(-1) # [b, 
256, 1]##1是t,广播的 - x, x_mask = self.enc_p(phone, pitch, phone_lengths) - x = self.flow(x, x_mask, g=g, reverse=True) - o = self.dec((x * x_mask)[:, :, :max_len], pitchf, g=g) - return o, o - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/xdecoder/Instruct-X-Decoder/xdecoder/utils/misc.py b/spaces/xdecoder/Instruct-X-Decoder/xdecoder/utils/misc.py deleted file mode 100644 index 
e7bfa08060344fedcb1d5017b932a3c16fc5bc86..0000000000000000000000000000000000000000 --- a/spaces/xdecoder/Instruct-X-Decoder/xdecoder/utils/misc.py +++ /dev/null @@ -1,157 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/util/misc.py -# Modified by Xueyan Zou -""" -Misc functions, including distributed helpers. - -Mostly copy-paste from torchvision references. -""" -from typing import List, Optional - -import torch -import torch.distributed as dist -import torchvision -from torch import Tensor - -def _max_by_axis(the_list): - # type: (List[List[int]]) -> List[int] - maxes = the_list[0] - for sublist in the_list[1:]: - for index, item in enumerate(sublist): - maxes[index] = max(maxes[index], item) - return maxes - -class NestedTensor(object): - def __init__(self, tensors, mask: Optional[Tensor]): - self.tensors = tensors - self.mask = mask - - def to(self, device): - # type: (Device) -> NestedTensor # noqa - cast_tensor = self.tensors.to(device) - mask = self.mask - if mask is not None: - assert mask is not None - cast_mask = mask.to(device) - else: - cast_mask = None - return NestedTensor(cast_tensor, cast_mask) - - def decompose(self): - return self.tensors, self.mask - - def __repr__(self): - return str(self.tensors) - -def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): - # TODO make this more general - if tensor_list[0].ndim == 3: - if torchvision._is_tracing(): - # nested_tensor_from_tensor_list() does not export well to ONNX - # call _onnx_nested_tensor_from_tensor_list() instead - return _onnx_nested_tensor_from_tensor_list(tensor_list) - - # TODO make it support different-sized images - max_size = _max_by_axis([list(img.shape) for img in tensor_list]) - # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list])) - batch_shape = [len(tensor_list)] + max_size - b, c, h, w = batch_shape - dtype = tensor_list[0].dtype - device = tensor_list[0].device - tensor = torch.zeros(batch_shape, dtype=dtype, device=device) - mask = torch.ones((b, h, w), dtype=torch.bool, device=device) - for img, pad_img, m in zip(tensor_list, tensor, mask): - pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) - m[: img.shape[1], : img.shape[2]] = False - elif tensor_list[0].ndim == 2: - if torchvision._is_tracing(): - # nested_tensor_from_tensor_list() does not export well to ONNX - # call _onnx_nested_tensor_from_tensor_list() instead - return _onnx_nested_tensor_from_tensor_list(tensor_list) - - # TODO make it support different-sized images - max_size = _max_by_axis([list(txt.shape) for txt in tensor_list]) - # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list])) - batch_shape = [len(tensor_list)] + max_size - b, c, l = batch_shape - dtype = tensor_list[0].dtype - device = tensor_list[0].device - tensor = torch.zeros(batch_shape, dtype=dtype, device=device) - mask = torch.ones((b, l), dtype=torch.bool, device=device) - for txt, pad_txt, m in zip(tensor_list, tensor, mask): - pad_txt[: txt.shape[0], : txt.shape[1]] = txt - m[: txt.shape[1]] = False - else: - raise ValueError("not supported") - return NestedTensor(tensor, mask) - -def _collate_and_pad_divisibility(tensor_list: list, div=32): - max_size = [] - for i in range(tensor_list[0].dim()): - max_size_i = torch.max( - torch.tensor([img.shape[i] for img in tensor_list]).to(torch.float32) - ).to(torch.int64) - max_size.append(max_size_i) - max_size = tuple(max_size) - - c,h,w = max_size - pad_h = 
(div - h % div) if h % div != 0 else 0 - pad_w = (div - w % div) if w % div != 0 else 0 - max_size = (c,h+pad_h,w+pad_w) - - # work around for - # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) - # m[: img.shape[1], :img.shape[2]] = False - # which is not yet supported in onnx - padded_imgs = [] - padded_masks = [] - for img in tensor_list: - padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))] - padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0])) - padded_imgs.append(padded_img) - - m = torch.zeros_like(img[0], dtype=torch.int, device=img.device) - padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1) - padded_masks.append(padded_mask.to(torch.bool)) - - return padded_imgs - -# _onnx_nested_tensor_from_tensor_list() is an implementation of -# nested_tensor_from_tensor_list() that is supported by ONNX tracing. -@torch.jit.unused -def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor: - max_size = [] - for i in range(tensor_list[0].dim()): - max_size_i = torch.max( - torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32) - ).to(torch.int64) - max_size.append(max_size_i) - max_size = tuple(max_size) - - # work around for - # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) - # m[: img.shape[1], :img.shape[2]] = False - # which is not yet supported in onnx - padded_imgs = [] - padded_masks = [] - for img in tensor_list: - padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))] - padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0])) - padded_imgs.append(padded_img) - - m = torch.zeros_like(img[0], dtype=torch.int, device=img.device) - padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1) - padded_masks.append(padded_mask.to(torch.bool)) - - tensor = torch.stack(padded_imgs) - mask = torch.stack(padded_masks) - - return NestedTensor(tensor, mask=mask) - - -def is_dist_avail_and_initialized(): - if not dist.is_available(): - return False - if not dist.is_initialized(): - return False - return True \ No newline at end of file diff --git a/spaces/xfys/yolov5_tracking/val_utils/trackeval/datasets/__init__.py b/spaces/xfys/yolov5_tracking/val_utils/trackeval/datasets/__init__.py deleted file mode 100644 index 4fdfa9dd590c6283d56e86419b863782fa619029..0000000000000000000000000000000000000000 --- a/spaces/xfys/yolov5_tracking/val_utils/trackeval/datasets/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -from .kitti_2d_box import Kitti2DBox -from .kitti_mots import KittiMOTS -from .mot_challenge_2d_box import MotChallenge2DBox -from .mots_challenge import MOTSChallenge -from .bdd100k import BDD100K -from .davis import DAVIS -from .tao import TAO -from .tao_ow import TAO_OW -try: - from .burst import BURST - from .burst_ow import BURST_OW -except ImportError as err: - print(f"Error importing BURST due to missing underlying dependency: {err}") -from .youtube_vis import YouTubeVIS -from .head_tracking_challenge import HeadTrackingChallenge -from .rob_mots import RobMOTS -from .person_path_22 import PersonPath22 diff --git a/spaces/xfys/yolov5_tracking/val_utils/trackeval/datasets/burst_ow.py b/spaces/xfys/yolov5_tracking/val_utils/trackeval/datasets/burst_ow.py deleted file mode 100644 index da775456e6c539c07c85db1fc9cad5998d8baaeb..0000000000000000000000000000000000000000 --- a/spaces/xfys/yolov5_tracking/val_utils/trackeval/datasets/burst_ow.py +++ /dev/null @@ 
-1,91 +0,0 @@ -import json -import os -from .burst_helpers.burst_ow_base import BURST_OW_Base -from .burst_helpers.format_converter import GroundTruthBURSTFormatToTAOFormatConverter, PredictionBURSTFormatToTAOFormatConverter -from .. import utils - - -class BURST_OW(BURST_OW_Base): - """Dataset class for TAO tracking""" - - @staticmethod - def get_default_dataset_config(): - tao_config = BURST_OW_Base.get_default_dataset_config() - code_path = utils.get_code_path() - tao_config['GT_FOLDER'] = os.path.join( - code_path, 'data/gt/burst/all_classes/val/') # Location of GT data - tao_config['TRACKERS_FOLDER'] = os.path.join( - code_path, 'data/trackers/burst/open-world/val/') # Trackers location - return tao_config - - def _iou_type(self): - return 'mask' - - def _box_or_mask_from_det(self, det): - if "segmentation" in det: - return det["segmentation"] - else: - return det["mask"] - - def _calculate_area_for_ann(self, ann): - import pycocotools.mask as cocomask - seg = self._box_or_mask_from_det(ann) - return cocomask.area(seg) - - def _calculate_similarities(self, gt_dets_t, tracker_dets_t): - similarity_scores = self._calculate_mask_ious(gt_dets_t, tracker_dets_t, is_encoded=True, do_ioa=False) - return similarity_scores - - def _postproc_ground_truth_data(self, data): - return GroundTruthBURSTFormatToTAOFormatConverter(data).convert() - - def _postproc_prediction_data(self, data): - # if it's a list, it's already in TAO format and not in Ali format - # however the image ids do not match and need to be remapped - if isinstance(data, list): - _remap_image_ids(data, self.gt_data) - return data - - return PredictionBURSTFormatToTAOFormatConverter( - self.gt_data, data, - exemplar_guided=False).convert() - - -def _remap_image_ids(pred_data, ali_gt_data): - code_path = utils.get_code_path() - if 'split' in ali_gt_data: - split = ali_gt_data['split'] - else: - split = 'val' - - if split in ('val', 'validation'): - tao_gt_path = os.path.join( - code_path, 'data/gt/tao/tao_validation/gt.json') - else: - tao_gt_path = os.path.join( - code_path, 'data/gt/tao/tao_test/test_without_annotations.json') - - with open(tao_gt_path) as f: - tao_gt = json.load(f) - - tao_img_by_id = {} - for img in tao_gt['images']: - img_id = img['id'] - tao_img_by_id[img_id] = img - - ali_img_id_by_filename = {} - for ali_img in ali_gt_data['images']: - ali_img_id = ali_img['id'] - file_name = ali_img['file_name'].replace("validation", "val") - ali_img_id_by_filename[file_name] = ali_img_id - - ali_img_id_by_tao_img_id = {} - for tao_img_id, tao_img in tao_img_by_id.items(): - file_name = tao_img['file_name'] - ali_img_id = ali_img_id_by_filename[file_name] - ali_img_id_by_tao_img_id[tao_img_id] = ali_img_id - - for det in pred_data: - tao_img_id = det['image_id'] - ali_img_id = ali_img_id_by_tao_img_id[tao_img_id] - det['image_id'] = ali_img_id diff --git a/spaces/xiang2811/ChatGPT/README.md b/spaces/xiang2811/ChatGPT/README.md deleted file mode 100644 index 7128e29689e35d059c9cc0a5050910fbd34873cd..0000000000000000000000000000000000000000 --- a/spaces/xiang2811/ChatGPT/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: ChuanhuChatGPT -emoji: 🐯 -colorFrom: green -colorTo: red -sdk: gradio -sdk_version: 3.25.0 -app_file: ChuanhuChatbot.py -pinned: false -license: gpl-3.0 -duplicated_from: JohnSmith9982/ChuanhuChatGPT ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/ybelkada/FocusOnDepth/focusondepth/model_definition.py 
b/spaces/ybelkada/FocusOnDepth/focusondepth/model_definition.py deleted file mode 100644 index 40b825245d1241c272ce0d251befe3cacd5aab7b..0000000000000000000000000000000000000000 --- a/spaces/ybelkada/FocusOnDepth/focusondepth/model_definition.py +++ /dev/null @@ -1,68 +0,0 @@ -from transformers import PreTrainedModel -import timm -import torch.nn as nn -import numpy as np - -from .model_config import FocusOnDepthConfig -from .reassemble import Reassemble -from .fusion import Fusion -from .head import HeadDepth, HeadSeg - - -class FocusOnDepth(PreTrainedModel): - config_class = FocusOnDepthConfig - - def __init__(self, config): - super().__init__(config) - self.transformer_encoders = timm.create_model(config.model_timm, pretrained=True) - self.type_ = config.type_ - - #Register hooks - self.activation = {} - self.hooks = config.hooks - self._get_layers_from_hooks(self.hooks) - - #Reassembles Fusion - self.reassembles = [] - self.fusions = [] - for s in config.reassemble_s: - self.reassembles.append(Reassemble(config.image_size, config.read, config.patch_size, s, config.emb_dim, config.resample_dim)) - self.fusions.append(Fusion(config.resample_dim)) - self.reassembles = nn.ModuleList(self.reassembles) - self.fusions = nn.ModuleList(self.fusions) - - #Head - if self.type_ == "full": - self.head_depth = HeadDepth(config.resample_dim) - self.head_segmentation = HeadSeg(config.resample_dim, nclasses=config.nclasses) - elif self.type_ == "depth": - self.head_depth = HeadDepth(config.resample_dim) - self.head_segmentation = None - else: - self.head_depth = None - self.head_segmentation = HeadSeg(config.resample_dim, nclasses=config.nclasses) - - def forward(self, img): - _ = self.transformer_encoders(img) - previous_stage = None - for i in np.arange(len(self.fusions)-1, -1, -1): - hook_to_take = 't'+str(self.hooks[i]) - activation_result = self.activation[hook_to_take] - reassemble_result = self.reassembles[i](activation_result) - fusion_result = self.fusions[i](reassemble_result, previous_stage) - previous_stage = fusion_result - out_depth = None - out_segmentation = None - if self.head_depth != None: - out_depth = self.head_depth(previous_stage) - if self.head_segmentation != None: - out_segmentation = self.head_segmentation(previous_stage) - return out_depth, out_segmentation - - def _get_layers_from_hooks(self, hooks): - def get_activation(name): - def hook(model, input, output): - self.activation[name] = output - return hook - for h in hooks: - self.transformer_encoders.blocks[h].register_forward_hook(get_activation('t'+str(h))) \ No newline at end of file diff --git a/spaces/ygtxr1997/ReliableSwap_Demo/third_party/GPEN/face_detect/utils/nms/py_cpu_nms.py b/spaces/ygtxr1997/ReliableSwap_Demo/third_party/GPEN/face_detect/utils/nms/py_cpu_nms.py deleted file mode 100644 index 54e7b25fef72b518df6dcf8d6fb78b986796c6e3..0000000000000000000000000000000000000000 --- a/spaces/ygtxr1997/ReliableSwap_Demo/third_party/GPEN/face_detect/utils/nms/py_cpu_nms.py +++ /dev/null @@ -1,38 +0,0 @@ -# -------------------------------------------------------- -# Fast R-CNN -# Copyright (c) 2015 Microsoft -# Licensed under The MIT License [see LICENSE for details] -# Written by Ross Girshick -# -------------------------------------------------------- - -import numpy as np - -def py_cpu_nms(dets, thresh): - """Pure Python NMS baseline.""" - x1 = dets[:, 0] - y1 = dets[:, 1] - x2 = dets[:, 2] - y2 = dets[:, 3] - scores = dets[:, 4] - - areas = (x2 - x1 + 1) * (y2 - y1 + 1) - order = scores.argsort()[::-1] - - keep = 
[] - while order.size > 0: - i = order[0] - keep.append(i) - xx1 = np.maximum(x1[i], x1[order[1:]]) - yy1 = np.maximum(y1[i], y1[order[1:]]) - xx2 = np.minimum(x2[i], x2[order[1:]]) - yy2 = np.minimum(y2[i], y2[order[1:]]) - - w = np.maximum(0.0, xx2 - xx1 + 1) - h = np.maximum(0.0, yy2 - yy1 + 1) - inter = w * h - ovr = inter / (areas[i] + areas[order[1:]] - inter) - - inds = np.where(ovr <= thresh)[0] - order = order[inds + 1] - - return keep diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/feature_extraction_sequence_utils.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/feature_extraction_sequence_utils.py deleted file mode 100644 index 40717d9931850057407f4d00f8da2c4db72b5f99..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/feature_extraction_sequence_utils.py +++ /dev/null @@ -1,371 +0,0 @@ -# coding=utf-8 -# Copyright 2021 The HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" - Sequence feature extraction class for common feature extractors to preprocess sequences. -""" -from typing import Dict, List, Optional, Union - -import numpy as np - -from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin -from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy - - -logger = logging.get_logger(__name__) - - -class SequenceFeatureExtractor(FeatureExtractionMixin): - """ - This is a general feature extraction class for speech recognition. - - Args: - feature_size (`int`): - The feature dimension of the extracted features. - sampling_rate (`int`): - The sampling rate at which the audio files should be digitalized expressed in hertz (Hz). - padding_value (`float`): - The value that is used to fill the padding values / vectors. - """ - - def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs): - self.feature_size = feature_size - self.sampling_rate = sampling_rate - self.padding_value = padding_value - - self.padding_side = kwargs.pop("padding_side", "right") - self.return_attention_mask = kwargs.pop("return_attention_mask", True) - - super().__init__(**kwargs) - - def pad( - self, - processed_features: Union[ - BatchFeature, - List[BatchFeature], - Dict[str, BatchFeature], - Dict[str, List[BatchFeature]], - List[Dict[str, BatchFeature]], - ], - padding: Union[bool, str, PaddingStrategy] = True, - max_length: Optional[int] = None, - truncation: bool = False, - pad_to_multiple_of: Optional[int] = None, - return_attention_mask: Optional[bool] = None, - return_tensors: Optional[Union[str, TensorType]] = None, - ) -> BatchFeature: - """ - Pad input values / input vectors or a batch of input values / input vectors up to predefined length or to the - max sequence length in the batch. 
- - Padding side (left/right) padding values are defined at the feature extractor level (with `self.padding_side`, - `self.padding_value`) - - - - If the `processed_features` passed are dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the - result will use the same type unless you provide a different tensor type with `return_tensors`. In the case of - PyTorch tensors, you will lose the specific device of your tensors however. - - - - Args: - processed_features ([`BatchFeature`], list of [`BatchFeature`], `Dict[str, List[float]]`, `Dict[str, List[List[float]]` or `List[Dict[str, List[float]]]`): - Processed inputs. Can represent one input ([`BatchFeature`] or `Dict[str, List[float]]`) or a batch of - input values / vectors (list of [`BatchFeature`], *Dict[str, List[List[float]]]* or *List[Dict[str, - List[float]]]*) so you can use this method during preprocessing as well as in a PyTorch Dataloader - collate function. - - Instead of `List[float]` you can have tensors (numpy arrays, PyTorch tensors or TensorFlow tensors), - see the note above for the return type. - padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`): - Select a strategy to pad the returned sequences (according to the model's padding side and padding - index) among: - - - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single - sequence if provided). - - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum - acceptable input length for the model if that argument is not provided. - - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different - lengths). - max_length (`int`, *optional*): - Maximum length of the returned list and optionally padding length (see above). - truncation (`bool`): - Activates truncation to cut input sequences longer than `max_length` to `max_length`. - pad_to_multiple_of (`int`, *optional*): - If set will pad the sequence to a multiple of the provided value. - - This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability - `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. - return_attention_mask (`bool`, *optional*): - Whether to return the attention mask. If left to the default, will return the attention mask according - to the specific feature_extractor's default. - - [What are attention masks?](../glossary#attention-mask) - return_tensors (`str` or [`~utils.TensorType`], *optional*): - If set, will return tensors instead of list of python integers. Acceptable values are: - - - `'tf'`: Return TensorFlow `tf.constant` objects. - - `'pt'`: Return PyTorch `torch.Tensor` objects. - - `'np'`: Return Numpy `np.ndarray` objects. 
- """ - # If we have a list of dicts, let's convert it in a dict of lists - # We do this to allow using this method as a collate_fn function in PyTorch Dataloader - if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)): - processed_features = { - key: [example[key] for example in processed_features] for key in processed_features[0].keys() - } - - # The model's main input name, usually `input_values`, has be passed for padding - if self.model_input_names[0] not in processed_features: - raise ValueError( - "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`" - f" to this method that includes {self.model_input_names[0]}, but you provided" - f" {list(processed_features.keys())}" - ) - - required_input = processed_features[self.model_input_names[0]] - return_attention_mask = ( - return_attention_mask if return_attention_mask is not None else self.return_attention_mask - ) - - if len(required_input) == 0: - if return_attention_mask: - processed_features["attention_mask"] = [] - return processed_features - - # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays - # and rebuild them afterwards if no return_tensors is specified - # Note that we lose the specific device the tensor may be on for PyTorch - - first_element = required_input[0] - if isinstance(first_element, (list, tuple)): - # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. - index = 0 - while len(required_input[index]) == 0: - index += 1 - if index < len(required_input): - first_element = required_input[index][0] - - if return_tensors is None: - if is_tf_tensor(first_element): - return_tensors = "tf" - elif is_torch_tensor(first_element): - return_tensors = "pt" - elif isinstance(first_element, (int, float, list, tuple, np.ndarray)): - return_tensors = "np" - else: - raise ValueError( - f"type of {first_element} unknown: {type(first_element)}. " - "Should be one of a python, numpy, pytorch or tensorflow object." 
- ) - - for key, value in processed_features.items(): - if isinstance(value[0], (int, float)): - processed_features[key] = to_numpy(value) - else: - processed_features[key] = [to_numpy(v) for v in value] - - # Convert padding_strategy in PaddingStrategy - padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length) - - required_input = processed_features[self.model_input_names[0]] - - batch_size = len(required_input) - if not all(len(v) == batch_size for v in processed_features.values()): - raise ValueError("Some items in the output dictionary have a different batch size than others.") - - truncated_inputs = [] - for i in range(batch_size): - inputs = {k: v[i] for k, v in processed_features.items()} - # truncation - inputs_slice = self._truncate( - inputs, - max_length=max_length, - pad_to_multiple_of=pad_to_multiple_of, - truncation=truncation, - ) - truncated_inputs.append(inputs_slice) - - if padding_strategy == PaddingStrategy.LONGEST: - # make sure that `max_length` cannot be longer than the longest truncated length - max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs) - padding_strategy = PaddingStrategy.MAX_LENGTH - - batch_outputs = {} - for i in range(batch_size): - # padding - outputs = self._pad( - truncated_inputs[i], - max_length=max_length, - padding_strategy=padding_strategy, - pad_to_multiple_of=pad_to_multiple_of, - return_attention_mask=return_attention_mask, - ) - - for key, value in outputs.items(): - if key not in batch_outputs: - batch_outputs[key] = [] - if value.dtype is np.dtype(np.float64): - value = value.astype(np.float32) - batch_outputs[key].append(value) - - return BatchFeature(batch_outputs, tensor_type=return_tensors) - - def _pad( - self, - processed_features: Union[Dict[str, np.ndarray], BatchFeature], - max_length: Optional[int] = None, - padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, - pad_to_multiple_of: Optional[int] = None, - return_attention_mask: Optional[bool] = None, - ) -> dict: - """ - Pad inputs (on left/right and up to predefined length or max length in the batch) - - Args: - processed_features (`Union[Dict[str, np.ndarray], BatchFeature]`): - Dictionary of input values (`np.ndarray[float]`) / input vectors (`List[np.ndarray[float]]`) or batch - of inputs values (`List[np.ndarray[int]]`) / input vectors (`List[np.ndarray[int]]`) - max_length (`int`, *optional*): - Maximum length of the returned list and optionally padding length (see below) - padding_strategy (`PaddingStrategy`, *optional*, default to `PaddingStrategy.DO_NOT_PAD`): - PaddingStrategy to use for padding. - - - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - - PaddingStrategy.DO_NOT_PAD: Do not pad - The feature_extractor padding sides are defined in self.padding_side: - - - 'left': pads on the left of the sequences - - 'right': pads on the right of the sequences - pad_to_multiple_of (`int`, *optional*): - Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to - enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs - which benefit from having sequence lengths be a multiple of 128. 
- return_attention_mask (`bool`, *optional*): - Set to False to avoid returning attention mask (default: set to model specifics) - """ - required_input = processed_features[self.model_input_names[0]] - - if padding_strategy == PaddingStrategy.LONGEST: - max_length = len(required_input) - - if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): - max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of - - needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length - - if return_attention_mask and "attention_mask" not in processed_features: - processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32) - - if needs_to_be_padded: - difference = max_length - len(required_input) - if self.padding_side == "right": - if return_attention_mask: - processed_features["attention_mask"] = np.pad( - processed_features["attention_mask"], (0, difference) - ) - padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) - processed_features[self.model_input_names[0]] = np.pad( - required_input, padding_shape, "constant", constant_values=self.padding_value - ) - elif self.padding_side == "left": - if return_attention_mask: - processed_features["attention_mask"] = np.pad( - processed_features["attention_mask"], (difference, 0) - ) - padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) - processed_features[self.model_input_names[0]] = np.pad( - required_input, padding_shape, "constant", constant_values=self.padding_value - ) - else: - raise ValueError("Invalid padding strategy:" + str(self.padding_side)) - - return processed_features - - def _truncate( - self, - processed_features: Union[Dict[str, np.ndarray], BatchFeature], - max_length: Optional[int] = None, - pad_to_multiple_of: Optional[int] = None, - truncation: Optional[bool] = None, - ): - """ - Truncate inputs to predefined length or max length in the batch - - Args: - processed_features(`Union[Dict[str, np.ndarray], BatchFeature]`): - Dictionary of input values (`np.ndarray[float]`) / input vectors (`List[np.ndarray[float]]`) or batch - of inputs values (`List[np.ndarray[int]]`) / input vectors (`List[np.ndarray[int]]`) - max_length (`int`, *optional*): - maximum length of the returned list and optionally padding length (see below) - pad_to_multiple_of (`int`, *optional*) : - Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to - enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs - which benefit from having sequence lengths be a multiple of 128. - truncation (`bool`, *optional*): - Activates truncation to cut input sequences longer than `max_length` to `max_length`. 
- """ - if not truncation: - return processed_features - elif truncation and max_length is None: - raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.") - - required_input = processed_features[self.model_input_names[0]] - - # find `max_length` that fits `pad_to_multiple_of` - if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): - max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of - - needs_to_be_truncated = len(required_input) > max_length - - if needs_to_be_truncated: - processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length] - if "attention_mask" in processed_features: - processed_features["attention_mask"] = processed_features["attention_mask"][:max_length] - - return processed_features - - def _get_padding_strategies(self, padding=False, max_length=None): - """ - Find the correct padding strategy - """ - - # Get padding strategy - if padding is not False: - if padding is True: - padding_strategy = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch - elif not isinstance(padding, PaddingStrategy): - padding_strategy = PaddingStrategy(padding) - elif isinstance(padding, PaddingStrategy): - padding_strategy = padding - else: - padding_strategy = PaddingStrategy.DO_NOT_PAD - - # Set max length if needed - if max_length is None: - if padding_strategy == PaddingStrategy.MAX_LENGTH: - raise ValueError( - f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" - ) - - # Test if we have a padding value - if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): - raise ValueError( - "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use" - " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." - ) - - return padding_strategy diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/blenderbot_small/configuration_blenderbot_small.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/blenderbot_small/configuration_blenderbot_small.py deleted file mode 100644 index fbc23435d66f312dce2656604c8f166bc0e7b8de..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/blenderbot_small/configuration_blenderbot_small.py +++ /dev/null @@ -1,391 +0,0 @@ -# coding=utf-8 -# Copyright 2021 The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" BlenderbotSmall model configuration""" - -from collections import OrderedDict -from typing import Any, Mapping, Optional - -from ... 
import PreTrainedTokenizer -from ...configuration_utils import PretrainedConfig -from ...file_utils import TensorType, is_torch_available -from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast -from ...onnx.utils import compute_effective_axis_dimension -from ...utils import logging - - -logger = logging.get_logger(__name__) - -BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = { - "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json", - # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small -} - - -class BlenderbotSmallConfig(PretrainedConfig): - r""" - This is the configuration class to store the configuration of a [`BlenderbotSmallModel`]. It is used to instantiate - an BlenderbotSmall model according to the specified arguments, defining the model architecture. Instantiating a - configuration with the defaults will yield a similar configuration to that of the BlenderbotSmall - [facebook/blenderbot_small-90M](https://huggingface.co/facebook/blenderbot_small-90M) architecture. - - Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the - documentation from [`PretrainedConfig`] for more information. - - - Args: - vocab_size (`int`, *optional*, defaults to 50265): - Vocabulary size of the BlenderbotSmall model. Defines the number of different tokens that can be - represented by the `inputs_ids` passed when calling [`BlenderbotSmallModel`] or [`TFBlenderbotSmallModel`]. - d_model (`int`, *optional*, defaults to 512): - Dimensionality of the layers and the pooler layer. - encoder_layers (`int`, *optional*, defaults to 8): - Number of encoder layers. - decoder_layers (`int`, *optional*, defaults to 8): - Number of decoder layers. - encoder_attention_heads (`int`, *optional*, defaults to 16): - Number of attention heads for each attention layer in the Transformer encoder. - decoder_attention_heads (`int`, *optional*, defaults to 16): - Number of attention heads for each attention layer in the Transformer decoder. - decoder_ffn_dim (`int`, *optional*, defaults to 2048): - Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. - encoder_ffn_dim (`int`, *optional*, defaults to 2048): - Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. - activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): - The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, - `"relu"`, `"silu"` and `"gelu_new"` are supported. - dropout (`float`, *optional*, defaults to 0.1): - The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. - attention_dropout (`float`, *optional*, defaults to 0.0): - The dropout ratio for the attention probabilities. - activation_dropout (`float`, *optional*, defaults to 0.0): - The dropout ratio for activations inside the fully connected layer. - max_position_embeddings (`int`, *optional*, defaults to 512): - The maximum sequence length that this model might ever be used with. Typically set this to something large - just in case (e.g., 512 or 1024 or 2048). - init_std (`float`, *optional*, defaults to 0.02): - The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - encoder_layerdrop (`float`, *optional*, defaults to 0.0): - The LayerDrop probability for the encoder. 
See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) - for more details. - decoder_layerdrop (`float`, *optional*, defaults to 0.0): - The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) - for more details. - scale_embedding (`bool`, *optional*, defaults to `False`): - Scale embeddings by diving by sqrt(d_model). - use_cache (`bool`, *optional*, defaults to `True`): - Whether or not the model should return the last key/values attentions (not used by all models) - forced_eos_token_id (`int`, *optional*, defaults to 2): - The id of the token to force as the last generated token when `max_length` is reached. Usually set to - `eos_token_id`. - - Example: - - ```python - >>> from transformers import BlenderbotSmallConfig, BlenderbotSmallModel - - >>> # Initializing a BlenderbotSmall facebook/blenderbot_small-90M style configuration - >>> configuration = BlenderbotSmallConfig() - - >>> # Initializing a model (with random weights) from the facebook/blenderbot_small-90M style configuration - >>> model = BlenderbotSmallModel(configuration) - - >>> # Accessing the model configuration - >>> configuration = model.config - ```""" - model_type = "blenderbot-small" - keys_to_ignore_at_inference = ["past_key_values"] - attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} - - def __init__( - self, - vocab_size=50265, - max_position_embeddings=512, - encoder_layers=8, - encoder_ffn_dim=2048, - encoder_attention_heads=16, - decoder_layers=8, - decoder_ffn_dim=2048, - decoder_attention_heads=16, - encoder_layerdrop=0.0, - decoder_layerdrop=0.0, - use_cache=True, - is_encoder_decoder=True, - activation_function="gelu", - d_model=512, - dropout=0.1, - attention_dropout=0.0, - activation_dropout=0.0, - init_std=0.02, - decoder_start_token_id=1, - scale_embedding=False, - pad_token_id=0, - bos_token_id=1, - eos_token_id=2, - forced_eos_token_id=2, - **kwargs, - ): - self.vocab_size = vocab_size - self.max_position_embeddings = max_position_embeddings - self.d_model = d_model - self.encoder_ffn_dim = encoder_ffn_dim - self.encoder_layers = encoder_layers - self.encoder_attention_heads = encoder_attention_heads - self.decoder_ffn_dim = decoder_ffn_dim - self.decoder_layers = decoder_layers - self.decoder_attention_heads = decoder_attention_heads - self.dropout = dropout - self.attention_dropout = attention_dropout - self.activation_dropout = activation_dropout - self.activation_function = activation_function - self.init_std = init_std - self.encoder_layerdrop = encoder_layerdrop - self.decoder_layerdrop = decoder_layerdrop - self.use_cache = use_cache - self.num_hidden_layers = encoder_layers - self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True - - super().__init__( - pad_token_id=pad_token_id, - bos_token_id=bos_token_id, - eos_token_id=eos_token_id, - is_encoder_decoder=is_encoder_decoder, - decoder_start_token_id=decoder_start_token_id, - forced_eos_token_id=forced_eos_token_id, - **kwargs, - ) - - -# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig -class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast): - @property - def inputs(self) -> Mapping[str, Mapping[int, str]]: - if self.task in ["default", "seq2seq-lm"]: - common_inputs = OrderedDict( - [ - ("input_ids", {0: "batch", 1: "encoder_sequence"}), - ("attention_mask", {0: "batch", 1: "encoder_sequence"}), - ] - ) - - if self.use_past: - common_inputs["decoder_input_ids"] = {0: "batch"} - 
common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"} - else: - common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"} - common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"} - - if self.use_past: - self.fill_with_past_key_values_(common_inputs, direction="inputs") - elif self.task == "causal-lm": - # TODO: figure this case out. - common_inputs = OrderedDict( - [ - ("input_ids", {0: "batch", 1: "encoder_sequence"}), - ("attention_mask", {0: "batch", 1: "encoder_sequence"}), - ] - ) - if self.use_past: - num_encoder_layers, _ = self.num_layers - for i in range(num_encoder_layers): - common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"} - common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"} - else: - common_inputs = OrderedDict( - [ - ("input_ids", {0: "batch", 1: "encoder_sequence"}), - ("attention_mask", {0: "batch", 1: "encoder_sequence"}), - ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), - ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), - ] - ) - - return common_inputs - - @property - def outputs(self) -> Mapping[str, Mapping[int, str]]: - if self.task in ["default", "seq2seq-lm"]: - common_outputs = super().outputs - else: - common_outputs = super(OnnxConfigWithPast, self).outputs - if self.use_past: - num_encoder_layers, _ = self.num_layers - for i in range(num_encoder_layers): - common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"} - common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"} - return common_outputs - - def _generate_dummy_inputs_for_default_and_seq2seq_lm( - self, - tokenizer: PreTrainedTokenizer, - batch_size: int = -1, - seq_length: int = -1, - is_pair: bool = False, - framework: Optional[TensorType] = None, - ) -> Mapping[str, Any]: - encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( - tokenizer, batch_size, seq_length, is_pair, framework - ) - - # Generate decoder inputs - decoder_seq_length = seq_length if not self.use_past else 1 - decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( - tokenizer, batch_size, decoder_seq_length, is_pair, framework - ) - decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} - common_inputs = dict(**encoder_inputs, **decoder_inputs) - - if self.use_past: - if not is_torch_available(): - raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") - else: - import torch - batch, encoder_seq_length = common_inputs["input_ids"].shape - decoder_seq_length = common_inputs["decoder_input_ids"].shape[1] - num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads - encoder_shape = ( - batch, - num_encoder_attention_heads, - encoder_seq_length, - self._config.hidden_size // num_encoder_attention_heads, - ) - decoder_past_length = decoder_seq_length + 3 - decoder_shape = ( - batch, - num_decoder_attention_heads, - decoder_past_length, - self._config.hidden_size // num_decoder_attention_heads, - ) - - common_inputs["decoder_attention_mask"] = torch.cat( - [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1 - ) - - common_inputs["past_key_values"] = [] - # If the number of encoder and decoder layers are present in the model configuration, both are considered - num_encoder_layers, num_decoder_layers = 
self.num_layers - min_num_layers = min(num_encoder_layers, num_decoder_layers) - max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers - remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" - - for _ in range(min_num_layers): - common_inputs["past_key_values"].append( - ( - torch.zeros(decoder_shape), - torch.zeros(decoder_shape), - torch.zeros(encoder_shape), - torch.zeros(encoder_shape), - ) - ) - # TODO: test this. - shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape - for _ in range(min_num_layers, max_num_layers): - common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape))) - return common_inputs - - def _generate_dummy_inputs_for_causal_lm( - self, - tokenizer: PreTrainedTokenizer, - batch_size: int = -1, - seq_length: int = -1, - is_pair: bool = False, - framework: Optional[TensorType] = None, - ) -> Mapping[str, Any]: - common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( - tokenizer, batch_size, seq_length, is_pair, framework - ) - - if self.use_past: - if not is_torch_available(): - raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") - else: - import torch - batch, seqlen = common_inputs["input_ids"].shape - # Not using the same length for past_key_values - past_key_values_length = seqlen + 2 - num_encoder_layers, _ = self.num_layers - num_encoder_attention_heads, _ = self.num_attention_heads - past_shape = ( - batch, - num_encoder_attention_heads, - past_key_values_length, - self._config.hidden_size // num_encoder_attention_heads, - ) - - mask_dtype = common_inputs["attention_mask"].dtype - common_inputs["attention_mask"] = torch.cat( - [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 - ) - common_inputs["past_key_values"] = [ - (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers) - ] - return common_inputs - - def _generate_dummy_inputs_for_sequence_classification_and_question_answering( - self, - tokenizer: PreTrainedTokenizer, - batch_size: int = -1, - seq_length: int = -1, - is_pair: bool = False, - framework: Optional[TensorType] = None, - ) -> Mapping[str, Any]: - # Copied from OnnxConfig.generate_dummy_inputs - # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
- # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX - batch_size = compute_effective_axis_dimension( - batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 - ) - - # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX - token_to_add = tokenizer.num_special_tokens_to_add(is_pair) - seq_length = compute_effective_axis_dimension( - seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add - ) - - # Generate dummy inputs according to compute batch and sequence - dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size - common_inputs = dict(tokenizer(dummy_input, return_tensors=framework)) - return common_inputs - - def generate_dummy_inputs( - self, - tokenizer: PreTrainedTokenizer, - batch_size: int = -1, - seq_length: int = -1, - is_pair: bool = False, - framework: Optional[TensorType] = None, - ) -> Mapping[str, Any]: - if self.task in ["default", "seq2seq-lm"]: - common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm( - tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework - ) - - elif self.task == "causal-lm": - common_inputs = self._generate_dummy_inputs_for_causal_lm( - tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework - ) - else: - common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( - tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework - ) - - return common_inputs - - def _flatten_past_key_values_(self, flattened_output, name, idx, t): - if self.task in ["default", "seq2seq-lm"]: - flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t) - else: - flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_( - flattened_output, name, idx, t - ) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/electra/__init__.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/electra/__init__.py deleted file mode 100644 index 09ce039d25fd057608693a8d6c9d79358d970225..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/electra/__init__.py +++ /dev/null @@ -1,168 +0,0 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
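For reference, a hedged sketch of driving the `BlenderbotSmallOnnxConfig` from the configuration file deleted above: it builds the dynamic-axis spec and a set of dummy inputs for the "seq2seq-lm" task. The import path assumes the upstream `transformers` package rather than this vendored copy, and the checkpoint name and batch/sequence sizes are assumptions for illustration.

```python
# Hedged sketch for the BlenderbotSmallOnnxConfig above: inspect its input spec and
# generate dummy inputs. Checkpoint name and sizes are placeholder assumptions.
from transformers import AutoConfig, AutoTokenizer
from transformers.models.blenderbot_small.configuration_blenderbot_small import (
    BlenderbotSmallOnnxConfig,
)

config = AutoConfig.from_pretrained("facebook/blenderbot_small-90M")
tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")

onnx_config = BlenderbotSmallOnnxConfig(config, task="seq2seq-lm")
print(onnx_config.inputs)  # dynamic axes for input_ids, attention_mask, decoder_* inputs

dummy = onnx_config.generate_dummy_inputs(
    tokenizer, batch_size=2, seq_length=8, framework=None
)
print(sorted(dummy.keys()))
```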
- -from typing import TYPE_CHECKING - -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"], - "tokenization_electra": ["ElectraTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_electra"] = [ - "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST", - "ElectraForCausalLM", - "ElectraForMaskedLM", - "ElectraForMultipleChoice", - "ElectraForPreTraining", - "ElectraForQuestionAnswering", - "ElectraForSequenceClassification", - "ElectraForTokenClassification", - "ElectraModel", - "ElectraPreTrainedModel", - "load_tf_weights_in_electra", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_electra"] = [ - "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST", - "TFElectraForMaskedLM", - "TFElectraForMultipleChoice", - "TFElectraForPreTraining", - "TFElectraForQuestionAnswering", - "TFElectraForSequenceClassification", - "TFElectraForTokenClassification", - "TFElectraModel", - "TFElectraPreTrainedModel", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_electra"] = [ - "FlaxElectraForCausalLM", - "FlaxElectraForMaskedLM", - "FlaxElectraForMultipleChoice", - "FlaxElectraForPreTraining", - "FlaxElectraForQuestionAnswering", - "FlaxElectraForSequenceClassification", - "FlaxElectraForTokenClassification", - "FlaxElectraModel", - "FlaxElectraPreTrainedModel", - ] - - -if TYPE_CHECKING: - from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig - from .tokenization_electra import ElectraTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_electra_fast import ElectraTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_electra import ( - ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, - ElectraForCausalLM, - ElectraForMaskedLM, - ElectraForMultipleChoice, - ElectraForPreTraining, - ElectraForQuestionAnswering, - ElectraForSequenceClassification, - ElectraForTokenClassification, - ElectraModel, - ElectraPreTrainedModel, - load_tf_weights_in_electra, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_electra import ( - TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, - TFElectraForMaskedLM, - TFElectraForMultipleChoice, - TFElectraForPreTraining, - TFElectraForQuestionAnswering, - TFElectraForSequenceClassification, - TFElectraForTokenClassification, - TFElectraModel, - TFElectraPreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from 
.modeling_flax_electra import ( - FlaxElectraForCausalLM, - FlaxElectraForMaskedLM, - FlaxElectraForMultipleChoice, - FlaxElectraForPreTraining, - FlaxElectraForQuestionAnswering, - FlaxElectraForSequenceClassification, - FlaxElectraForTokenClassification, - FlaxElectraModel, - FlaxElectraPreTrainedModel, - ) - -else: - import sys - - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/spaces/yl12053/so-vits-4.1-Kitasan-Black/diffusion/logger/__init__.py b/spaces/yl12053/so-vits-4.1-Kitasan-Black/diffusion/logger/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/backbone/res2net.py b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/backbone/res2net.py deleted file mode 100644 index 1d0d40adb4a300d916deecebd20bcaac08936e6d..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/backbone/res2net.py +++ /dev/null @@ -1,802 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -# This file is modified from https://github.com/Res2Net/Res2Net-detectron2/blob/master/detectron2/modeling/backbone/resnet.py -# The original file is under Apache-2.0 License -import numpy as np -import fvcore.nn.weight_init as weight_init -import torch -import torch.nn.functional as F -from torch import nn - -from detectron2.layers import ( - CNNBlockBase, - Conv2d, - DeformConv, - ModulatedDeformConv, - ShapeSpec, - get_norm, -) - -from detectron2.modeling.backbone import Backbone -from detectron2.modeling.backbone.fpn import FPN -from detectron2.modeling.backbone.build import BACKBONE_REGISTRY -from .fpn_p5 import LastLevelP6P7_P5 -from .bifpn import BiFPN - -__all__ = [ - "ResNetBlockBase", - "BasicBlock", - "BottleneckBlock", - "DeformBottleneckBlock", - "BasicStem", - "ResNet", - "make_stage", - "build_res2net_backbone", -] - - -ResNetBlockBase = CNNBlockBase -""" -Alias for backward compatibiltiy. -""" - - -class BasicBlock(CNNBlockBase): - """ - The basic residual block for ResNet-18 and ResNet-34, with two 3x3 conv layers - and a projection shortcut if needed. - """ - - def __init__(self, in_channels, out_channels, *, stride=1, norm="BN"): - """ - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - stride (int): Stride for the first conv. - norm (str or callable): normalization for all conv layers. - See :func:`layers.get_norm` for supported format. 
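A quick sanity-check sketch for the `BasicBlock` defined above, assuming detectron2 and torch are installed and this backbone module imports cleanly; the channel counts and tensor sizes are arbitrary assumptions.

```python
# Hedged sketch: run the BasicBlock above on a dummy feature map.
# With stride=1 and in_channels == out_channels, the identity shortcut is used.
import torch

block = BasicBlock(in_channels=64, out_channels=64, stride=1, norm="BN")
block.eval()
x = torch.randn(2, 64, 56, 56)
with torch.no_grad():
    y = block(x)
print(y.shape)  # spatial size preserved, since stride=1
```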
- """ - super().__init__(in_channels, out_channels, stride) - - if in_channels != out_channels: - self.shortcut = Conv2d( - in_channels, - out_channels, - kernel_size=1, - stride=stride, - bias=False, - norm=get_norm(norm, out_channels), - ) - else: - self.shortcut = None - - self.conv1 = Conv2d( - in_channels, - out_channels, - kernel_size=3, - stride=stride, - padding=1, - bias=False, - norm=get_norm(norm, out_channels), - ) - - self.conv2 = Conv2d( - out_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1, - bias=False, - norm=get_norm(norm, out_channels), - ) - - for layer in [self.conv1, self.conv2, self.shortcut]: - if layer is not None: # shortcut can be None - weight_init.c2_msra_fill(layer) - - def forward(self, x): - out = self.conv1(x) - out = F.relu_(out) - out = self.conv2(out) - - if self.shortcut is not None: - shortcut = self.shortcut(x) - else: - shortcut = x - - out += shortcut - out = F.relu_(out) - return out - - -class BottleneckBlock(CNNBlockBase): - """ - The standard bottle2neck residual block used by Res2Net-50, 101 and 152. - """ - - def __init__( - self, - in_channels, - out_channels, - *, - bottleneck_channels, - stride=1, - num_groups=1, - norm="BN", - stride_in_1x1=False, - dilation=1, - basewidth=26, - scale=4, - ): - """ - Args: - bottleneck_channels (int): number of output channels for the 3x3 - "bottleneck" conv layers. - num_groups (int): number of groups for the 3x3 conv layer. - norm (str or callable): normalization for all conv layers. - See :func:`layers.get_norm` for supported format. - stride_in_1x1 (bool): when stride>1, whether to put stride in the - first 1x1 convolution or the bottleneck 3x3 convolution. - dilation (int): the dilation rate of the 3x3 conv layer. - """ - super().__init__(in_channels, out_channels, stride) - - if in_channels != out_channels: - self.shortcut = nn.Sequential( - nn.AvgPool2d(kernel_size=stride, stride=stride, - ceil_mode=True, count_include_pad=False), - Conv2d( - in_channels, - out_channels, - kernel_size=1, - stride=1, - bias=False, - norm=get_norm(norm, out_channels), - ) - ) - else: - self.shortcut = None - - # The original MSRA ResNet models have stride in the first 1x1 conv - # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have - # stride in the 3x3 conv - stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride) - width = bottleneck_channels//scale - - self.conv1 = Conv2d( - in_channels, - bottleneck_channels, - kernel_size=1, - stride=stride_1x1, - bias=False, - norm=get_norm(norm, bottleneck_channels), - ) - if scale == 1: - self.nums = 1 - else: - self.nums = scale -1 - if self.in_channels!=self.out_channels and stride_3x3!=2: - self.pool = nn.AvgPool2d(kernel_size=3, stride = stride_3x3, padding=1) - - convs = [] - bns = [] - for i in range(self.nums): - convs.append(nn.Conv2d( - width, - width, - kernel_size=3, - stride=stride_3x3, - padding=1 * dilation, - bias=False, - groups=num_groups, - dilation=dilation, - )) - bns.append(get_norm(norm, width)) - self.convs = nn.ModuleList(convs) - self.bns = nn.ModuleList(bns) - - self.conv3 = Conv2d( - bottleneck_channels, - out_channels, - kernel_size=1, - bias=False, - norm=get_norm(norm, out_channels), - ) - self.scale = scale - self.width = width - self.in_channels = in_channels - self.out_channels = out_channels - self.stride_3x3 = stride_3x3 - for layer in [self.conv1, self.conv3]: - if layer is not None: # shortcut can be None - weight_init.c2_msra_fill(layer) - if self.shortcut is not None: - for layer in 
self.shortcut.modules(): - if isinstance(layer, Conv2d): - weight_init.c2_msra_fill(layer) - - for layer in self.convs: - if layer is not None: # shortcut can be None - weight_init.c2_msra_fill(layer) - - # Zero-initialize the last normalization in each residual branch, - # so that at the beginning, the residual branch starts with zeros, - # and each residual block behaves like an identity. - # See Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour": - # "For BN layers, the learnable scaling coefficient γ is initialized - # to be 1, except for each residual block's last BN - # where γ is initialized to be 0." - - # nn.init.constant_(self.conv3.norm.weight, 0) - # TODO this somehow hurts performance when training GN models from scratch. - # Add it as an option when we need to use this code to train a backbone. - - def forward(self, x): - out = self.conv1(x) - out = F.relu_(out) - - spx = torch.split(out, self.width, 1) - for i in range(self.nums): - if i==0 or self.in_channels!=self.out_channels: - sp = spx[i] - else: - sp = sp + spx[i] - sp = self.convs[i](sp) - sp = F.relu_(self.bns[i](sp)) - if i==0: - out = sp - else: - out = torch.cat((out, sp), 1) - if self.scale!=1 and self.stride_3x3==1: - out = torch.cat((out, spx[self.nums]), 1) - elif self.scale != 1 and self.stride_3x3==2: - out = torch.cat((out, self.pool(spx[self.nums])), 1) - - out = self.conv3(out) - - if self.shortcut is not None: - shortcut = self.shortcut(x) - else: - shortcut = x - - out += shortcut - out = F.relu_(out) - return out - - -class DeformBottleneckBlock(ResNetBlockBase): - """ - Not implemented for res2net yet. - Similar to :class:`BottleneckBlock`, but with deformable conv in the 3x3 convolution. - """ - - def __init__( - self, - in_channels, - out_channels, - *, - bottleneck_channels, - stride=1, - num_groups=1, - norm="BN", - stride_in_1x1=False, - dilation=1, - deform_modulated=False, - deform_num_groups=1, - basewidth=26, - scale=4, - ): - super().__init__(in_channels, out_channels, stride) - self.deform_modulated = deform_modulated - - if in_channels != out_channels: - # self.shortcut = Conv2d( - # in_channels, - # out_channels, - # kernel_size=1, - # stride=stride, - # bias=False, - # norm=get_norm(norm, out_channels), - # ) - self.shortcut = nn.Sequential( - nn.AvgPool2d(kernel_size=stride, stride=stride, - ceil_mode=True, count_include_pad=False), - Conv2d( - in_channels, - out_channels, - kernel_size=1, - stride=1, - bias=False, - norm=get_norm(norm, out_channels), - ) - ) - else: - self.shortcut = None - - stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride) - width = bottleneck_channels//scale - - self.conv1 = Conv2d( - in_channels, - bottleneck_channels, - kernel_size=1, - stride=stride_1x1, - bias=False, - norm=get_norm(norm, bottleneck_channels), - ) - - if scale == 1: - self.nums = 1 - else: - self.nums = scale -1 - if self.in_channels!=self.out_channels and stride_3x3!=2: - self.pool = nn.AvgPool2d(kernel_size=3, stride = stride_3x3, padding=1) - - if deform_modulated: - deform_conv_op = ModulatedDeformConv - # offset channels are 2 or 3 (if with modulated) * kernel_size * kernel_size - offset_channels = 27 - else: - deform_conv_op = DeformConv - offset_channels = 18 - - # self.conv2_offset = Conv2d( - # bottleneck_channels, - # offset_channels * deform_num_groups, - # kernel_size=3, - # stride=stride_3x3, - # padding=1 * dilation, - # dilation=dilation, - # ) - # self.conv2 = deform_conv_op( - # bottleneck_channels, - # bottleneck_channels, - # 
kernel_size=3, - # stride=stride_3x3, - # padding=1 * dilation, - # bias=False, - # groups=num_groups, - # dilation=dilation, - # deformable_groups=deform_num_groups, - # norm=get_norm(norm, bottleneck_channels), - # ) - - conv2_offsets = [] - convs = [] - bns = [] - for i in range(self.nums): - conv2_offsets.append(Conv2d( - width, - offset_channels * deform_num_groups, - kernel_size=3, - stride=stride_3x3, - padding=1 * dilation, - bias=False, - groups=num_groups, - dilation=dilation, - )) - convs.append(deform_conv_op( - width, - width, - kernel_size=3, - stride=stride_3x3, - padding=1 * dilation, - bias=False, - groups=num_groups, - dilation=dilation, - deformable_groups=deform_num_groups, - )) - bns.append(get_norm(norm, width)) - self.conv2_offsets = nn.ModuleList(conv2_offsets) - self.convs = nn.ModuleList(convs) - self.bns = nn.ModuleList(bns) - - self.conv3 = Conv2d( - bottleneck_channels, - out_channels, - kernel_size=1, - bias=False, - norm=get_norm(norm, out_channels), - ) - self.scale = scale - self.width = width - self.in_channels = in_channels - self.out_channels = out_channels - self.stride_3x3 = stride_3x3 - # for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]: - # if layer is not None: # shortcut can be None - # weight_init.c2_msra_fill(layer) - - # nn.init.constant_(self.conv2_offset.weight, 0) - # nn.init.constant_(self.conv2_offset.bias, 0) - for layer in [self.conv1, self.conv3]: - if layer is not None: # shortcut can be None - weight_init.c2_msra_fill(layer) - if self.shortcut is not None: - for layer in self.shortcut.modules(): - if isinstance(layer, Conv2d): - weight_init.c2_msra_fill(layer) - - for layer in self.convs: - if layer is not None: # shortcut can be None - weight_init.c2_msra_fill(layer) - - for layer in self.conv2_offsets: - if layer.weight is not None: - nn.init.constant_(layer.weight, 0) - if layer.bias is not None: - nn.init.constant_(layer.bias, 0) - - def forward(self, x): - out = self.conv1(x) - out = F.relu_(out) - - # if self.deform_modulated: - # offset_mask = self.conv2_offset(out) - # offset_x, offset_y, mask = torch.chunk(offset_mask, 3, dim=1) - # offset = torch.cat((offset_x, offset_y), dim=1) - # mask = mask.sigmoid() - # out = self.conv2(out, offset, mask) - # else: - # offset = self.conv2_offset(out) - # out = self.conv2(out, offset) - # out = F.relu_(out) - - spx = torch.split(out, self.width, 1) - for i in range(self.nums): - if i==0 or self.in_channels!=self.out_channels: - sp = spx[i].contiguous() - else: - sp = sp + spx[i].contiguous() - - # sp = self.convs[i](sp) - if self.deform_modulated: - offset_mask = self.conv2_offsets[i](sp) - offset_x, offset_y, mask = torch.chunk(offset_mask, 3, dim=1) - offset = torch.cat((offset_x, offset_y), dim=1) - mask = mask.sigmoid() - sp = self.convs[i](sp, offset, mask) - else: - offset = self.conv2_offsets[i](sp) - sp = self.convs[i](sp, offset) - sp = F.relu_(self.bns[i](sp)) - if i==0: - out = sp - else: - out = torch.cat((out, sp), 1) - if self.scale!=1 and self.stride_3x3==1: - out = torch.cat((out, spx[self.nums]), 1) - elif self.scale != 1 and self.stride_3x3==2: - out = torch.cat((out, self.pool(spx[self.nums])), 1) - - out = self.conv3(out) - - if self.shortcut is not None: - shortcut = self.shortcut(x) - else: - shortcut = x - - out += shortcut - out = F.relu_(out) - return out - - -def make_stage(block_class, num_blocks, first_stride, *, in_channels, out_channels, **kwargs): - """ - Create a list of blocks just like those in a ResNet stage. 
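As a concrete illustration of the bottle2neck block that `make_stage` assembles, here is a hedged sketch; the channel sizes are arbitrary assumptions, and detectron2 and torch are assumed to be installed.

```python
# Hedged sketch: one Res2Net bottle2neck block from above on a dummy feature map.
# With in_channels == out_channels and stride=1, the identity shortcut is used.
import torch

block = BottleneckBlock(
    in_channels=256,
    out_channels=256,
    bottleneck_channels=128,  # split into scale=4 branches of width 32
    stride=1,
    num_groups=1,
    norm="BN",
    scale=4,
)
x = torch.randn(2, 256, 64, 64)
y = block(x)
print(y.shape)  # expected to match x.shape
```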
- Args: - block_class (type): a subclass of ResNetBlockBase - num_blocks (int): - first_stride (int): the stride of the first block. The other blocks will have stride=1. - in_channels (int): input channels of the entire stage. - out_channels (int): output channels of **every block** in the stage. - kwargs: other arguments passed to the constructor of every block. - Returns: - list[nn.Module]: a list of block module. - """ - assert "stride" not in kwargs, "Stride of blocks in make_stage cannot be changed." - blocks = [] - for i in range(num_blocks): - blocks.append( - block_class( - in_channels=in_channels, - out_channels=out_channels, - stride=first_stride if i == 0 else 1, - **kwargs, - ) - ) - in_channels = out_channels - return blocks - - -class BasicStem(CNNBlockBase): - """ - The standard ResNet stem (layers before the first residual block). - """ - - def __init__(self, in_channels=3, out_channels=64, norm="BN"): - """ - Args: - norm (str or callable): norm after the first conv layer. - See :func:`layers.get_norm` for supported format. - """ - super().__init__(in_channels, out_channels, 4) - self.in_channels = in_channels - self.conv1 = nn.Sequential( - Conv2d( - in_channels, - 32, - kernel_size=3, - stride=2, - padding=1, - bias=False, - ), - get_norm(norm, 32), - nn.ReLU(inplace=True), - Conv2d( - 32, - 32, - kernel_size=3, - stride=1, - padding=1, - bias=False, - ), - get_norm(norm, 32), - nn.ReLU(inplace=True), - Conv2d( - 32, - out_channels, - kernel_size=3, - stride=1, - padding=1, - bias=False, - ), - ) - self.bn1 = get_norm(norm, out_channels) - - for layer in self.conv1: - if isinstance(layer, Conv2d): - weight_init.c2_msra_fill(layer) - - def forward(self, x): - x = self.conv1(x) - x = self.bn1(x) - x = F.relu_(x) - x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1) - return x - - -class ResNet(Backbone): - def __init__(self, stem, stages, num_classes=None, out_features=None): - """ - Args: - stem (nn.Module): a stem module - stages (list[list[CNNBlockBase]]): several (typically 4) stages, - each contains multiple :class:`CNNBlockBase`. - num_classes (None or int): if None, will not perform classification. - Otherwise, will create a linear layer. - out_features (list[str]): name of the layers whose outputs should - be returned in forward. Can be anything in "stem", "linear", or "res2" ... - If None, will return the output of the last layer. - """ - super(ResNet, self).__init__() - self.stem = stem - self.num_classes = num_classes - - current_stride = self.stem.stride - self._out_feature_strides = {"stem": current_stride} - self._out_feature_channels = {"stem": self.stem.out_channels} - - self.stages_and_names = [] - for i, blocks in enumerate(stages): - assert len(blocks) > 0, len(blocks) - for block in blocks: - assert isinstance(block, CNNBlockBase), block - - name = "res" + str(i + 2) - stage = nn.Sequential(*blocks) - - self.add_module(name, stage) - self.stages_and_names.append((stage, name)) - - self._out_feature_strides[name] = current_stride = int( - current_stride * np.prod([k.stride for k in blocks]) - ) - self._out_feature_channels[name] = curr_channels = blocks[-1].out_channels - - if num_classes is not None: - self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) - self.linear = nn.Linear(curr_channels, num_classes) - - # Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour": - # "The 1000-way fully-connected layer is initialized by - # drawing weights from a zero-mean Gaussian with standard deviation of 0.01." 
- nn.init.normal_(self.linear.weight, std=0.01) - name = "linear" - - if out_features is None: - out_features = [name] - self._out_features = out_features - assert len(self._out_features) - children = [x[0] for x in self.named_children()] - for out_feature in self._out_features: - assert out_feature in children, "Available children: {}".format(", ".join(children)) - - def forward(self, x): - outputs = {} - x = self.stem(x) - if "stem" in self._out_features: - outputs["stem"] = x - for stage, name in self.stages_and_names: - x = stage(x) - if name in self._out_features: - outputs[name] = x - if self.num_classes is not None: - x = self.avgpool(x) - x = torch.flatten(x, 1) - x = self.linear(x) - if "linear" in self._out_features: - outputs["linear"] = x - return outputs - - def output_shape(self): - return { - name: ShapeSpec( - channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] - ) - for name in self._out_features - } - - def freeze(self, freeze_at=0): - """ - Freeze the first several stages of the ResNet. Commonly used in - fine-tuning. - Args: - freeze_at (int): number of stem and stages to freeze. - `1` means freezing the stem. `2` means freezing the stem and - the first stage, etc. - Returns: - nn.Module: this ResNet itself - """ - if freeze_at >= 1: - self.stem.freeze() - for idx, (stage, _) in enumerate(self.stages_and_names, start=2): - if freeze_at >= idx: - for block in stage.children(): - block.freeze() - return self - - -@BACKBONE_REGISTRY.register() -def build_res2net_backbone(cfg, input_shape): - """ - Create a Res2Net instance from config. - Returns: - ResNet: a :class:`ResNet` instance. - """ - # need registration of new blocks/stems? - norm = cfg.MODEL.RESNETS.NORM - stem = BasicStem( - in_channels=input_shape.channels, - out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS, - norm=norm, - ) - - # fmt: off - freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT - out_features = cfg.MODEL.RESNETS.OUT_FEATURES - depth = cfg.MODEL.RESNETS.DEPTH - num_groups = cfg.MODEL.RESNETS.NUM_GROUPS - width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP - scale = 4 - bottleneck_channels = num_groups * width_per_group * scale - in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS - out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS - stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1 - res5_dilation = cfg.MODEL.RESNETS.RES5_DILATION - deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE - deform_modulated = cfg.MODEL.RESNETS.DEFORM_MODULATED - deform_num_groups = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS - # fmt: on - assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation) - - num_blocks_per_stage = { - 18: [2, 2, 2, 2], - 34: [3, 4, 6, 3], - 50: [3, 4, 6, 3], - 101: [3, 4, 23, 3], - 152: [3, 8, 36, 3], - }[depth] - - if depth in [18, 34]: - assert out_channels == 64, "Must set MODEL.RESNETS.RES2_OUT_CHANNELS = 64 for R18/R34" - assert not any( - deform_on_per_stage - ), "MODEL.RESNETS.DEFORM_ON_PER_STAGE unsupported for R18/R34" - assert res5_dilation == 1, "Must set MODEL.RESNETS.RES5_DILATION = 1 for R18/R34" - assert num_groups == 1, "Must set MODEL.RESNETS.NUM_GROUPS = 1 for R18/R34" - - stages = [] - - # Avoid creating variables without gradients - # It consumes extra memory and may cause allreduce to fail - out_stage_idx = [{"res2": 2, "res3": 3, "res4": 4, "res5": 5}[f] for f in out_features] - max_stage_idx = max(out_stage_idx) - for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)): - dilation = res5_dilation if stage_idx == 5 else 1 - 
first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2 - stage_kargs = { - "num_blocks": num_blocks_per_stage[idx], - "first_stride": first_stride, - "in_channels": in_channels, - "out_channels": out_channels, - "norm": norm, - } - # Use BasicBlock for R18 and R34. - if depth in [18, 34]: - stage_kargs["block_class"] = BasicBlock - else: - stage_kargs["bottleneck_channels"] = bottleneck_channels - stage_kargs["stride_in_1x1"] = stride_in_1x1 - stage_kargs["dilation"] = dilation - stage_kargs["num_groups"] = num_groups - stage_kargs["scale"] = scale - - if deform_on_per_stage[idx]: - stage_kargs["block_class"] = DeformBottleneckBlock - stage_kargs["deform_modulated"] = deform_modulated - stage_kargs["deform_num_groups"] = deform_num_groups - else: - stage_kargs["block_class"] = BottleneckBlock - blocks = make_stage(**stage_kargs) - in_channels = out_channels - out_channels *= 2 - bottleneck_channels *= 2 - stages.append(blocks) - return ResNet(stem, stages, out_features=out_features).freeze(freeze_at) - - -@BACKBONE_REGISTRY.register() -def build_p67_res2net_fpn_backbone(cfg, input_shape: ShapeSpec): - """ - Args: - cfg: a detectron2 CfgNode - - Returns: - backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. - """ - bottom_up = build_res2net_backbone(cfg, input_shape) - in_features = cfg.MODEL.FPN.IN_FEATURES - out_channels = cfg.MODEL.FPN.OUT_CHANNELS - backbone = FPN( - bottom_up=bottom_up, - in_features=in_features, - out_channels=out_channels, - norm=cfg.MODEL.FPN.NORM, - top_block=LastLevelP6P7_P5(out_channels, out_channels), - fuse_type=cfg.MODEL.FPN.FUSE_TYPE, - ) - return backbone - - -@BACKBONE_REGISTRY.register() -def build_res2net_bifpn_backbone(cfg, input_shape: ShapeSpec): - """ - Args: - cfg: a detectron2 CfgNode - - Returns: - backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. 
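A hedged sketch of instantiating the plain Res2Net backbone built by `build_res2net_backbone` above from a default detectron2 config; it assumes detectron2 and the surrounding CenterNet2 project are importable, and the `OUT_FEATURES` choice and input size are illustrative assumptions.

```python
# Hedged sketch: build the Res2Net backbone defined above from a default config
# and run a dummy forward pass. Config overrides here are illustrative only.
import torch
from detectron2.config import get_cfg
from detectron2.layers import ShapeSpec

cfg = get_cfg()
cfg.MODEL.RESNETS.DEPTH = 50
cfg.MODEL.RESNETS.OUT_FEATURES = ["res3", "res4", "res5"]

backbone = build_res2net_backbone(cfg, ShapeSpec(channels=3))
backbone.eval()
with torch.no_grad():
    feats = backbone(torch.randn(1, 3, 224, 224))
print({name: tuple(f.shape) for name, f in feats.items()})
```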
- """ - bottom_up = build_res2net_backbone(cfg, input_shape) - in_features = cfg.MODEL.FPN.IN_FEATURES - backbone = BiFPN( - cfg=cfg, - bottom_up=bottom_up, - in_features=in_features, - out_channels=cfg.MODEL.BIFPN.OUT_CHANNELS, - norm=cfg.MODEL.BIFPN.NORM, - num_levels=cfg.MODEL.BIFPN.NUM_LEVELS, - num_bifpn=cfg.MODEL.BIFPN.NUM_BIFPN, - separable_conv=cfg.MODEL.BIFPN.SEPARABLE_CONV, - ) - return backbone \ No newline at end of file diff --git a/spaces/ysharma/Chat_With_Blip2/README.md b/spaces/ysharma/Chat_With_Blip2/README.md deleted file mode 100644 index 3e4c0d3ed7ffa14b01384b2cb6a6489baa8bca48..0000000000000000000000000000000000000000 --- a/spaces/ysharma/Chat_With_Blip2/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Chat With Blip2 -emoji: 🌖 -colorFrom: pink -colorTo: purple -sdk: gradio -sdk_version: 3.18.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ywqisok/ysyy/app.py b/spaces/ywqisok/ysyy/app.py deleted file mode 100644 index 57451de18b453bede7f1489a81acb3ab0a8b2c3c..0000000000000000000000000000000000000000 --- a/spaces/ywqisok/ysyy/app.py +++ /dev/null @@ -1,141 +0,0 @@ -# coding=utf-8 -import time -import os -import gradio as gr -import utils -import argparse -import commons -from models import SynthesizerTrn -from text import text_to_sequence -import torch -from torch import no_grad, LongTensor -import webbrowser -import logging -logging.getLogger('numba').setLevel(logging.WARNING) -limitation = os.getenv("SYSTEM") == "spaces" # limit text and audio length in huggingface spaces - -def get_text(text, hps): - text_norm, clean_text = text_to_sequence(text, hps.symbols, hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = LongTensor(text_norm) - return text_norm, clean_text - -def vits(text, language, speaker_id, noise_scale, noise_scale_w, length_scale): - start = time.perf_counter() - if not len(text): - return "输入文本不能为空!", None, None - text = text.replace('\n', ' ').replace('\r', '').replace(" ", "") - if len(text) > 100 and limitation: - return f"输入文字过长!{len(text)}>100", None, None - if language == 0: - text = f"[ZH]{text}[ZH]" - elif language == 1: - text = f"[JA]{text}[JA]" - else: - text = f"{text}" - stn_tst, clean_text = get_text(text, hps_ms) - with no_grad(): - x_tst = stn_tst.unsqueeze(0).to(device) - x_tst_lengths = LongTensor([stn_tst.size(0)]).to(device) - speaker_id = LongTensor([speaker_id]).to(device) - audio = net_g_ms.infer(x_tst, x_tst_lengths, sid=speaker_id, noise_scale=noise_scale, noise_scale_w=noise_scale_w, - length_scale=length_scale)[0][0, 0].data.cpu().float().numpy() - - return "生成成功!", (22050, audio), f"生成耗时 {round(time.perf_counter()-start, 2)} s" - -def search_speaker(search_value): - for s in speakers: - if search_value == s: - return s - for s in speakers: - if search_value in s: - return s - -def change_lang(language): - if language == 0: - return 0.6, 0.668, 1.2 - else: - return 0.6, 0.668, 1.1 - -download_audio_js = """ -() =>{{ - let root = document.querySelector("body > gradio-app"); - if (root.shadowRoot != null) - root = root.shadowRoot; - let audio = root.querySelector("#tts-audio").querySelector("audio"); - let text = root.querySelector("#input-text").querySelector("textarea"); - if (audio == undefined) - return; - text = text.value; - if (text == undefined) - text = Math.floor(Math.random()*100000000); - audio = audio.src; - let oA = 
document.createElement("a"); - oA.download = text.substr(0, 20)+'.wav'; - oA.href = audio; - document.body.appendChild(oA); - oA.click(); - oA.remove(); -}} -""" - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--device', type=str, default='cpu') - parser.add_argument('--api', action="store_true", default=False) - parser.add_argument("--share", action="store_true", default=False, help="share gradio app") - parser.add_argument("--colab", action="store_true", default=False, help="share gradio app") - args = parser.parse_args() - device = torch.device(args.device) - - hps_ms = utils.get_hparams_from_file(r'./model/config.json') - net_g_ms = SynthesizerTrn( - len(hps_ms.symbols), - hps_ms.data.filter_length // 2 + 1, - hps_ms.train.segment_size // hps_ms.data.hop_length, - n_speakers=hps_ms.data.n_speakers, - **hps_ms.model) - _ = net_g_ms.eval().to(device) - speakers = hps_ms.speakers - model, optimizer, learning_rate, epochs = utils.load_checkpoint(r'./model/G_953000.pth', net_g_ms, None) - - with gr.Blocks() as app: - gr.Markdown( - "#
      VITS online speech synthesis demo\n" - "#
      Using these models in any commercial project is strictly prohibited; you do so at your own risk\n" - "
      Voices mainly cover Uma Musume, Genshin Impact (Chinese), Genshin Impact (Japanese), and Honkai Impact 3rd
      " - '' - '' - ) - - with gr.Tabs(): - with gr.TabItem("vits"): - with gr.Row(): - with gr.Column(): - input_text = gr.Textbox(label="Text (100 words limitation) " if limitation else "Text", lines=5, value="今天晚上吃啥好呢。", elem_id=f"input-text") - lang = gr.Dropdown(label="Language", choices=["中文", "日语", "中日混合(中文用[ZH][ZH]包裹起来,日文用[JA][JA]包裹起来)"], - type="index", value="中文") - btn = gr.Button(value="Submit") - with gr.Row(): - search = gr.Textbox(label="Search Speaker", lines=1) - btn2 = gr.Button(value="Search") - sid = gr.Dropdown(label="Speaker", choices=speakers, type="index", value=speakers[228]) - with gr.Row(): - ns = gr.Slider(label="noise_scale(控制感情变化程度)", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True) - nsw = gr.Slider(label="noise_scale_w(控制音素发音长度)", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True) - ls = gr.Slider(label="length_scale(控制整体语速)", minimum=0.1, maximum=2.0, step=0.1, value=1.2, interactive=True) - with gr.Column(): - o1 = gr.Textbox(label="Output Message") - o2 = gr.Audio(label="Output Audio", elem_id=f"tts-audio") - o3 = gr.Textbox(label="Extra Info") - download = gr.Button("Download Audio") - btn.click(vits, inputs=[input_text, lang, sid, ns, nsw, ls], outputs=[o1, o2, o3]) - download.click(None, [], [], _js=download_audio_js.format()) - btn2.click(search_speaker, inputs=[search], outputs=[sid]) - lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls]) - with gr.TabItem("可用人物一览"): - gr.Radio(label="Speaker", choices=speakers, interactive=False, type="index") - if args.colab: - webbrowser.open("http://127.0.0.1:7860") - app.queue(concurrency_count=1, api_open=args.api).launch(share=args.share) diff --git a/spaces/zhenwusw/JoJoGAN/e4e/datasets/inference_dataset.py b/spaces/zhenwusw/JoJoGAN/e4e/datasets/inference_dataset.py deleted file mode 100644 index fb577d7b538d634f27013c2784d2ea32143154cb..0000000000000000000000000000000000000000 --- a/spaces/zhenwusw/JoJoGAN/e4e/datasets/inference_dataset.py +++ /dev/null @@ -1,25 +0,0 @@ -from torch.utils.data import Dataset -from PIL import Image -from utils import data_utils - - -class InferenceDataset(Dataset): - - def __init__(self, root, opts, transform=None, preprocess=None): - self.paths = sorted(data_utils.make_dataset(root)) - self.transform = transform - self.preprocess = preprocess - self.opts = opts - - def __len__(self): - return len(self.paths) - - def __getitem__(self, index): - from_path = self.paths[index] - if self.preprocess is not None: - from_im = self.preprocess(from_path) - else: - from_im = Image.open(from_path).convert('RGB') - if self.transform: - from_im = self.transform(from_im) - return from_im diff --git a/spaces/zhoupin30/zhoupin30/postcss.config.js b/spaces/zhoupin30/zhoupin30/postcss.config.js deleted file mode 100644 index 33ad091d26d8a9dc95ebdf616e217d985ec215b8..0000000000000000000000000000000000000000 --- a/spaces/zhoupin30/zhoupin30/postcss.config.js +++ /dev/null @@ -1,6 +0,0 @@ -module.exports = { - plugins: { - tailwindcss: {}, - autoprefixer: {}, - }, -}