diff --git a/spaces/101-5/gpt4free/.github/ISSUE_TEMPLATE/feature_request.md b/spaces/101-5/gpt4free/.github/ISSUE_TEMPLATE/feature_request.md
deleted file mode 100644
index bbcbbe7d61558adde3cbfd0c7a63a67c27ed6d30..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/.github/ISSUE_TEMPLATE/feature_request.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-name: Feature request
-about: Suggest an idea for this project
-title: ''
-labels: ''
-assignees: ''
-
----
-
-**Is your feature request related to a problem? Please describe.**
-A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
-
-**Describe the solution you'd like**
-A clear and concise description of what you want to happen.
-
-**Describe alternatives you've considered**
-A clear and concise description of any alternative solutions or features you've considered.
-
-**Additional context**
-Add any other context or screenshots about the feature request here.
diff --git a/spaces/101-5/gpt4free/g4f/Provider/Providers/GetGpt.py b/spaces/101-5/gpt4free/g4f/Provider/Providers/GetGpt.py
deleted file mode 100644
index 56a121f6ee5f430da7beda3b65abdea64a87c36b..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/g4f/Provider/Providers/GetGpt.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import os
-import json
-import uuid
-import requests
-from Crypto.Cipher import AES
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://chat.getgpt.world/'
-model = ['gpt-3.5-turbo']
-supports_stream = True
-needs_auth = False
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- def encrypt(e):
- t = os.urandom(8).hex().encode('utf-8')
- n = os.urandom(8).hex().encode('utf-8')
- r = e.encode('utf-8')
- cipher = AES.new(t, AES.MODE_CBC, n)
- ciphertext = cipher.encrypt(pad_data(r))
- return ciphertext.hex() + t.decode('utf-8') + n.decode('utf-8')
-
- def pad_data(data: bytes) -> bytes:
- block_size = AES.block_size
- padding_size = block_size - len(data) % block_size
- padding = bytes([padding_size] * padding_size)
- return data + padding
-
- headers = {
- 'Content-Type': 'application/json',
- 'Referer': 'https://chat.getgpt.world/',
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
- }
-
- data = json.dumps({
- 'messages': messages,
- 'frequency_penalty': kwargs.get('frequency_penalty', 0),
- 'max_tokens': kwargs.get('max_tokens', 4000),
- 'model': 'gpt-3.5-turbo',
- 'presence_penalty': kwargs.get('presence_penalty', 0),
- 'temperature': kwargs.get('temperature', 1),
- 'top_p': kwargs.get('top_p', 1),
- 'stream': True,
- 'uuid': str(uuid.uuid4())
- })
-
- res = requests.post('https://chat.getgpt.world/api/chat/stream',
- headers=headers, json={'signature': encrypt(data)}, stream=True)
-
- for line in res.iter_lines():
- if b'content' in line:
- line_json = json.loads(line.decode('utf-8').split('data: ')[1])
- yield (line_json['choices'][0]['delta']['content'])
-
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join(
- [f'{name}: {get_type_hints(_create_completion)[name].__name__}' for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
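
For context on the deleted provider above: its `encrypt()` helper AES-CBC-encrypts the JSON request body with a random 16-hex-character key and IV, then appends both, as plain hex text, to the hex-encoded ciphertext. A minimal sketch of the inverse transform implied by that layout follows; the `decrypt` name and the code itself are illustrative assumptions, not part of the deleted file.

```python
from Crypto.Cipher import AES  # pycryptodome, the same dependency the deleted provider imports


def decrypt(payload: str) -> str:
    """Invert encrypt(): payload = hex(ciphertext) + 16 hex chars of key + 16 hex chars of IV."""
    key = payload[-32:-16].encode('utf-8')   # ASCII hex text, used directly as the raw 16-byte key
    iv = payload[-16:].encode('utf-8')       # likewise for the IV
    ciphertext = bytes.fromhex(payload[:-32])

    padded = AES.new(key, AES.MODE_CBC, iv).decrypt(ciphertext)
    # Strip the PKCS#7-style padding that pad_data() appended before encryption.
    return padded[:-padded[-1]].decode('utf-8')
```

Because the key and IV travel with the ciphertext, the scheme amounts to request obfuscation rather than encryption in any real security sense.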
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Arrival (English) dual audio hindi download Watch the sci-fi mystery thriller in HD.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Arrival (English) dual audio hindi download Watch the sci-fi mystery thriller in HD.md
deleted file mode 100644
index 9fba0db995b119be74e59a0ffc00ecb1dadf5771..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Arrival (English) dual audio hindi download Watch the sci-fi mystery thriller in HD.md
+++ /dev/null
@@ -1,113 +0,0 @@
-
-
Arrival (English) Dual Audio Hindi Download: How to Watch the Award-Winning Sci-Fi Film Online
-
If you are a fan of science fiction films, you might have heard of Arrival, a 2016 film directed by Denis Villeneuve and starring Amy Adams, Jeremy Renner, and Forest Whitaker. The film was based on a short story by Ted Chiang and received critical acclaim and numerous awards, including an Oscar for Best Sound Editing. But what if you want to watch this film in dual audio, with both English and Hindi languages? In this article, we will tell you what Arrival is about, why it is worth watching, and how to download it in dual audio.
Arrival is a sci-fi drama film that tells the story of Louise Banks (Amy Adams), a linguist who is recruited by the US Army to communicate with alien lifeforms that have arrived on Earth in 12 mysterious spacecraft. Along with physicist Ian Donnelly (Jeremy Renner), she tries to decipher their language and understand their purpose before tensions escalate into a global war. As she learns more about the aliens, who are called heptapods, she also experiences flashbacks of her daughter Hannah, who died of an incurable disease.
-
Why is Arrival worth watching?
-
Arrival is not your typical alien invasion film. It is a thoughtful and intelligent exploration of communication, time, memory, and free will. It challenges the viewers to think about how we perceive reality and how we make choices that affect our lives. It also showcases the power of language and how it shapes our worldview. The film has a captivating plot that keeps you engaged and surprised until the end. It also has excellent performances from the cast, especially Amy Adams, who delivers a nuanced and emotional portrayal of Louise.
-
How to download Arrival in dual audio (English and Hindi)?
-
If you want to watch Arrival in dual audio, you have several options available online. One of them is to use Google Drive, where you can find a link to download the film in 1080p quality with both English and Hindi audio tracks. Another option is to use FilmyGod.UK, a website that offers high-quality Hindi dubbed movies for free. You can download Arrival in 480p, 720p, or 1080p quality with dual audio from this site. However, be aware that these sites may not be legal or safe to use, so proceed at your own risk.
-
Review of Arrival
-
The plot
-
The plot of Arrival is complex and intriguing, as it involves nonlinear storytelling and multiple timelines. The film uses flashbacks and flash-forwards to reveal Louise's past and future, as well as the nature of the heptapods' language and mission. The film also has a twist ending that changes everything you thought you knew about the story. The plot is well-written and executed, as it keeps you guessing and curious throughout the film. It also raises some philosophical questions about fate, determinism, and human agency.
-
The characters
-
The characters of Arrival are well-developed and relatable, as they have their own motivations and struggles. Louise is the protagonist of the film, who is haunted by the loss of her daughter and seeks to find meaning in her life through her work as a linguist. She is brave, compassionate, and curious, as she tries to understand the heptapods and their message. Ian is her partner in the project, who is a physicist and a mathematician. He is rational, analytical, and supportive, as he helps Louise with her research and develops feelings for her. Colonel Weber is their boss, the US Army officer who recruits them and oversees the operation; he is pragmatic and demanding, pressing for answers as tensions with the other nations rise.
The themes
-
Arrival explores various themes that are relevant and profound for the human condition. Some of the main themes are:
-
Arrival movie dual audio hindi english download
-Download Arrival 2016 in hindi english dual audio
-Arrival hindi dubbed english movie download
-How to download Arrival in dual audio hindi and english
-Arrival full movie in hindi and english download
-Download Arrival dual audio 720p hindi english
-Arrival dual audio 1080p hindi english download
-Arrival hindi english dual audio torrent download
-Arrival bluray dual audio hindi english download
-Arrival dual audio hdrip hindi english download
-Download Arrival dvdrip dual audio hindi and english
-Arrival web-dl dual audio hindi english download
-Arrival brrip dual audio hindi english download
-Download Arrival x264 dual audio hindi and english
-Arrival xvid dual audio hindi english download
-Download Arrival hevc dual audio hindi and english
-Arrival h264 dual audio hindi english download
-Download Arrival mkv dual audio hindi and english
-Arrival mp4 dual audio hindi english download
-Download Arrival avi dual audio hindi and english
-Arrival ac3 dual audio hindi english download
-Download Arrival aac dual audio hindi and english
-Arrival dts dual audio hindi english download
-Download Arrival 5.1ch dual audio hindi and english
-Arrival 7.1ch dual audio hindi english download
-Download Arrival 2.0ch dual audio hindi and english
-Arrival 3.0ch dual audio hindi english download
-Download Arrival 4.0ch dual audio hindi and english
-Arrival subtitles dual audio hindi english download
-Download Arrival srt dual audio hindi and english
-Arrival subbed dual audio hindi english download
-Download Arrival dubbed dual audio hindi and english
-Arrival original language dual audio hindi english download
-Download Arrival original sound track dual audio hindi and english
-Arrival director's cut dual audio hindi english download
-Download Arrival extended edition dual audio hindi and english
-Arrival unrated version dual audio hindi english download
-Download Arrival theatrical release dual audio hindi and english
-Arrival special features dual audio hindi english download
-Download Arrival bonus content dual audio hindi and english
-Arrival behind the scenes dual audio hindi english download
-Download Arrival making of dual audio hindi and english
-Arrival interviews dual audio hindi english download
-Download Arrival cast and crew dual audio hindi and english
-Arrival reviews dual audio hindi english download
-Download Arrival ratings dual audio hindi and english
-Arrival awards dual audio hindi english download
-Download Arrival nominations dual audio hindi and english
-Arrival box office collection dual audio hindi english download
-
-
Communication: The film shows how communication is essential for understanding and cooperation, not only between humans and aliens, but also among humans themselves. The film also illustrates how communication can be challenging and complex, as different languages have different structures, meanings, and assumptions. The film also suggests that communication can influence our perception of reality and time, as learning the heptapod language allows Louise to experience time in a non-linear way.
-
Choice: The film raises the question of whether we have free will or whether our lives are predetermined by fate. The film also explores the consequences of our choices and how they affect ourselves and others. Louise faces a difficult choice when she learns that her daughter will die in the future, but she decides to have her anyway, knowing that she will cherish the moments they will share. The film also shows how choices can create conflict or harmony, as different nations choose to either attack or cooperate with the heptapods.
-
Empathy: The film emphasizes the importance of empathy and compassion for bridging the gaps between different beings. The film shows how empathy can foster trust and understanding, as Louise and Ian develop a bond with the heptapods by trying to learn their language and culture. The film also shows how empathy can prevent violence and war, as Louise manages to convince General Shang of China to stand down from his attack by using his personal phone number and his wife's dying words.
-
Memory: The film explores the role of memory in shaping our identity and reality. The film uses flashbacks and flash-forwards to show Louise's memories of her daughter, as well as her future interactions with the heptapods and Ian. The film also reveals that Louise's memories are not chronological, but rather influenced by her exposure to the heptapod language, which allows her to perceive time in a non-linear way. The film suggests that memory is not fixed or objective, but rather fluid and subjective, depending on our perspective and context.
-
-
The cinematography
-
The cinematography of Arrival is stunning and captivating, as it creates a contrast between the mundane and the extraordinary. The film uses a muted color palette and natural lighting to depict the realistic and bleak aspects of the human world, such as the military base, the university campus, and Louise's home. The film also uses wide shots and aerial views to show the scale and scope of the alien arrival, as well as the global response. The film also uses close-ups and low angles to emphasize the mystery and awe of the heptapods and their spacecraft, as well as the intimacy and emotion of the characters.
-
The music
-
The music of Arrival is composed by Jóhann Jóhannsson, who creates a haunting and atmospheric score that matches the tone and mood of the film. The music combines orchestral elements with electronic sounds and vocal samples, creating a blend of organic and alien sounds. The music also reflects the themes and emotions of the film, such as curiosity, tension, sadness, and wonder. The music also enhances the impact of some of the key scenes in the film, such as the first encounter with the heptapods, the revelation of Louise's future, and the final conversation with General Shang.
-
Conclusion
-
Summary of the main points
-
In conclusion, Arrival is a remarkable sci-fi film that offers a unique and profound perspective on communication, time, choice, empathy, memory, and humanity. The film has a compelling plot that surprises and challenges the viewers with its nonlinear structure and twist ending. The film also has superb performances from Amy Adams and Jeremy Renner, who bring depth and emotion to their roles. The film also has impressive cinematography and music that create a captivating visual and auditory experience. The film is not only entertaining but also enlightening, as it invites us to think about our place in the universe and our relationship with ourselves and others.
-
Recommendation for the viewers
-
If you are looking for a sci-fi film that is more than just action and spectacle, Arrival is a perfect choice for you. Arrival is a film that will make you think, feel, and wonder about life's big questions. Arrival is a film that will inspire you to learn new languages, appreciate different cultures, and embrace your choices. Arrival is a film that will touch your heart and mind with its beauty and wisdom.
-
FAQs
-
-
What is the meaning of Arrival? Arrival is a film that explores the meaning of communication, time, choice, empathy, memory, and humanity through the story of Louise Banks, a linguist who tries to communicate with alien visitors who have arrived on Earth.
-
What is the language of Arrival? Arrival features two languages: English and Heptapod. English is spoken by most of the human characters in the film. Heptapod is the language of the aliens: Heptapod A, the spoken form, consists of low-pitched, thrumming sounds that humans cannot pronounce, while Heptapod B, the written form, consists of circular logograms that represent entire sentences or concepts and have no beginning or end.
-
What is the twist ending of Arrival? The twist ending of Arrival is that Louise's flashbacks are actually flash-forwards of her future life with Ian and their daughter Hannah. Louise learns from the heptapods that they have come to Earth to offer humanity their language, which enables them to perceive time in a non-linear way. The heptapods need humanity's help in 3,000 years for an unknown reason. Louise also learns from General Shang of China that she will call him in 18 months and convince him to stop his attack on the heptapods by using his personal phone number and his wife's dying words. Louise decides to accept her future with Ian and Hannah, even though she knows that Hannah will die from an incurable disease.
-
Who are the actors in Arrival? The main actors in Arrival are Amy Adams as Louise Banks, Jeremy Renner as Ian Donnelly, Forest Whitaker as Colonel Weber, Michael Stuhlbarg as Agent Halpern, Tzi Ma as General Shang, Julia Scarlett Dan as 12-Year-Old-Hannah, Jadyn Malone as 6-Year-Old-Hannah.
-
Where can I watch Arrival online? You can watch Arrival online on various streaming platforms such as Netflix, Amazon Prime Video, Hulu, YouTube, Google Play, iTunes, Vudu, etc. However, availability may vary depending on your region and subscription plan.
-
-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Celemony Melodyne Studio 3 - Full Crack Serial Key [TOP].md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Celemony Melodyne Studio 3 - Full Crack Serial Key [TOP].md
deleted file mode 100644
index e1fbc40985c109b42c4a248997a28d000fc35409..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Celemony Melodyne Studio 3 - Full Crack Serial Key [TOP].md
+++ /dev/null
@@ -1,77 +0,0 @@
-
-
Celemony Melodyne Studio 3 - full Crack Serial Key
-
If you are looking for a powerful and versatile audio editing software, you might have heard of Celemony Melodyne Studio 3. This is a professional tool that allows you to edit, manipulate, and transform audio files in a musical and intuitive way. You can correct pitch, timing, and intonation, as well as change the tone color, dynamics, and formants of any sound. You can also work with multiple tracks, chords, scales, tempos, and tunings, and create innovative sound design effects.
-
However, Celemony Melodyne Studio 3 is not a cheap software. It costs $699 for a new license, or $299 for an upgrade from a previous version. If you are on a tight budget or just want to try it out before buying, you might be tempted to look for a crack serial key that can unlock all the features of Melodyne without paying anything. But is this a good idea? And how can you do it safely and effectively?
-
In this article, we will show you how to download and install Celemony Melodyne Studio 3 with a full crack serial key. We will also give you some tips on how to use it and what to watch out for. By the end of this article, you will be able to enjoy one of the best audio editing software in the market without breaking the bank.
-
How to download and install Celemony Melodyne Studio 3
-
The first step to use Celemony Melodyne Studio 3 with a crack serial key is to download the official package and the crack file from reliable sources. You can find the official package on the Celemony website, where you can also download a free trial version that lasts for 30 days. The trial version has all the features of the full version, but it will stop working after the trial period expires.
-
The crack file is a bit harder to find, as it is not officially supported by Celemony or any other legitimate website. You will have to search for it on various torrent sites, forums, or blogs that offer free serial keys for software. However, be careful when downloading crack files from unknown sources, as they might contain viruses, malware, or spyware that can harm your computer or steal your personal information. Always scan any file you download with an antivirus program before opening it.
-
Once you have downloaded both files, you can proceed to install Celemony Melodyne Studio 3 on your computer. Follow these steps:
-
-
-
Run the setup file of the official package and follow the instructions on the screen.
-
When prompted to enter a serial number, open the crack file and copy one of the serial keys provided.
-
Paste the serial key into the setup window and click Next.
-
Complete the installation process and launch Celemony Melodyne Studio 3.
-
You should now be able to use all the features of Melodyne without any limitations or restrictions.
-
-
How to use Celemony Melodyne Studio 3
-
Celemony Melodyne Studio 3 is a powerful and versatile audio editing software that offers many features and tools for manipulating audio files in a musical and intuitive way. Here are some of the main features and tools that you can use with Celemony Melodyne Studio 3:
-
Pitch and Time Editing
-
One of the most impressive features of Melodyne is its ability to edit pitch and time independently and accurately. You can correct the pitch of any note, adjust the vibrato, bend, or glide, and change the pitch modulation of any sound. You can also edit the timing of any note, move it forward or backward, stretch or compress it, and change the tempo or groove of any sound. You can do all this with a simple mouse click and drag, or use the keyboard shortcuts for more precision.
-
Melodyne displays the audio files as blobs, which are graphical representations of the notes and their pitch and time information. You can see the blobs in different views, such as note, chord, scale, tempo, or tuning. You can also switch between different modes, such as melodic, polyphonic, percussive, or universal, depending on the type of audio you are editing. You can also use the tools menu to access various functions, such as quantize, transpose, copy, paste, delete, split, join, or mute.
-
Tone Color and Dynamics Editing
-
Another amazing feature of Melodyne is its ability to edit the tone color and dynamics of any sound. You can change the formants, which are the frequency components that give each sound its characteristic timbre. You can also change the amplitude envelope, which is the shape of the sound's loudness over time. You can do this by using the formant tool and the amplitude tool, which allow you to modify the shape and size of the blobs. You can also use the macros menu to apply global changes to the tone color and dynamics of your audio files.
-
By editing the tone color and dynamics of your audio files, you can create various effects and transformations. For example, you can make a male voice sound like a female voice, or vice versa. You can make a guitar sound like a piano, or a piano sound like a guitar. You can make a drum sound like a bell, or a bell sound like a drum. You can also create harmonies, choruses, flangers, phasers, or other modulation effects.
-
Multi-Track Editing
-
A third remarkable feature of Melodyne is its ability to edit multiple tracks simultaneously and in relation to each other. You can import up to 24 tracks into Melodyne and see them in a single window. You can then edit each track individually or together with other tracks. You can also use the mixer menu to adjust the volume, pan, solo, mute, or bypass of each track.
-
By editing multiple tracks with Melodyne, you can achieve a better balance and coherence among your audio files. You can align the pitch and timing of different tracks to create a tight and harmonious mix. You can also create chords, scales, tunings, or tempos that match across different tracks. You can also use the compare menu to compare different versions of your edits and choose the best one.
-
Conclusion
-
Celemony Melodyne Studio 3 is one of the best audio editing software in the market. It offers many features and tools that allow you to edit, manipulate, and transform audio files in a musical and intuitive way. You can correct pitch, timing, and intonation, as well as change the tone color, dynamics, and formants of any sound. You can also work with multiple tracks, chords, scales, tempos, and tunings, and create innovative sound design effects.
-
However, Celemony Melodyne Studio 3 is not a cheap software. It costs $699 for a new license, or $299 for an upgrade from a previous version. If you want to use it without paying anything, you will have to look for a crack serial key that can unlock all the features of Melodyne. But this is not a risk-free option. You might encounter viruses, malware, or spyware that can damage your computer or compromise your privacy. You might also face legal issues or ethical dilemmas for using a pirated software.
-
Therefore, we recommend that you use Celemony Melodyne Studio 3 with a crack serial key only if you are aware of the potential consequences and willing to take the responsibility. Otherwise, you might want to consider buying a legitimate license or using a free trial version of Melodyne. This way, you can enjoy the benefits of Melodyne without any worries or regrets.
-
If you are interested in trying out Celemony Melodyne Studio 3 with a crack serial key, you can follow the steps we outlined in this article. We hope that this article was helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy editing!
-
FAQs
-
What are the system requirements for Celemony Melodyne Studio 3?
-
Celemony Melodyne Studio 3 requires the following system specifications:
-
-
Windows XP (SP3), Vista or Windows 7 (32- or 64-bit) or Mac OS X 10.4 or later (Intel or PowerPC)
-
1 GB RAM (2 GB recommended)
-
1 GB free hard disk space
-
DVD drive
-
An audio interface compatible with ASIO (Windows) or Core Audio (Mac)
-
A MIDI interface and keyboard (optional)
-
-
Is Celemony Melodyne Studio 3 compatible with other DAWs and plug-ins?
-
Celemony Melodyne Studio 3 can work as a stand-alone application or as a plug-in for other DAWs and audio editors. It supports the following plug-in formats:
-
-
VST (Windows and Mac)
-
AU (Mac)
-
RTAS (Windows and Mac)
-
AAX (Windows and Mac)
-
ReWire (Windows and Mac)
-
-
Celemony Melodyne Studio 3 can also integrate with other DAWs and plug-ins via the Melodyne Bridge or the Rewire Host Sync mode. For more information on how to use Melodyne with other software, please refer to the user manual.
-
What are the differences between the editions of Melodyne?
-
Celemony offers four different editions of Melodyne: essential, assistant, editor, and studio. Each edition has different features and prices. Here is a brief comparison of the four editions:
-
-
| Edition | Price | Features |
| --- | --- | --- |
| essential | $99 | The basic edition of Melodyne. It allows you to edit pitch and timing of monophonic audio files. |
| assistant | $249 | The intermediate edition of Melodyne. It adds the ability to edit pitch and timing of polyphonic audio files. |
| editor | $399 | The advanced edition of Melodyne. It adds the ability to edit tone color and dynamics of audio files, as well as the DNA Direct Note Access technology that allows you to edit individual notes within chords. |
| studio | $699 | The ultimate edition of Melodyne. It adds the ability to edit multiple tracks simultaneously and in relation to each other, as well as more features and tools for professional audio editing. |
-
-
How can I update or upgrade my version of Melodyne?
-
If you have a legitimate license for Celemony Melodyne Studio 3, you can update or upgrade your version of Melodyne by visiting the Celemony website. You can check for updates by clicking on the Help menu in the Melodyne window and selecting Check for Updates. You can also download the latest version of Melodyne from the website and install it over your existing version. You can upgrade your edition of Melodyne by purchasing a higher edition from the website and entering the new serial number in the Melodyne window.
What are the risks of using a crack serial key for Melodyne?
-
Using a crack serial key for Celemony Melodyne Studio 3 might seem like a convenient and cost-effective way to use one of the best audio editing software in the market, but it also comes with some risks and drawbacks. Here are some of the possible consequences of using a crack serial key for Melodyne:
-
-
You might expose your computer to viruses, malware, or spyware that can damage your system, corrupt your files, or steal your personal information.
-
You might violate the intellectual property rights of Celemony and face legal actions or penalties for using a pirated software.
-
You might compromise the quality and stability of your audio files, as crack serial keys might not work properly or cause errors or crashes.
-
You might miss out on the updates, upgrades, support, and features that Celemony offers to its legitimate customers.
-
You might lose your moral integrity and credibility as an audio professional or enthusiast, as using a crack serial key is unethical and dishonest.
-
-
Therefore, we advise you to use a crack serial key for Celemony Melodyne Studio 3 only if you are fully aware of the risks and willing to accept the responsibility. Otherwise, we suggest that you buy a legitimate license or use a free trial version of Melodyne. This way, you can support the developers of Melodyne and enjoy the benefits of using a genuine and reliable software.
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Anonymox Premium Serial Key What Is It and How to Use It Safely.md b/spaces/1gistliPinn/ChatGPT4/Examples/Anonymox Premium Serial Key What Is It and How to Use It Safely.md
deleted file mode 100644
index 9b708fcdc8c55acbfb2c6d1ac923f091fa732784..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Anonymox Premium Serial Key What Is It and How to Use It Safely.md
+++ /dev/null
@@ -1,8 +0,0 @@
-
-
This proxy hides your true internet identity and makes it look as if not you, but the proxy, is currently visiting the website. AnonymoX is more than just an add-on. Anonymox Premium activation code / serial number, free download, latest: just look at the example image above, then click the activation code. Instead of accessing a website directly, it will first be opened by one of our servers, called a proxy. Delete cookies, show your public IP, change your browser ID. Appear to originate from another country. Change your IP address (to one provided by us). More and more governments censor websites with the excuse of child safety, copyright infringement or the fight against terrorism, and thereby limit the freedom of speech. Also, blocking users based on their origin with GeoIP blocks is applied often, for example at media platforms like YouTube.
+ Free SUPPORT, other notes, subscribe. Download Anonymox Premium Active Code Generator 2017. We are adapting our tools to new platforms every week; if your device is not supported now, check back after some time or contact us. This software is built for anonymization in the internet world. + Easy setup + Open source, open code. Download the Anonymox Premium activation code here for free. In the fifth step, open the 'EXTENSIONS' folder, then right-click on 'CLIENT ANONYMOX'. That is all the information the admin of the Cara Jasa SEO blog can give for now about activating Anonymox Free to Premium (latest 2017); hopefully what was explained above is still useful to you, and may you browse the internet and hunt for backlinks more safely, away from Google's wrath. Thank you.
-
Fast, safe, simple: anonymoX enables you to browse the web anonymously. Normally the websites you visit receive information on your web identity (IP address); when accessing a website using anonymoX, your request will not be sent to the website directly but first to our network. Download Anonymox Premium Active Code Generator 2017, basic details: + Proxy support + Windows OS supported + Mac OS X supported + Latest mobile devices supported + Instructions and full feature list provided after installation. Download now. Download Anonymox Premium Active Code Generator 2017 will not let you down and will do what this program was made to do. So what is Anonymox in the first place? It is a plug-in extension that can change our IP address in a simple and easy way: just click and it is done. AnonymoX offers real freedom of speech, the freedom to express yourself without having to fear repression.
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Coloring Game - Expansion Pack No. 1 Free Download [torrent Full] The Ultimate Guide to This Amazing Puzzle Game Expansion.md b/spaces/1gistliPinn/ChatGPT4/Examples/Coloring Game - Expansion Pack No. 1 Free Download [torrent Full] The Ultimate Guide to This Amazing Puzzle Game Expansion.md
deleted file mode 100644
index c6a721ca9bed25c00c5096667dd23bfd52d7b84d..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Coloring Game - Expansion Pack No. 1 Free Download [torrent Full] The Ultimate Guide to This Amazing Puzzle Game Expansion.md
+++ /dev/null
@@ -1,12 +0,0 @@
-
-
Z-man Games has released free-to-download scenarios, with changes to the base game. Various scenarios are set to be released.[20] As of March 2017, scenarios Isolation[21] and Government Shutdown[22] have been published.[23]
People love free steam games, no doubt. But what many people hate is downloading so many parts and trying to install them on their own. This is why we are the only site that pre-installs every game for you. We have many categories like shooters, action, racing, simulators and even VR games! We strive to satisfy our users and ask for nothing in return. We revolutionized the downloading scene and will continue being your #1 site for free games.
-
The Sims 4 Free Download PC Game in Direct Link and Torrent. Released on September 2, 2014, The Sims 4 Deluxe Edition is the fourth major title in the life simulation video game series The Sims. The Sims 4 download free full version pc with pre-installed crack.
-
Aimhaven provides all pc gamers around the world the best and latest free steam games for pc by using direct download links and torrent links. Our goal is to satisfy our users and to become your #1 site for cracked free steam games by making downloading easy.
-
Path of Titans is an MMO dinosaur video game currently in active development for home computers and mobile devices fully compatible with cross platform play. Play as one of 18 core dinosaurs in a rich ecosystem filled with complex AI creatures and up to 200 other players. Explore an environment filled with natural events, quests, guilds, adventures, and free-form play, all while enjoying a rich life, avoiding death, and augmenting your dinosaur to suit your play style.
Path of Titans will provide a framework for dinosaur enthusiasts to roleplay as their favorite prehistoric beasts. We will also provide powerful modding tools to help you shape the game into your own dinosaur adventure. We will continue adding new content, including new creatures, skins, maps, and ongoing support for the aforementioned modding tools.
The release date for Path of Titans will be announced as we get closer to development completion.
-
-
Expand your dinosaur survival experience with game mods! Download community created creatures, maps, skins, game modes, and more. Or, get creative and craft your own game mods with our powerful modding tools that will be free for anyone to download and learn to use. With extensive documentation and tutorials and help to guide modders, anyone will be able to download our dev kit and begin creating.
-
Maximize your Soldner-X 2: Final Prototype experience with this action-loaded expansion pack. The Last Chapter adds three brand new and exciting stages, 13 unique challenges, 11 trophies and more to the original game.
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Flobo Hard Disk Repair Full Crack 11.md b/spaces/1gistliPinn/ChatGPT4/Examples/Flobo Hard Disk Repair Full Crack 11.md
deleted file mode 100644
index 10165f91e68b9e677f96419cb87d7254757c6b56..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Flobo Hard Disk Repair Full Crack 11.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-Import external 3D models With its bone adherence function, Design Doll can … ... automousekey.exe, flobo hard disk repair, nuance pdf converter, corel draw, ... Advanced Systemcare Ultimate 11 Serial Key 2018, Vmware Workstation Pro ... 1fdad05405
-
-
-
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/Sonnox-Oxford-64-Bit-Mac-Crack.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/Sonnox-Oxford-64-Bit-Mac-Crack.md
deleted file mode 100644
index be3bb30ceea7a138713d1c9a7914c2b379b4c14e..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/Sonnox-Oxford-64-Bit-Mac-Crack.md
+++ /dev/null
@@ -1,104 +0,0 @@
-## Sonnox Oxford 64 Bit Mac Crack
-
-
-
-
-
- 
-
-
-
-
-
-**Click Here [https://lodystiri.blogspot.com/?file=2txPBx](https://lodystiri.blogspot.com/?file=2txPBx)**
-
-
-
-
-
-
-
-
-
-
-
-
-# Sonnox Oxford 64 Bit Mac Crack: How to Download and Install the Ultimate Audio Plugin Suite
-
-
-
-If you are looking for a professional and versatile audio plugin suite for your Mac, you might want to check out Sonnox Oxford. Sonnox Oxford is a collection of high-quality plugins that cover everything from EQ, compression, reverb, limiter, inflator, dynamics, de-esser, and more. Sonnox Oxford plugins are used by many top producers and engineers in the music industry, and they can enhance your sound in any genre or style.
-
-
-
-However, Sonnox Oxford plugins are not cheap. The full bundle costs $1,299, which might be out of reach for many home studio owners. That's why some people look for a Sonnox Oxford 64 Bit Mac Crack, which is a way to bypass the license verification and use the plugins for free. But is it worth it? And how can you get it?
-
-
-
-## The Risks of Using a Sonnox Oxford 64 Bit Mac Crack
-
-
-
-Before you download and install a Sonnox Oxford 64 Bit Mac Crack, you should be aware of the risks involved. First of all, using a cracked software is illegal and unethical. You are violating the terms of service and the intellectual property rights of the developers. You are also depriving them of the revenue they deserve for their hard work and innovation.
-
-
-
-Secondly, using a cracked software can compromise your computer's security and performance. Cracked software often comes with malware, viruses, spyware, or adware that can infect your system and steal your personal information. Cracked software can also cause compatibility issues, crashes, glitches, or errors that can ruin your projects or damage your hardware.
-
-
-
-Thirdly, using a cracked software can affect your creativity and quality. Cracked software often has limited features, outdated versions, or poor sound quality that can hinder your workflow and results. Cracked software can also make you dependent on illegal sources and prevent you from learning new skills or exploring new possibilities.
-
-
-
-## The Benefits of Using a Legitimate Sonnox Oxford 64 Bit Mac
-
-
-
-Instead of using a Sonnox Oxford 64 Bit Mac Crack, you should consider investing in a legitimate copy of the plugin suite. Here are some of the benefits of doing so:
-
-
-
-- You will support the developers and the industry. By paying for the software, you will show your appreciation and respect for the creators and their products. You will also contribute to the development and improvement of future versions and updates.
-
-- You will protect your computer and your data. By downloading and installing the software from the official website, you will ensure that it is safe and clean from any malicious code or content. You will also enjoy the full functionality and performance of the software without any bugs or issues.
-
-- You will enhance your creativity and quality. By using the latest and most advanced version of the software, you will have access to all the features and options that can help you achieve your sonic goals. You will also be able to learn from the tutorials, manuals, support, and community that are available for legitimate users.
-
-
-
-## How to Download and Install Sonnox Oxford 64 Bit Mac
-
-
-
-If you are convinced that buying Sonnox Oxford 64 Bit Mac is the best option for you, here are the steps to download and install it on your computer:
-
-
-
-1. Go to [https://www.sonnox.com/](https://www.sonnox.com/) and click on "Shop" in the menu bar.
-
-2. Select the "Oxford Plugins" option and choose the bundle or individual plugins that you want to purchase.
-
-3. Add them to your cart and proceed to checkout. You will need to create an account or log in with your existing one.
-
-4. Enter your payment details and confirm your order. You will receive an email with your invoice and license codes.
-
-5. Download the Sonnox Installer from [https://www.sonnox.com/support/downloads](https://www.sonnox.com/support/downloads) and run it on your Mac.
-
-6. Follow the instructions on the screen to install the plugins on your system.
-
-7. Launch your DAW (Digital Audio Workstation) of choice and scan for new plugins.
-
-8. Activate your plugins using the license codes that you received in your email.
-
-9. Enjoy using your Sonnox Oxford plugins in 64-bit on your Mac.
-
-
-
-
-
-
-
-
-
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Among Us 3D A New Way to Play the Popular Game on PC and Mac.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Among Us 3D A New Way to Play the Popular Game on PC and Mac.md
deleted file mode 100644
index d03cfaa2983ef64fac435749704e66d357958c4a..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Among Us 3D A New Way to Play the Popular Game on PC and Mac.md
+++ /dev/null
@@ -1,136 +0,0 @@
-
-
Among Us 3D: A New Dimension of Deception and Betrayal
-
If you are a fan of Among Us, the hit multiplayer game where you have to find out who is the impostor among your crewmates in a spaceship, you might be interested in trying out Among Us 3D, a fan-made VR version that takes the game to a whole new level of immersion and realism. In this article, we will tell you everything you need to know about Among Us 3D, including what it is, how to play it, how to download it, why you should play it, what are the reviews and ratings, and some FAQs.
Among Us 3D is a VR remake of Among Us that was created by Jar, a VR developer who posted a video of his project on YouTube in October 2020. The video went viral and received over 6 million views as of June 2021. Jar then released his map for free on VRChat, a social VR platform where users can create and explore virtual worlds with other players.
-
Among Us 3D follows the same premise as the original game: you are either a crewmate or an impostor on a spaceship called The Skeld. As a crewmate, your goal is to complete tasks around the ship or find out who the impostors are and vote them out. As an impostor, your goal is to kill crewmates or sabotage the ship without getting caught.
-
However, unlike the original game, which is played in 2D from a top-down perspective, Among Us 3D is played in first-person from a VR headset. This means that you can see your own body and hands, interact with objects using motion controls, walk around the ship using thumbsticks or teleportation, communicate with other players using voice chat or text chat, and experience more realistic graphics and sounds.
-
Among Us 3D also adds some new features and modes to the game, such as:
-
among us 3d game download for pc free
-among us 3d online play on pc with bluestacks
-among us 3d single player download for pc/laptop
-among us 3d steam version for pc
-among us 3d multiplayer free download for windows 10
-among us 3d action game by innersloth for pc
-among us 3d youtube video with free download link for pc
-among us 3d emulator software for mac and pc
-among us 3d hide n seek mode on pc
-among us 3d apk download for android and pc
-among us 3d fatdino game for pc
-among us 3d cross-platform play between pc and mobile
-among us 3d new map and roles update for pc
-among us 3d casual game with social deduction on pc
-among us 3d local party game via wifi on pc
-among us 3d full controller support and remote play on pc
-among us 3d achievements and awards on steam for pc
-among us 3d browser game without downloading for pc
-among us 3d system requirements and compatibility for pc
-among us 3d community hub and discussions on steam for pc
-among us 3d impostor gameplay and tips for pc
-among us 3d crewmate tasks and objectives for pc
-among us 3d customization options and skins for pc
-among us 3d sabotage and kill strategies for impostors on pc
-among us 3d in-game text chat and discord integration for pc
-among us 3d how to install and play on pc with bluestacks
-among us 3d best platform to play this android game on pc
-among us 3d review and rating on steam for pc
-among us 3d trailer and gameplay video on youtube for pc
-among us 3d how to link accounts between platforms on pc
-among us 3d how to find a game online from the host list on pc
-among us 3d how to report dead bodies and vote to eject impostors on pc
-among us 3d how to use the admin map and security cameras on pc
-among us 3d how to react quickly to undo the impostor's sabotages on pc
-among us 3d how to sneak through the vents and close doors as impostor on pc
-among us 3d how to call emergency meetings and discuss suspicious behavior on pc
-among us 3d how to win by completing tasks or discovering the impostors on pc
-among us 3d how to pretend to run tasks and blend in with the crewmates as impostor on pc
-among us 3d different maps to play in: the skeld, mira hq, polus, and the airship on pc
-among us 3d different modes to choose from: classic or hide n seek on pc
-among us 3d different languages supported: english, portuguese, spanish, korean, russian, french, italian, german, dutch, japanese, chinese on pc
-among us 3d different game options: add more impostors, more tasks, different roles, and so much more on pc
-
-
A new map called The Skeld II, which is based on The Skeld but has some changes and additions.
-
A new role called The Detective, who can see footprints and blood stains left by impostors.
-
A new mode called Hide and Seek, where the impostor is revealed at the start and the crewmates have to hide or run away from them.
-
A new mode called Murder Mystery, where the impostor can only kill one person at a time and the crewmates have to find clues and solve the mystery.
-
A new mode called Prop Hunt, where the impostor can disguise themselves as any object on the ship and the crewmates have to find them.
-
-
Among Us 3D is still in development and Jar plans to add more features and improvements in the future, such as more maps, more roles, more modes, more customization options, and more stability and performance enhancements.
-
How to play Among Us 3D?
-
To play Among Us 3D, you need a VR headset that is compatible with VRChat, such as Oculus Quest, Oculus Rift, HTC Vive, Valve Index, or Windows Mixed Reality. You also need a VRChat account, which you can create for free on their website or app. Once you have everything set up, you can follow these steps to join or host a game of Among Us 3D:
-
-
Launch VRChat and put on your VR headset.
-
Select the Worlds tab from the menu and search for "Among Us 3D" or "Jar".
-
Select the Among Us 3D world by Jar and click on Go or Join.
-
Once you are in the world, you will see a lobby with a screen that shows the number of players, the map, the mode, and the settings. You can also see a list of players on the left side of the screen.
-
If you want to join an existing game, look for a portal that has a green light above it and walk through it. You will be teleported to a waiting room where you can see your character and other players. You can also customize your character by using the buttons on the wall.
-
If you want to host a new game, look for a portal that has a red light above it and walk through it. You will be teleported to a host room where you can see your character and other players. You can also customize your character by using the buttons on the wall. As the host, you can also change the map, the mode, and the settings by using the buttons on the wall. When you are ready to start the game, press the Start button on the wall.
-
When the game starts, you will be assigned a role: Crewmate, Impostor, or Detective. You will also see your tasks or objectives on your wrist. You can use your motion controllers to interact with objects and perform tasks or actions. You can use your voice chat or text chat to communicate with other players. You can also use your thumbsticks or teleportation to move around the ship.
-
If you are a Crewmate, you have to complete your tasks or find out who the Impostors are and vote them out. If you find a dead body, you can report it by pressing a button near it. This will trigger an emergency meeting where everyone can discuss and vote. You can also call an emergency meeting by pressing a button in the cafeteria. However, you have a limited number of meetings per game.
-
If you are an Impostor, you have to kill Crewmates or sabotage the ship without getting caught. You can kill someone by getting close to them and pressing a button on your wrist. You can also sabotage by pressing a button on your wrist and selecting an option from a menu. However, you have a cooldown time between each kill or sabotage. You can also vent by pressing a button near a vent. This will allow you to travel between vents quickly and stealthily.
-
If you are a Detective, you have to help Crewmates find out who the Impostors are by using your special abilities. You can see footprints and blood stains left by Impostors by pressing a button on your wrist. You can also scan someone's role by getting close to them and pressing a button on your wrist. However, you have a limited number of scans per game.
-
The game ends when either one of these conditions is met: All Crewmates are dead; All Impostors are voted out; All tasks are completed; The ship is destroyed by sabotage.
-
-
How to download and install Among Us 3D?
-
To download and install Among Us 3D, you need to download and install VRChat first. VRChat is available for free on Steam, Oculus Store, or VRChat website. Depending on your VR headset, you need to follow different steps to get VRChat:
-
For Oculus Quest users:
-
-
Download VRChat from Oculus Store.
Launch VRChat and create or log in to your VRChat account.
-
Follow the steps in the previous section to join or host a game of Among Us 3D.
-
-
For Oculus Rift users:
-
-
Download VRChat from Oculus Store or Steam.
-
Launch VRChat and create or log in to your VRChat account.
-
Follow the steps in the previous section to join or host a game of Among Us 3D.
-
-
For HTC Vive, Valve Index, or Windows Mixed Reality users:
-
-
Download VRChat from Steam.
-
Launch VRChat and create or log in to your VRChat account.
-
Follow the steps in the previous section to join or host a game of Among Us 3D.
-
-
Why should you play Among Us 3D?
-
Among Us 3D is a fun and innovative way to experience the game that you already love. It offers many advantages and disadvantages compared to the original game. Here are some of them:
-
Pros of playing Among Us 3D
-
-
You can enjoy a more immersive and realistic gameplay, as you can see and interact with the ship and the players in 3D.
-
You can have more fun and challenge, as you can use your body language, gestures, and voice to communicate and deceive others.
-
You can explore new features and modes, such as The Detective role, Hide and Seek mode, Murder Mystery mode, and Prop Hunt mode.
-
You can support a fan-made project that is free and constantly updated by a passionate developer.
-
-
Cons of playing Among Us 3D
-
-
You need a VR headset and a VRChat account, which might not be compatible, accessible, or affordable for everyone.
-
You might experience motion sickness, discomfort, or fatigue from playing in VR for too long.
-
You might encounter some bugs, glitches, or crashes, as the game is still in development and not officially supported by the original developers.
-
You might miss some features or content from the original game, such as other maps, roles, modes, customization options, and updates.
-
-
What are the reviews and ratings of Among Us 3D?
-
Among Us 3D has received mostly positive reviews and ratings from critics and users alike. Here are some examples:
"Among Us 3D is a fantastic example of how VR can enhance an already great game. It adds a new layer of immersion, realism, interactivity, and fun to the social deduction genre. It also showcases the creativity and passion of the VR community. If you have a VR headset and a love for Among Us, you should definitely give it a try."
"Among Us 3D is one of the most impressive fan-made VR projects we’ve seen so far. It captures the essence of the original game while adding new twists and features that make it stand out. It’s also a great way to socialize and have fun with friends or strangers in VR."
"This is amazing. I love how you can actually do tasks with your hands and see other players' movements and expressions. It feels like you are really there on the ship. The new modes are also very fun and creative. I highly recommend this to anyone who likes Among Us."
"This is incredible. I had so much fun playing this with my friends. It's hilarious how you can see people's reactions when they get killed or accused. The detective role is also very cool. Props to Jar for making this."
-
-
Conclusion
-
In conclusion, Among Us 3D is a fan-made VR version of Among Us that offers a new dimension of deception and betrayal. It is a fun and innovative way to experience the game that you already love, as you can see and interact with the ship and the players in 3D. It also adds some new features and modes that make it more challenging and enjoyable. However, it also has some drawbacks, such as hardware and account requirements, potential motion sickness, and missing content, and it is still in development and not officially supported by the original developers. If you have a VR headset and a VRChat account, you can download and install Among Us 3D for free and join or host a game with other players. You can also support Jar, the fan developer behind the project, by following his YouTube channel or donating to his Patreon. Whether you are a crewmate, an impostor, or a detective, you will have a blast playing Among Us 3D.
FAQs
-
Here are some frequently asked questions and answers about Among Us 3D:
-
-
Q: Can I play Among Us 3D without a VR headset?
-
A: Yes, you can play Among Us 3D without a VR headset by using the desktop mode of VRChat. However, you will not be able to enjoy the full VR experience and some features might not work properly.
-
Q: Can I play Among Us 3D with players who are using the original game?
-
A: No, you can only play Among Us 3D with players who are using VRChat and the same world. You cannot cross-play with players who are using the original game on PC or mobile.
-
Q: Can I play Among Us 3D on other maps besides The Skeld and The Skeld II?
-
A: Not yet, but Jar plans to add more maps in the future, such as Mira HQ, Polus, and The Airship.
-
Q: Can I customize my character in Among Us 3D?
-
A: Yes, you can customize your character by using the buttons on the wall in the waiting room or the host room. You can change your color, hat, pet, skin, name, and voice.
-
Q: Can I report bugs or give feedback on Among Us 3D?
-
A: Yes, you can report bugs or give feedback on Among Us 3D by leaving a comment on Jar's YouTube channel or joining his Discord server.
-
-
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Clash of Warpath APK The Best Android Game for Strategy and Hero Cultivation.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Clash of Warpath APK The Best Android Game for Strategy and Hero Cultivation.md
deleted file mode 100644
index c6aab7d37b707a81215026c66033ad268b17ee04..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Clash of Warpath APK The Best Android Game for Strategy and Hero Cultivation.md
+++ /dev/null
@@ -1,115 +0,0 @@
-
-
Clash of Warpath: Wild Rift APK - A Strategy War Game for Android
-
If you are looking for a strategy war game that combines hero cultivation, castle tower defense, and alliance confrontation, you might want to check out Clash of Warpath: Wild Rift APK. This is a game developed by TAG GAME STUDIO that lets you build, plan, and lead your league to victory with allies in the kingdom war. In this article, we will tell you what Clash of Warpath: Wild Rift APK is, how to download and install it, how to play it, and what its pros and cons are.
-
What is Clash of Warpath: Wild Rift APK?
-
Clash of Warpath: Wild Rift APK is an Android game that belongs to the strategy genre. It is also known as Clash of Wars Mobile or Lol: Wild Rift - Hero Battle. The game is set in a fantasy world where you can recruit and train more than 50 superheroes, and experience different ways of playing, such as base construction, castle attack and defense, hero arena, zombie attack, alliance war, cross-server kingdom war, etc.
The game has many features that make it fun and challenging. Here are some of them:
-
Recruit heroes
-
You can summon more than 50 superheroes in the wish pool and select the right heroes to form different teams. Cultivate your superheroes, awaken their super abilities, activate their exclusive artifacts, and keep improving their strength to cope with various difficulties and challenges.
-
Build bases
-
You can place defensive buildings freely in the base and send superheroes to guard it. Use your brains and strategy to build a solid city defense system that can resist sneak attacks from other players and zombie invasions in the Apocalypse.
-
Kingdom clash
-
You can form an alliance with your game friends and lead heroes and soldiers to conquer other lords. Develop and strengthen your forces through these clashes. Participate in alliance battles and kingdom wars with your allies, build a magnificent empire, achieve hegemony, and write a brilliant legend!
-
Fight monster
-
You can lead your superheroes to explore the relics of Atlantis. Seize the right moment to release active skills and deal burst damage when you challenge the beasts. Defeat the final boss and win generous rewards.
-
-
How to download and install Clash of Warpath: Wild Rift APK?
-
If you want to play Clash of Warpath: Wild Rift APK on your Android device, you need to download and install it first. Here are the steps:
-
Download from APKCombo
-
You can download Clash of Warpath: Wild Rift APK from APKCombo, a website that provides free APK files for Android games and apps. You can choose the version that suits your device and download it as an XAPK or an APK file.
-
Install the APK file
-
After downloading the file, you need to install it on your device. You can use APKCombo Installer, a tool that helps you install XAPK, APKS or OBB files easily. Just follow the instructions on the screen and wait for the installation to complete. You may need to enable the installation of unknown sources in your device settings.
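If you prefer to sideload the APK from a computer instead of tapping through the on-screen prompts, Android's standard adb tool can install it over USB once USB debugging is enabled on the device. The short Python sketch below simply wraps that adb call; the file name clash-of-warpath.apk is a placeholder for whatever your downloaded file is actually called, and it assumes adb is installed and available on your PATH.

```python
import subprocess
from pathlib import Path

APK = Path("clash-of-warpath.apk")  # placeholder name for the downloaded APK file

def sideload(apk: Path) -> None:
    """Install the APK on the connected device via adb; -r replaces an existing install."""
    if not apk.is_file():
        raise FileNotFoundError(f"APK not found: {apk}")
    # Requires a device with USB debugging enabled and authorized for this computer.
    subprocess.run(["adb", "install", "-r", str(apk)], check=True)

if __name__ == "__main__":
    sideload(APK)
```

Either route ends the same way: once the install finishes, the game's icon appears in your app drawer.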
-
How to play Clash of Warpath: Wild Rift APK?
-
Once you have installed the game, you can start playing it by tapping on its icon on your device. Here are some tips on how to play Clash of Warpath: Wild Rift APK:
-
Create your account
-
You need to create your account before you can enter the game. You can choose to log in with your Facebook, Google or Guest account. You can also create a username and a password for your game account. You will also need to select a server and a language for the game.
-
Choose your heroes
-
After creating your account, you will enter the game lobby where you can see different menus and options. You can tap on the hero icon at the bottom left corner to see the list of heroes that you can recruit. You can also see their attributes, skills, and artifacts. You can use gold or diamonds to summon heroes in the wish pool. You can also upgrade, awaken, and equip your heroes to make them stronger.
-
Upgrade your base
-
You can tap on the base icon at the bottom right corner to enter your base. Here you can see different buildings that you can construct and upgrade, such as barracks, walls, towers, mines, farms, etc. You can also see your resources, such as food, wood, iron, and gold. You need to collect and manage these resources to build and maintain your base. You can also place defensive buildings and heroes to protect your base from enemy attacks.
-
Join an alliance
-
You can tap on the alliance icon at the top left corner to see the list of alliances that you can join or create. Joining an alliance will give you many benefits, such as helping each other with construction and research, sharing resources and information, chatting with other members, participating in alliance events and wars, etc. You can also cooperate with your allies to attack other lords and expand your territory.
-
Conquer other lords
-
You can tap on the map icon at the top right corner to see the world map where you can see different regions and kingdoms. You can also see other players' bases and castles, as well as monsters and resources that you can attack and collect. You can send your heroes and troops to conquer other lords' bases and castles, or defend your own from enemy invasions. You can also join kingdom wars with your allies and fight for glory and rewards.
-
Pros and cons of Clash of Warpath: Wild Rift APK
-
Like any other game, Clash of Warpath: Wild Rift APK has its pros and cons. Here are some of them:
-
Pros
-
-
The game has high-quality graphics and sound effects that create an immersive gaming experience.
-
The game has a variety of heroes, buildings, modes, events, and challenges that keep the gameplay interesting and diverse.
-
The game has a friendly and active community of players that you can interact with through chat and alliance features.
-
The game is free to download and play, and does not require a lot of storage space or internet data.
-
-
Cons
-
-
The game may have some bugs and glitches that affect the performance and stability of the game.
-
The game may have some balance issues that make some heroes or strategies too powerful or too weak.
-
The game may have some pay-to-win elements that give an unfair advantage to players who spend real money on the game.
-
The game may be addictive and time-consuming for some players who may neglect their other responsibilities or hobbies.
-
-
Conclusion
-
Clash of Warpath: Wild Rift APK is a strategy war game for Android that lets you build, plan and lead your league to victory with allies in the kingdom war. The game has many features that make it fun and challenging, such as recruiting heroes, building bases, kingdom clash, fighting monsters, etc. The game also has its pros and cons that you should consider before playing it. If you are interested in trying out this game, you can download it from APKCombo and install it using APKCombo Installer. We hope this article has helped you learn more about Clash of Warpath: Wild Rift APK.
-
FAQs
-
Here are some frequently asked questions about Clash of Warpath: Wild Rift APK:
-
-
Is Clash of Warpath: Wild Rift APK safe to download?
-
Yes, Clash of Warpath: Wild Rift APK is safe to download from APKCombo, a website that provides free and verified APK files for Android games and apps. However, you should always be careful when downloading and installing any APK file from unknown sources, as they may contain malware or viruses that can harm your device or data.
-
How can I get more gold and diamonds in Clash of Warpath: Wild Rift APK?
-
Gold and diamonds are the main currencies in Clash of Warpath: Wild Rift APK. You can use them to summon heroes, upgrade buildings, buy items, etc. You can get more gold and diamonds by completing quests, participating in events, winning battles, collecting resources, etc. You can also buy them with real money through in-app purchases, but this is not recommended as it may ruin the fun and fairness of the game.
-
How can I change my server or language in Clash of Warpath: Wild Rift APK?
-
You can change your server or language in Clash of Warpath: Wild Rift APK by tapping on the settings icon at the top right corner of the game lobby. Then you can tap on the server or language option and choose the one that you prefer. However, you should note that changing your server will reset your game progress and data, so you should only do it if you really want to start over or join a different region.
-
How can I contact the customer service or report a problem in Clash of Warpath: Wild Rift APK?
-
You can contact the customer service or report a problem in Clash of Warpath: Wild Rift APK by tapping on the settings icon at the top right corner of the game lobby. Then you can tap on the help or feedback option and choose the one that suits your need. You can also send an email to taggamestudio@gmail.com or visit their official website for more information and support.
-
Is Clash of Warpath: Wild Rift APK compatible with my device?
-
Clash of Warpath: Wild Rift APK requires Android 4.4 or higher to run smoothly. It also requires at least 100 MB of free storage space and a stable internet connection. You can check your device specifications and compatibility before downloading and installing the game. If you encounter any compatibility issues or errors, you can try updating your device software, clearing your cache, or reinstalling the game.
-
-
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Alice in Borderland Season 1 in Hindi 480p 720p 1080p HD Netflix Series.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Alice in Borderland Season 1 in Hindi 480p 720p 1080p HD Netflix Series.md
deleted file mode 100644
index dca5be0c817e1ea4a68756cc55f9cc09ea9f4f21..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Alice in Borderland Season 1 in Hindi 480p 720p 1080p HD Netflix Series.md
+++ /dev/null
@@ -1,95 +0,0 @@
-
-
Alice in Borderland: A Thrilling Netflix Series You Don't Want to Miss
-
If you are a fan of suspense, action, mystery, and sci-fi, then you should definitely check out Alice in Borderland, a Netflix original series that will keep you on the edge of your seat. Alice in Borderland is a Japanese live-action adaptation of a manga series by Haro Aso, and it has been praised by critics and viewers alike for its gripping storyline, stunning visuals, and stellar performances. In this article, we will tell you everything you need to know about Alice in Borderland, and how you can download it in Hindi.
Alice in Borderland is a Netflix original series that premiered worldwide in December 2020, with an eight-episode first season. It was followed by an eight-episode second season in December 2022. The series is directed by Shinsuke Satō, who is known for his work on other manga adaptations such as Bleach, Kingdom, and Death Note.
-
The plot of the series
-
The series follows Ryohei Arisu, a young man who is bored with his life and spends his time playing video games with his friends Daikichi Karube and Chota Segawa. One day, they see a mysterious fireworks display that transports them to a parallel version of Tokyo, where they find themselves alone and surrounded by danger. They soon realize that they have to participate in deadly games to survive and earn visas that extend their lives. Along the way, they meet other players who are also trapped in this twisted world, such as Yuzuha Usagi, a skilled climber who helps Arisu navigate the games.
-
The cast and crew of the series
-
The series features an impressive cast of Japanese actors who bring their characters to life. The main cast includes Kento Yamazaki as Ryohei Arisu, Tao Tsuchiya as Yuzuha Usagi, Nijiro Murakami as Shuntaro Chishiya, Aya Asahina as Hikari Kuina, Yuki Morinaga as Chota Segawa, Keita Machida as Daikichi Karube, and Sho Aoyagi as Aguni Morizono. The series also features some guest stars from other countries, such as Park Jin-joo from South Korea and Ayame Misaki from France.
-
The series is produced by Netflix and Robot Communications, with scripts written by Yasuko Kuramitsu. The music is composed by Yutaka Yamada, who also worked on Tokyo Ghoul and Vinland Saga. The theme songs are "Maze" by Milet for season one and "Shout Baby" by YOASOBI for season two.
-
Why should you watch Alice in Borderland?
-
Alice in Borderland is not your typical survival drama. It is a series that will keep you hooked with its thrilling plot twists, intense action scenes, and emotional moments. Here are some reasons why you should watch Alice in Borderland.
-
The suspense and action of the series
-
One of the main attractions of Alice in Borderland is the suspense and action that comes from the games that the characters have to play. The games are designed to test their physical, mental, and moral abilities, and they often involve life-or-death situations. The games are also varied and creative, ranging from card games to tag games to shooting games. The series does not shy away from showing the violence and gore that result from the games, making them more realistic and shocking.
-
The themes and messages of the series
-
Alice in Borderland is not just a series about survival and death. It is also a series that explores the themes and messages of human nature, society, and morality. The series asks questions such as: What does it mean to live and die? What are the values and beliefs that guide our actions? How do we cope with the challenges and uncertainties of life? How do we relate to others who are different from us? The series shows how the characters grow and change as they face these questions, and how they find meaning and purpose in their existence.
-
The ratings and reviews of the series
-
Alice in Borderland has received positive ratings and reviews from critics and viewers alike. The series has an 8.1/10 rating on IMDb, a 100% rating on Rotten Tomatoes, and a 4.6/5 rating on Google. Some of the praises that the series has received are:
-
-
"Alice in Borderland is a thrilling ride that never lets up, delivering a non-stop barrage of inventive games, shocking twists, and emotional moments." - IGN
-
"Alice in Borderland is a masterclass in suspense, with each episode leaving you breathless and desperate for more." - The Guardian
-
"Alice in Borderland is a captivating series that combines sci-fi, mystery, and action in a unique and compelling way. It is one of the best Netflix originals of the year." - Forbes
-
-
How to download Alice in Borderland in Hindi?
-
If you are interested in watching Alice in Borderland, you might be wondering how you can download it in Hindi. Hindi is one of the most widely spoken languages in the world, and many people prefer to watch shows and movies in their native language. Here are some ways you can download Alice in Borderland in Hindi.
-
The availability of the series on Netflix
-
The easiest way to watch Alice in Borderland is to stream it on Netflix, the official platform that produces and distributes the series. Netflix has a large library of content that is available in different languages, including Hindi. You can change the audio and subtitle settings on Netflix to watch Alice in Borderland in Hindi.
-
The options for downloading the series in Hindi
-
If you want to download Alice in Borderland in Hindi, you have two options: downloading it from Netflix or downloading it from other sources. Downloading it from Netflix is the legal and safe option, as you can download episodes or seasons of Alice in Borderland on your device and watch them offline. However, you need to have a Netflix subscription and enough storage space on your device to do this.
-
Downloading it from other sources is the illegal and risky option, as you might encounter websites or apps that offer Alice in Borderland in Hindi for free or for a fee. However, these websites or apps might be unreliable, unsafe, or fraudulent, as they might contain viruses, malware, or scams that can harm your device or steal your personal information. Moreover, downloading Alice in Borderland from other sources might violate the intellectual property rights of Netflix and the creators of the series.
-
The benefits of downloading Alice in Borderland in Hindi
-
Downloading Alice in Borderland in Hindi has some benefits that might enhance your viewing experience. Some of these benefits are:
-
-
You can watch Alice in Borderland at your own pace and convenience, without worrying about internet connection or buffering issues.
-
You can watch Alice in Borderland with your friends or family who speak Hindi, and enjoy the series together.
-
You can understand the dialogues and expressions of the characters better, and appreciate the nuances and emotions of the series more.
-
You can learn some new words or phrases in Hindi, and improve your language skills.
-
-
Conclusion
-
Alice in Borderland is a thrilling Netflix series that you don't want to miss. It is a series that will keep you hooked with its suspenseful plot, action-packed scenes, and meaningful themes. It is also a series that you can watch and download in Hindi, if you prefer to watch shows and movies in your native language. Whether you stream it or download it, Alice in Borderland is a series that will entertain you, challenge you, and inspire you.
-
FAQs
-
-
Is Alice in Borderland based on a true story?
-
No, Alice in Borderland is not based on a true story. It is based on a manga series by Haro Aso, who is a Japanese manga artist. The manga series was first published in 2010 and ran until 2016. It has 18 volumes and 87 chapters. The Netflix series is a live-action adaptation of the manga series, with some changes and additions to the original story.
-
Will there be a third season of Alice in Borderland?
-
Yes, there will be a third season of Alice in Borderland. Netflix has confirmed that the series has been renewed for a third season, which will be the final season of the series. The release date of the third season has not been announced yet, but it is expected to be sometime in 2024.
-
What is the meaning of the title Alice in Borderland?
-
The title Alice in Borderland is a play on the title of the classic novel Alice in Wonderland by Lewis Carroll, which is about a young girl who falls into a fantasy world full of strange and whimsical characters and events. The title Alice in Borderland suggests that the series is about a young man who falls into a parallel world full of dangerous and mysterious games and challenges. The title also refers to the name of the main character, Ryohei Arisu, whose surname sounds like Alice in Japanese.
-
How many games are there in Alice in Borderland?
-
There are many games in Alice in Borderland, each with different rules, levels, and rewards. The games are categorized into four types: Hearts, Spades, Clubs, and Diamonds. Hearts games test the players' emotions and relationships. Spades games test the players' physical abilities and skills. Clubs games test the players' intelligence and logic. Diamonds games test the players' luck and intuition.
-
Who is the mastermind behind Alice in Borderland?
-
The mastermind behind Alice in Borderland is not revealed until the end of the second season. The mastermind is Asahi Kujō, a former game developer who created the parallel world and the games as a way of fulfilling his own fantasies and desires. He is also known as Mira, the leader of the Beach faction, who wears a mask and pretends to be a player.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Azrbaycanda dron istehsalnn inkiaf yeni texnologiyalar v perspektivlr.md b/spaces/1phancelerku/anime-remove-background/Azrbaycanda dron istehsalnn inkiaf yeni texnologiyalar v perspektivlr.md
deleted file mode 100644
index cb5c485ec4afe9ef79968e477e7f594950b85edd..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Azrbaycanda dron istehsalnn inkiaf yeni texnologiyalar v perspektivlr.md
+++ /dev/null
@@ -1,22 +0,0 @@
-
-
- In recent years, drones have become one of the most popular and interesting products in the technology world. These devices can be used in many areas, such as aerial filming, reconnaissance, transport, and entertainment. However, buying and using a drone also requires some knowledge, skill, and responsibility. In this article, we have gathered the most important information about drone sales in Azerbaijan for you.
What is a drone and why do you need one?
- A drone is an unmanned aerial vehicle that is flown by remote control or autonomously. The word "drone" refers to a male bee that buzzes around; depending on the systems it carries, it is also fair to call a drone a flying robot. The benefits of drones are too many to count. Some of them are: - Drones are a great tool for aerial photography, capturing the beauty of nature and the city. - Drones can be used for reconnaissance and monitoring: mapping an area, delivering aid to people and animals, and observing illegal activity. - Drones have great potential in transport and delivery, carrying people or cargo, delivering orders, and easing traffic problems. - Drones are also an excellent way to have fun: flying, racing, and performing tricks interest and entertain people.
Types and features of drones
- Drones can be divided into different types according to how they fly. The best-known types are: - Multirotor drones: these fly with two or more rotors, motors whose propellers create the airflow that lets the drone rise and hold a stable position. Multirotor drones are highly maneuverable but slower and shorter-ranged, which makes them the best choice for aerial filming. The most popular multirotor designs are the quadcopter (four rotors), hexacopter (six rotors), and octocopter (eight rotors). - Single-rotor drones: these fly with one main rotor plus a balance rotor that controls rotation. They are faster and more agile than multirotor drones, but they use more energy and demand more piloting skill. They look like helicopters and are used mostly for reconnaissance and monitoring. - Winged drones: these fly like airplanes, using their wings to ride the airflow. They are the fastest and stay airborne the longest, but they need more space to fly and are less maneuverable, so they are used mostly for transport and delivery. The best-known winged designs are fixed-wing, tilt-wing, and tail-sitter drones. Beyond type, drones differ in flight capability, energy consumption, cameras and sensors, weight and size, and design and materials; these features determine a drone's price, quality, and performance.
- When buying a drone in Azerbaijan, several important points deserve attention: the drone's legal status and permits, its quality and safety, its price, and where it can be bought.
Legal status and permits for drones
- Using a drone in Azerbaijan is subject to legal requirements set out in the Rules for the Use of the Airspace of the Republic of Azerbaijan. Under these rules: a permit must be obtained from the State Civil Aviation Service; the drone's owner, operator, and pilot must be registered and provide identity documents; the drone's technical characteristics, flight plan, flight area, flight duration, purpose, and other details must be reported to the State Civil Aviation Service; a certificate or attestation confirming that the drone is airworthy must be obtained; the drone must be equipped with light and sound signals indicating the time and place of the flight; and the flight area, altitude, speed, distance, and other parameters must stay within the rules. Violations carry civil, administrative, or criminal liability, depending on the amount of material or moral damage, the type and environment of the flight, and the seriousness of the consequences.
Quality and safety of drones
- When buying a drone, pay attention to its quality and safety, because a drone affects the life and property of both its pilot and the people around it. Consider the following points: - Brand and model: well-known, trusted brands and models offer better quality, performance, service, and warranty. One of the most widely used brands in Azerbaijan is DJI, and its Mavic, Phantom, and Spark models are among the most popular. - Battery and charger: the battery determines flight time; its capacity, voltage, current, weight, and size affect performance, so a high-capacity, lightweight, appropriately sized battery is preferable, and the charger should match the battery, charge quickly, and show the charge level. - Camera and sensors: these determine image quality, flight stability, collision avoidance, and other functions; the camera's resolution, angle, zoom, and gimbal (stabilizer) matter, and the sensors should measure weather conditions, altitude, distance, direction, and speed. - Weight and size: these determine the drone's flight capability, energy consumption, portability, and legal status. As for where to buy, many physical shops in Azerbaijan sell drones, and their names and addresses can be found on the internet.
Rules and key points for using a drone in Azerbaijan
- Buying and registering a drone is not enough to use it in Azerbaijan; flying one also requires knowing certain rules and key points: the regulation of public and private areas, the medical, technical, and legal requirements, and the rules of public conduct and etiquette.
Regulation of public and private areas for drone flights
- The regulation of public and private areas for drone flights is set out in the Rules for the Use of the Airspace of the Republic of Azerbaijan. Under these rules: flights must stay at least 5 km away from airports, airfields, air bases, air radar, and air-defence facilities; at least 500 m away from government, diplomatic, legal, medical, educational, and religious facilities; and at least 150 m away from crowds, mass events, sports competitions, and festivals. Flying over private property (land, houses, gardens, and so on) requires the owner's permission. The drone must stay within the pilot's line of sight, must not collide with other aircraft, and must not damage objects on the ground.
Medical, technical, and legal requirements for flying a drone
- There are also medical, technical, and legal requirements for flying a drone: the pilot must be in good health, and flying while emotionally or mentally impaired or under the influence of alcohol or drugs is forbidden; the pilot must know the drone's technical characteristics, operating principle, and flight parameters, must check the battery charge level, the cameras and sensors, and the light and sound signals, and must hold the certificate or attestation confirming that the drone is airworthy; and the pilot must have the drone's registration, permit, insurance, and other legal documents, must report the time and place of the flight to the State Civil Aviation Service, must immediately report any incident during the flight to the relevant authorities, and must help compensate any material or moral damage.
Public conduct and etiquette for flying a drone
- The rules of public conduct and etiquette must also be observed, because flying a drone affects the rights and interests of both the pilot and others: do not endanger people, animals, or plants, disturb their peace, or cause them fear and discomfort; respect people's privacy, dignity, and property, including their land, homes, and gardens, and do not film them, record them, or approach them without permission; cooperate with other drone users, help them, and avoid disputes, and never damage, steal, or seize their drones; and take care of your own drone, keep it clean and maintained, and never leave it under someone else's control.
Summary and recommendations
- In this article we gathered the most important information about drone sales in Azerbaijan, which will help you make better decisions when buying and using a drone. Prospects and potential of drone sales in Azerbaijan: drone sales are still a new field here, but the field is developing day by day and promises great prospects, because drones have very wide uses in Azerbaijan, from tourism, journalism, education, sport, and entertainment to agriculture, construction, and transport, and the legal and technical conditions for drone use, such as registration, permits, insurance, flight areas, and flight rules, are being established by law, while sales, repair, and service offerings are also developing. Problems and challenges of drone sales in Azerbaijan: - Price: drones are still expensive, because most are imported and customs duties, taxes, and other costs are added, which makes them harder to buy. - Quality: quality is still low, because many drones on sale are not original but counterfeit or damaged, which reduces flight performance, image quality, safety, and lifespan. - Knowledge and skill requirements: flying a drone requires some knowledge and skill, but there are not enough training and practice opportunities, which lowers pilots' ability and sense of responsibility. - Public acceptance: public acceptance is still weak, because drones are thought to intrude on people's privacy, dignity, and property, and there are also concerns that they disrupt the airspace and endanger aircraft. What should be done to develop drone sales in Azerbaijan? The main steps are: reduce prices by lowering customs duties, taxes, and other costs and by supporting local production; improve quality by offering original, warrantied, certified drones and refusing to sell counterfeit or damaged ones; lower the knowledge and skill barrier by creating training and practice opportunities and by advising and helping pilots; and increase public acceptance by demonstrating the benefits and safety of drones, showing that they do not harm people's rights and interests, and ensuring that pilots follow the rules of public conduct and etiquette. Taking these steps will make it possible to develop drone sales in Azerbaijan, which will benefit both drone users and drone sellers.
FAQ
-
What is a drone?
-A drone is an unmanned aerial vehicle that is flown by remote control or autonomously.
What should you pay attention to when buying a drone in Azerbaijan?
-When buying a drone in Azerbaijan, pay attention to its legal status and permits, its quality and safety, its price, and where it can be bought.
What do you need to fly a drone in Azerbaijan?
-To fly a drone in Azerbaijan, you must meet the legal requirements: obtain a permit from the State Civil Aviation Service, register the drone, obtain a certificate or attestation confirming that it is airworthy, and report the time and place of the flight to the State Civil Aviation Service.
Where can you not fly a drone in Azerbaijan?
-You cannot fly within 5 km of airports, airfields, air bases, air radar, or air-defence facilities; within 500 m of government, diplomatic, legal, medical, educational, or religious facilities; within 150 m of crowds, mass events, sports competitions, or festivals; or over private property (land, houses, gardens, and so on) without the owner's permission.
How are drone sales developing in Azerbaijan?
-Drone sales in Azerbaijan are still a new field, but it is developing day by day and promises great prospects. To develop it further, prices need to come down, quality needs to improve, the knowledge and skill barrier needs to be lowered, and public acceptance needs to grow. Thank you for reading this article; we hope it has been useful to you. If you want to buy or use a drone, get in touch with us. We offer the best drones for you.
-
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download the Ultimate Domino Online APK and Challenge Yourself in Six Variants.md b/spaces/1phancelerku/anime-remove-background/Download the Ultimate Domino Online APK and Challenge Yourself in Six Variants.md
deleted file mode 100644
index c94045264ba15dcf762d50d9055ec5d51793af43..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download the Ultimate Domino Online APK and Challenge Yourself in Six Variants.md
+++ /dev/null
@@ -1,136 +0,0 @@
-
-
How to Download Domino Online APK for Android
-
Do you love playing dominoes with your friends and family? Do you want to enjoy this classic board game anytime and anywhere on your Android device? If yes, then you should download domino online apk, a free and fun app that lets you play dominoes online with millions of players from around the world. In this article, we will show you what domino online apk is, what features and benefits it offers, how to download and install it on your Android device, and how to play domino online with your friends. Let's get started!
-
What is Domino Online APK?
-
Domino online apk is an app that allows you to play dominoes online with other players or against the computer. You can choose from different game modes, such as Fives, Threes, Block, and Draw. You can also customize your domino tiles, table, and background. Domino online apk is easy to use, has smooth graphics and animations, and supports offline mode. You can also chat with other players, send emojis, and earn coins and rewards.
Play dominoes online with millions of players from around the world
-
Choose from different game modes, such as Fives, Threes, Block, and Draw
-
Customize your domino tiles, table, and background
-
Chat with other players, send emojis, and earn coins and rewards
-
Play offline without internet connection
-
Enjoy smooth graphics and animations
-
Learn how to play dominoes with tutorials and tips
-
-
Benefits of Domino Online APK
-
-
Have fun with this great and classic board game
-
Create private matches and play with your friends online
-
Improve your strategy, reasoning, and logic skills
-
Relax and unwind with a simple and addictive game
-
Challenge yourself and compete with other players
-
No registration or login required
-
Free to download and play
-
-
How to Download and Install Domino Online APK on Android
-
If you want to download domino online apk on your Android device, you need to follow these simple steps:
-
Step 1: Enable Unknown Sources
-
Since domino online apk is not available on the Google Play Store, you need to enable unknown sources on your device. This will allow you to install apps from third-party sources. To do this, go to Settings > Security > Unknown Sources and toggle it on.
-
Step 2: Download Domino Online APK File
-
Next, you need to download the domino online apk file from a reliable source. You can use the link below to download it directly from our website. The file size is about 40 MB and it is safe and virus-free.
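Because an APK downloaded outside the Google Play Store can be tampered with in transit, it is worth comparing the file's checksum with one published by a source you trust before installing it. The snippet below is only a generic sketch, not something provided with the game: the file name domino-online.apk and the expected hash are placeholders that you would replace with the real values.

```python
import hashlib
from pathlib import Path

APK = Path("domino-online.apk")  # placeholder for the downloaded file
EXPECTED_SHA256 = "paste-the-hash-published-by-the-download-page-here"  # placeholder value

def sha256_of(path: Path) -> str:
    """Compute the SHA-256 digest of a file, reading it in 1 MB chunks."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

if __name__ == "__main__":
    actual = sha256_of(APK)
    print("SHA-256:", actual)
    if actual.lower() != EXPECTED_SHA256.lower():
        print("Warning: checksum does not match the published value; do not install this file.")
```

If the two values do not match, download the file again from a source you trust rather than installing it.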
Once you have downloaded the domino online apk file, you need to install it on your device. To do this, locate the file in your downloads folder and tap on it. You will see a pop-up window asking for your permission to install the app. Tap on Install and wait for the installation process to complete.
Congratulations! You have successfully installed domino online apk on your Android device. Now, you can launch the app and enjoy playing dominoes online with your friends or other players. You can also play offline if you don't have an internet connection. Here are some tips on how to play domino online with friends.
How to Play Domino Online with Friends
-
Playing domino online with your friends is easy and fun. You can create a private match and invite your friends to join, or you can join a public match and play with random players. Here's how to do it:
-
Create a Private Match
-
If you want to play domino online with your friends, you can create a private match and invite them to join. To do this, follow these steps:
-
-
On the main menu, tap on the Play button
-
Select the game mode you want to play, such as Fives, Threes, Block, or Draw
-
Tap on the Create button
-
Choose a table and a background for your match
-
Tap on the Invite button
-
Select the friends you want to invite from your contact list or enter their usernames
-
Wait for your friends to accept your invitation and join the match
-
Start playing and have fun!
-
-
Join a Public Match
-
If you want to play domino online with other players, you can join a public match and play with random players. To do this, follow these steps:
-
-
On the main menu, tap on the Play button
-
Select the game mode you want to play, such as Fives, Threes, Block, or Draw
-
Tap on the Join button
-
Choose a table and a background for your match
-
Wait for the match to start and join other players
-
Start playing and have fun!
-
-
Chat with Other Players
-
One of the best features of domino online apk is that you can chat with other players while playing. You can send messages, emojis, and stickers to communicate with your opponents or teammates. You can also use voice chat to talk with them. To chat with other players, follow these steps:
-
-
On the game screen, tap on the Chat button
-
Type your message or select an emoji or sticker from the menu
-
Tap on the Send button
-
To use voice chat, tap on the Microphone button and hold it while speaking
-
Release the Microphone button when you finish speaking
-
To mute or unmute other players, tap on their profile pictures and select Mute or Unmute from the menu
-
-
Conclusion
-
In conclusion, domino online apk is a great app that lets you play dominoes online with your friends or other players. You can choose from different game modes, customize your tiles and table, chat with other players, and earn coins and rewards. You can also play offline without internet connection. Domino online apk is free to download and play, and it is compatible with most Android devices. If you love playing dominoes, you should definitely download domino online apk and enjoy this classic board game anytime and anywhere.
-
FAQs
-
-
Q: Is domino online apk safe to download and install?
-
A: Yes, domino online apk is safe to download and install. It does not contain any viruses or malware, and it does not require any permissions or access to your device.
-
Q: How can I earn coins and rewards in domino online apk?
-
A: You can earn coins and rewards in domino online apk by playing matches, winning games, completing daily tasks, watching ads, inviting friends, and spinning the wheel.
-
Q: How can I use my coins in domino online apk?
-
A: You can use your coins in domino online apk to buy new tiles, tables, backgrounds, emojis, stickers, and voice packs. You can also use them to enter higher stakes matches and tournaments.
-
Q: How can I update domino online apk?
-
A: You can update domino online apk by visiting our website and downloading the latest version of the app. You can also enable automatic updates on your device settings.
-
Q: How can I contact the developers of domino online apk?
-
A: You can contact the developers of domino online apk by sending an email to support@dominoonline.com or by leaving feedback on the app store. We appreciate your comments and suggestions.
-
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Downloading Audio Clips and Voice Recordings From Facebook Messenger.md b/spaces/1phancelerku/anime-remove-background/Downloading Audio Clips and Voice Recordings From Facebook Messenger.md
deleted file mode 100644
index 7c75cb544e6ca51463476f95445cf808f62c7b5e..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Downloading Audio Clips and Voice Recordings From Facebook Messenger.md
+++ /dev/null
@@ -1,130 +0,0 @@
-
-
How to Download Messenger Voice Message
-
Messenger voice message is a feature that allows you to record and send audio clips to your friends and family on Facebook. It is a convenient way to communicate when you have a lot to say or don't have time to type. But what if you want to download and save a messenger voice message for future reference or offline listening? Unfortunately, Messenger doesn't offer an easy option to do that. However, that doesn't mean it is impossible.
In this article, we will show you how to download messenger voice message on different devices, such as PC, iPhone, and Android. We will also explain why it is not possible to download voice messages directly from the Messenger app or mobile browser, and how you can access them easily or transfer them from one device to another. Let's get started!
-
How to Download Messenger Voice Message on PC
-
If you are using a PC, you have two options for downloading messenger voice message: using the mobile version of Facebook on a browser, or using the Messenger desktop app. Here are the steps for each method:
Using the Mobile Version of Facebook
-
Open the mobile version of Facebook in a web browser and log in to your account.
-
Click the chat or Messenger icon at the top-right corner.
-
Select the message with the voice message that you want to download.
-
Click the three dots next to the voice message and select "Download."
-
Select the folder where you want to save the audio and click "Save."
-
-
The voice message will be saved as an mp4 file on your computer. You can play it with any media player or convert it to another format if you wish.
-
-
Using the Messenger Desktop App
-
-
Download and install the Messenger desktop app from Microsoft Store or Mac App Store.
-
Launch the app and log in with your Facebook account.
-
Select the conversation with the voice message that you want to download.
-
Right-click on the voice message and select "Save As."
-
Select the folder where you want to save the audio and click "Save."
-
-
The voice message will be saved as an mp4 file on your computer. You can play it with any media player or convert it to another format if you wish.
-
How to Download Messenger Voice Message on iPhone
-
If you are using an iPhone, you might be disappointed to learn that there is no way to download messenger voice message directly from the Messenger app or the mobile browser. This is because the voice messages are stored on Facebook's servers and not on your device, and the Messenger app and the mobile browser do not have the option to download them. However, there are some workarounds that you can try to access or transfer your voice messages on iPhone. Here are some of them:
-
Why It Is Not Possible to Download Messenger Voice Message on iPhone
-
The reason why you cannot download messenger voice message on iPhone is that the voice messages are encoded in a special format called AMR (Adaptive Multi-Rate), which is not supported by most iOS apps. AMR is a compressed audio format that is designed for voice communication and has a low bitrate and quality. It is used by Facebook to save bandwidth and storage space for voice messages.
-
However, AMR is not compatible with most iOS apps, such as the built-in Music app, iTunes, or GarageBand. Therefore, even if you manage to download the voice message as an AMR file, you will not be able to play it or edit it on your iPhone. You will need a third-party app that can play or convert AMR files, such as VLC Media Player or iConv.
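If you do manage to get the raw audio file onto a computer, one common way to make it playable everywhere is to convert it with ffmpeg, which can typically decode AMR clips when built with its usual codecs. This is a hedged sketch rather than an official Messenger workflow: voice_message.amr and voice_message.mp3 are placeholder file names, and it assumes ffmpeg is installed and on your PATH.

```python
import subprocess
from pathlib import Path

SOURCE = Path("voice_message.amr")  # placeholder: the audio file you saved
TARGET = Path("voice_message.mp3")  # placeholder: the converted copy

def convert(src: Path, dst: Path) -> None:
    """Convert the voice clip to MP3 with ffmpeg; -y overwrites an existing output file."""
    if not src.is_file():
        raise FileNotFoundError(f"Audio file not found: {src}")
    subprocess.run(["ffmpeg", "-y", "-i", str(src), str(dst)], check=True)

if __name__ == "__main__":
    convert(SOURCE, TARGET)
```

The same approach works for the mp4 files saved with the PC methods above if you prefer a plain audio format.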
-
How to Access Messenger Voice Message Easily on iPhone
-
One of the easiest ways to access your messenger voice message on iPhone is to send it to yourself on Messenger. This way, you can listen to it anytime without having to scroll through your conversations or search for it. Here are the steps for doing this:
-
-
Open the Messenger app and tap the chat with the voice message that you want to access.
-
Tap and hold the voice message until a menu pops up.
-
Select "Forward" and then choose yourself as the recipient.
-
Tap "Send" and then go back to your chat list.
-
Tap your own profile picture at the top-left corner and then select "Message Requests."
-
You will see the voice message that you just forwarded to yourself. Tap it and then tap "Accept."
-
You can now listen to the voice message anytime by tapping your own chat.
-
-
This method works for both sent and received voice messages. However, it does not allow you to download or save the voice message as a file on your iPhone.
-
How to Transfer Messenger Voice Message from PC to iPhone
-
If you want to download and save your messenger voice message as a file on your iPhone, you will need to use a PC as an intermediary. First, you will need to download the voice message on your PC using one of the methods described above. Then, you will need to transfer it to your iPhone using one of these options:
- Email: You can email the voice message file as an attachment from your PC to your iPhone. Then, you can open the email on your iPhone and tap the attachment to play it or save it to your Files app.
- Messaging app: You can use a messaging app that supports file sharing, such as WhatsApp, Telegram, or Signal, to send the voice message file from your PC to your iPhone. Then, you can open the messaging app on your iPhone and tap the file to play it or save it to your Files app.
- Cable connection: You can connect your iPhone to your PC using a USB cable and use iTunes or Finder (depending on your macOS version) to sync the voice message file from your PC to your iPhone. Then, you can open the Music app or Files app on your iPhone and find the file in your library or folders.
How to Download Messenger Voice Message on Android
-
If you are using an Android device, you might also face some challenges when trying to download a Messenger voice message. As on iPhone, there is no way to download voice messages directly from the Messenger app or the mobile browser on Android, and for the same reasons explained above: the voice messages are stored on Facebook's servers and encoded in AMR format. However, there are some workarounds you can try to access or transfer your voice messages on Android. Here are some of them:
-
Why It Is Not Possible to Download Messenger Voice Message on Android
-
The reason you cannot download a Messenger voice message on Android is that the Messenger app and the mobile browser do not offer an option to download voice messages. The voice messages are not stored as files on your device but as data on Facebook's servers; the app and the browser only stream them when you play them and never save them locally. Therefore, you cannot access them offline or save them to your storage.
-
Moreover, the voice messages are encoded in AMR format, which is not supported by most Android apps. AMR is a compressed audio format that is designed for voice communication and has a low bitrate and quality. It is used by Facebook to save bandwidth and storage space for voice messages.
-
However, AMR is not compatible with most Android apps, such as the built-in Music app, Google Play Music, or SoundCloud. Therefore, even if you manage to download the voice message as an AMR file, you will not be able to play it or edit it on your Android device. You will need a third-party app that can play or convert AMR files, such as VLC Media Player or Media Converter.
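Before picking a player or converter, it can help to confirm what the downloaded file actually contains, since the extension does not always match the codec. A small sketch using ffprobe (shipped with FFmpeg, which is assumed to be installed; the file name is a placeholder):

```python
import subprocess

# Print the codec of each stream in the downloaded file.
# Assumes ffprobe (part of FFmpeg) is on PATH; the file name is a placeholder.
subprocess.run(
    [
        "ffprobe", "-v", "error",
        "-show_entries", "stream=codec_name,codec_type",
        "-of", "default=noprint_wrappers=1",
        "downloaded_voice_message",
    ],
    check=True,
)
```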
-
How to Access Messenger Voice Message Easily on Android
-
One of the easiest ways to access your messenger voice message on Android is to send it to yourself on Messenger. This way, you can listen to it anytime without having to scroll through your conversations or search for it. Here are the steps for doing this:
-
-
Open the Messenger app and tap the chat with the voice message that you want to access.
-
Tap and hold the voice message until a menu pops up.
-
Select "Forward" and then choose yourself as the recipient.
-
Tap "Send" and then go back to your chat list.
-
Tap your own profile picture at the top-left corner and then select "Message Requests."
-
You will see the voice message that you just forwarded to yourself. Tap it and then tap "Accept."
-
You can now listen to the voice message anytime by tapping your own chat.
-
-
This method works for both sent and received voice messages. However, it does not allow you to download or save the voice message as a file on your Android device.
-
How to Transfer Messenger Voice Message from PC to Android
-
If you want to download and save your messenger voice message as a file on your Android device, you will need to use a PC as an intermediary. First, you will need to download the voice message on your PC using one of the methods described above. Then, you will need to transfer it to your Android device using one of these options:
- Email: You can email the voice message file as an attachment from your PC to your Android device. Then, you can open the email on your Android device and tap the attachment to play it or save it to your Files app.
- Messaging app: You can use a messaging app that supports file sharing, such as WhatsApp, Telegram, or Signal, to send the voice message file from your PC to your Android device. Then, you can open the messaging app on your Android device and tap the file to play it or save it to your Files app.
- Cable connection: You can connect your Android device to your PC using a USB cable and use a file manager app, such as ES File Explorer or File Commander, to copy the voice message file from your PC to your Android device. Then, you can open the Files app on your Android device and find the file in your folders.
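If you go the cable route and have the Android platform tools installed, adb offers a command-line alternative to a file manager app. A minimal sketch (it assumes USB debugging is enabled on the phone; both paths are placeholders):

```python
import subprocess

# Copy the converted voice message into the phone's Download folder over USB.
# Assumes adb (Android platform tools) is installed and USB debugging is enabled;
# both paths are placeholders.
subprocess.run(
    ["adb", "push", "voice_message.mp3", "/sdcard/Download/voice_message.mp3"],
    check=True,
)
```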
Conclusion
-
Messenger voice messages are a handy way to send and receive audio clips on Facebook. However, if you want to download and save them for offline listening or future reference, you might encounter some difficulties. This is because Messenger does not offer an easy option to download voice messages, and they are stored on Facebook's servers in a format that is not compatible with most devices.
-
In this article, we have shown you how to download a Messenger voice message on different devices, such as PC, iPhone, and Android. We have also explained why it is not possible to download voice messages directly from the Messenger app or mobile browser, and how you can access them easily or transfer them from one device to another. We hope this article has been helpful and that you have learned something new today.
-
Here are some tips for managing your messenger voice messages:
-
-
Delete unwanted or old voice messages regularly to free up space on Facebook's servers and avoid cluttering your conversations.
-
Use headphones or earphones when listening to voice messages in public places or noisy environments.
-
Adjust the playback speed of voice messages according to your preference by tapping the 1x button at the bottom-right corner of the audio player.
-
Use text-to-speech or speech-to-text features if you prefer reading or writing over listening or speaking.
-
Be respectful and mindful of the privacy and preferences of your chat partners when sending or receiving voice messages.
-
-
FAQs
-
Here are some frequently asked questions and their answers related to messenger voice messages:
-
Q: How long can a messenger voice message be?
-
A: The maximum length of a messenger voice message is 60 seconds. If you want to send a longer audio clip, you will need to use another app, such as Voice Recorder or Audacity, and then share it as a file on Messenger.
-
Q: How can I delete a messenger voice message?
-
A: To delete a messenger voice message, you need to tap and hold the voice message until a menu pops up. Then, you need to select "Remove" and then choose whether you want to remove it for yourself or for everyone. Note that you can only remove a voice message for everyone within 10 minutes of sending it.
-
Q: How can I mute or unmute a messenger voice message?
-
A: To mute or unmute a messenger voice message, you need to tap the speaker icon at the bottom-left corner of the audio player. This will toggle the sound on or off for the voice message.
-
Q: How can I pause or resume a messenger voice message?
-
A: To pause or resume a messenger voice message, you need to tap the play or pause button at the bottom-center of the audio player. This will pause or resume the playback of the voice message.
-
Q: How can I rewind or fast-forward a messenger voice message?
-
A: To rewind or fast-forward a messenger voice message, you need to drag the slider at the bottom of the audio player. This will move the playback position of the voice message backward or forward.
-
-
\ No newline at end of file
diff --git a/spaces/1toTree/lora_test/ppdiffusers/pipelines/stable_diffusion/pipeline_fastdeploy_stable_diffusion_mega.py b/spaces/1toTree/lora_test/ppdiffusers/pipelines/stable_diffusion/pipeline_fastdeploy_stable_diffusion_mega.py
deleted file mode 100644
index 80f961b7dcc640e8279596443c5afbad2c378932..0000000000000000000000000000000000000000
--- a/spaces/1toTree/lora_test/ppdiffusers/pipelines/stable_diffusion/pipeline_fastdeploy_stable_diffusion_mega.py
+++ /dev/null
@@ -1,193 +0,0 @@
-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import inspect
-from typing import Callable, List, Optional, Union
-
-import numpy as np
-import PIL.Image
-
-from ...utils import logging
-from .pipeline_fastdeploy_stable_diffusion import FastDeployStableDiffusionPipeline
-from .pipeline_fastdeploy_stable_diffusion_img2img import (
- FastDeployStableDiffusionImg2ImgPipeline,
-)
-from .pipeline_fastdeploy_stable_diffusion_inpaint_legacy import (
- FastDeployStableDiffusionInpaintPipelineLegacy,
-)
-
-logger = logging.get_logger(__name__) # pylint: disable=invalid-name
-
-
-class FastDeployStableDiffusionMegaPipeline(FastDeployStableDiffusionPipeline):
- r"""
- Pipeline for generation using FastDeployStableDiffusion.
-
- This model inherits from [`FastDeployStableDiffusionPipeline`]. Check the superclass documentation for the generic methods the
- library implements for all the pipelines (such as downloading or saving etc.)
-
- Args:
- vae_encoder ([`FastDeployRuntimeModel`]):
- Variational Auto-Encoder (VAE) Model to encode images to latent representations.
- vae_decoder ([`FastDeployRuntimeModel`]):
- Variational Auto-Encoder (VAE) Model to decode images from latent representations.
- text_encoder ([`FastDeployRuntimeModel`]):
- Frozen text-encoder. Stable Diffusion uses the text portion of
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
- tokenizer (`CLIPTokenizer`):
- Tokenizer of class
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
- unet ([`FastDeployRuntimeModel`]): Conditional U-Net architecture to denoise the encoded image latents.
- scheduler ([`SchedulerMixin`]):
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], [`PNDMScheduler`], [`EulerDiscreteScheduler`], [`EulerAncestralDiscreteScheduler`]
- or [`DPMSolverMultistepScheduler`].
- safety_checker ([`FastDeployRuntimeModel`]):
- Classification module that estimates whether generated images could be considered offensive or harmful.
- Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
- feature_extractor ([`CLIPFeatureExtractor`]):
- Model that extracts features from generated images to be used as inputs for the `safety_checker`.
- """
- _optional_components = ["safety_checker", "feature_extractor"]
-
- def __call__(self, *args, **kwargs):
- return self.text2img(*args, **kwargs)
-
- def text2img(
- self,
- prompt: Union[str, List[str]],
- height: Optional[int] = 512,
- width: Optional[int] = 512,
- num_inference_steps: Optional[int] = 50,
- guidance_scale: Optional[float] = 7.5,
- negative_prompt: Optional[Union[str, List[str]]] = None,
- num_images_per_prompt: Optional[int] = 1,
- eta: Optional[float] = 0.0,
- generator: Optional[np.random.RandomState] = None,
- latents: Optional[np.ndarray] = None,
- output_type: Optional[str] = "pil",
- return_dict: bool = True,
- callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
- callback_steps: Optional[int] = 1,
- ):
-
- expected_components = inspect.signature(FastDeployStableDiffusionPipeline.__init__).parameters.keys()
- components = {name: component for name, component in self.components.items() if name in expected_components}
- temp_pipeline = FastDeployStableDiffusionPipeline(
- **components, requires_safety_checker=self.config.requires_safety_checker
- )
- output = temp_pipeline(
- prompt=prompt,
- height=height,
- width=width,
- num_inference_steps=num_inference_steps,
- guidance_scale=guidance_scale,
- negative_prompt=negative_prompt,
- num_images_per_prompt=num_images_per_prompt,
- eta=eta,
- generator=generator,
- latents=latents,
- output_type=output_type,
- return_dict=return_dict,
- callback=callback,
- callback_steps=callback_steps,
- )
- return output
-
- def img2img(
- self,
- prompt: Union[str, List[str]],
- image: Union[np.ndarray, PIL.Image.Image],
- strength: float = 0.8,
- num_inference_steps: Optional[int] = 50,
- guidance_scale: Optional[float] = 7.5,
- negative_prompt: Optional[Union[str, List[str]]] = None,
- num_images_per_prompt: Optional[int] = 1,
- eta: Optional[float] = 0.0,
- generator: Optional[np.random.RandomState] = None,
- noise: Optional[np.ndarray] = None,
- output_type: Optional[str] = "pil",
- return_dict: bool = True,
- callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
- callback_steps: Optional[int] = 1,
- ):
- expected_components = inspect.signature(FastDeployStableDiffusionImg2ImgPipeline.__init__).parameters.keys()
- components = {name: component for name, component in self.components.items() if name in expected_components}
- temp_pipeline = FastDeployStableDiffusionImg2ImgPipeline(
- **components, requires_safety_checker=self.config.requires_safety_checker
- )
- output = temp_pipeline(
- prompt=prompt,
- image=image,
- strength=strength,
- num_inference_steps=num_inference_steps,
- guidance_scale=guidance_scale,
- negative_prompt=negative_prompt,
- num_images_per_prompt=num_images_per_prompt,
- eta=eta,
- generator=generator,
- noise=noise,
- output_type=output_type,
- return_dict=return_dict,
- callback=callback,
- callback_steps=callback_steps,
- )
-
- return output
-
- def inpaint_legacy(
- self,
- prompt: Union[str, List[str]],
- image: Union[np.ndarray, PIL.Image.Image],
- mask_image: Union[np.ndarray, PIL.Image.Image],
- strength: float = 0.8,
- num_inference_steps: Optional[int] = 50,
- guidance_scale: Optional[float] = 7.5,
- negative_prompt: Optional[Union[str, List[str]]] = None,
- num_images_per_prompt: Optional[int] = 1,
- eta: Optional[float] = 0.0,
- generator: Optional[np.random.RandomState] = None,
- noise: Optional[np.ndarray] = None,
- output_type: Optional[str] = "pil",
- return_dict: bool = True,
- callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
- callback_steps: Optional[int] = 1,
- ):
- expected_components = inspect.signature(
- FastDeployStableDiffusionInpaintPipelineLegacy.__init__
- ).parameters.keys()
- components = {name: component for name, component in self.components.items() if name in expected_components}
- temp_pipeline = FastDeployStableDiffusionInpaintPipelineLegacy(
- **components, requires_safety_checker=self.config.requires_safety_checker
- )
- output = temp_pipeline(
- prompt=prompt,
- image=image,
- mask_image=mask_image,
- strength=strength,
- num_inference_steps=num_inference_steps,
- guidance_scale=guidance_scale,
- negative_prompt=negative_prompt,
- num_images_per_prompt=num_images_per_prompt,
- eta=eta,
- generator=generator,
- noise=noise,
- output_type=output_type,
- return_dict=return_dict,
- callback=callback,
- callback_steps=callback_steps,
- )
-
- return output
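For context, the mega pipeline above is just a thin dispatcher over the three task-specific FastDeploy pipelines. A minimal usage sketch, assuming the class is re-exported from the `ppdiffusers` package root, that it inherits a `from_pretrained` loader from the base pipeline, and that a FastDeploy-exported model directory exists at the given placeholder path:

```python
from ppdiffusers import FastDeployStableDiffusionMegaPipeline  # assumed package-level export

# "./stable-diffusion-fastdeploy" is a placeholder for a FastDeploy-exported model directory.
pipe = FastDeployStableDiffusionMegaPipeline.from_pretrained("./stable-diffusion-fastdeploy")

# __call__ dispatches to text2img; img2img and inpaint_legacy additionally take an image/mask.
result = pipe.text2img(prompt="a watercolor painting of a lighthouse", num_inference_steps=50)
result.images[0].save("lighthouse.png")  # assuming the usual `.images` field on the output
```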
diff --git a/spaces/2023Liu2023/bingo/src/components/theme-toggle.tsx b/spaces/2023Liu2023/bingo/src/components/theme-toggle.tsx
deleted file mode 100644
index 67d3f1a2c163ccbeb52c40a7e42f107190237154..0000000000000000000000000000000000000000
--- a/spaces/2023Liu2023/bingo/src/components/theme-toggle.tsx
+++ /dev/null
@@ -1,31 +0,0 @@
-'use client'
-
-import * as React from 'react'
-import { useTheme } from 'next-themes'
-
-import { Button } from '@/components/ui/button'
-import { IconMoon, IconSun } from '@/components/ui/icons'
-
-export function ThemeToggle() {
- const { setTheme, theme } = useTheme()
- const [_, startTransition] = React.useTransition()
-
-  return (
-    // NOTE: the exact Button markup was lost from this hunk; the JSX below is an
-    // assumed reconstruction based on the Button, IconMoon and IconSun imports above.
-    <Button
-      variant="ghost"
-      size="icon"
-      onClick={() => {
-        startTransition(() => {
-          setTheme(theme === 'light' ? 'dark' : 'light')
-        })
-      }}
-    >
-      {theme === 'dark' ? <IconMoon /> : <IconSun />}
-      <span className="sr-only">Toggle theme</span>
-    </Button>
-  )
-}
diff --git a/spaces/7eu7d7/anime-ai-detect-fucker/attack.py b/spaces/7eu7d7/anime-ai-detect-fucker/attack.py
deleted file mode 100644
index 6437b82ac2bd20efcdf0b061c0b915e1a0323800..0000000000000000000000000000000000000000
--- a/spaces/7eu7d7/anime-ai-detect-fucker/attack.py
+++ /dev/null
@@ -1,113 +0,0 @@
-import torch
-import os
-from transformers import BeitFeatureExtractor, BeitForImageClassification
-from PIL import Image
-
-from torchvision.utils import save_image
-import torch.nn.functional as F
-from torchvision import transforms
-
-from attacker import *
-from torch.nn import CrossEntropyLoss
-
-import argparse
-
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-def make_args():
- parser = argparse.ArgumentParser(description='PyTorch MS_COCO Training')
-
- parser.add_argument('inputs', type=str)
- parser.add_argument('--out_dir', type=str, default='./output')
- parser.add_argument('--target', type=str, default='auto', help='[auto, ai, human]')
- parser.add_argument('--eps', type=float, default=8/8, help='Noise intensity ')
- parser.add_argument('--step_size', type=float, default=1.087313/8, help='Attack step size')
- parser.add_argument('--steps', type=int, default=20, help='Attack step count')
-
- parser.add_argument('--test_atk', action='store_true')
-
- return parser.parse_args()
-
-class Attacker:
- def __init__(self, args, pgd_callback):
- self.args=args
- os.makedirs(args.out_dir, exist_ok=True)
-
-        print('Loading model...')
- self.feature_extractor = BeitFeatureExtractor.from_pretrained('saltacc/anime-ai-detect')
- self.model = BeitForImageClassification.from_pretrained('saltacc/anime-ai-detect').to(device)
-        print('Model loaded')
-
-        if args.target=='ai':  # attack so the image gets classified as AI-generated
- self.target = torch.tensor([1]).to(device)
- elif args.target=='human':
- self.target = torch.tensor([0]).to(device)
-
- dataset_mean_t = torch.tensor([0.5, 0.5, 0.5]).view(1, -1, 1, 1).to(device)
- dataset_std_t = torch.tensor([0.5, 0.5, 0.5]).view(1, -1, 1, 1).to(device)
- self.pgd = PGD(self.model, img_transform=(lambda x: (x - dataset_mean_t) / dataset_std_t, lambda x: x * dataset_std_t + dataset_mean_t))
- self.pgd.set_para(eps=(args.eps * 2) / 255, alpha=lambda: (args.step_size * 2) / 255, iters=args.steps)
- self.pgd.set_loss(CrossEntropyLoss())
- self.pgd.set_call_back(pgd_callback)
-
- def save_image(self, image, noise, img_name):
-        # when resizing, only the noise is rescaled to match the original image size
- W, H = image.size
- noise = F.interpolate(noise, size=(H, W), mode='bicubic')
- img_save = transforms.ToTensor()(image) + noise
- save_image(img_save, os.path.join(self.args.out_dir, f'{img_name[:img_name.rfind(".")]}_atk.png'))
-
- def attack_(self, image):
- inputs = self.feature_extractor(images=image, return_tensors="pt")['pixel_values'].to(device)
-
- if self.args.target == 'auto':
- with torch.no_grad():
- outputs = self.model(inputs)
- logits = outputs.logits
- cls = logits.argmax(-1).item()
- target = torch.tensor([cls]).to(device)
- else:
- target = self.target
-
- if self.args.test_atk:
- self.test_image(inputs, 'before attack')
-
- atk_img = self.pgd.attack(inputs, target)
-
- noise = self.pgd.img_transform[1](atk_img).detach().cpu() - self.pgd.img_transform[1](inputs).detach().cpu()
-
- if self.args.test_atk:
- self.test_image(atk_img, 'after attack')
-
- return atk_img, noise
-
- def attack_one(self, path):
- image = Image.open(path).convert('RGB')
- atk_img, noise = self.attack_(image)
- self.save_image(image, noise, os.path.basename(path))
-
- def attack(self, path):
- count=0
- if os.path.isdir(path):
- img_list=[os.path.join(path, x) for x in os.listdir(path)]
- for img in img_list:
- if (img.lower().endswith(('.bmp', '.dib', '.png', '.jpg', '.jpeg', '.pbm', '.pgm', '.ppm', '.tif', '.tiff'))):
- self.attack_one(img)
- count+=1
- else:
- if (path.lower().endswith(('.bmp', '.dib', '.png', '.jpg', '.jpeg', '.pbm', '.pgm', '.ppm', '.tif', '.tiff'))):
- self.attack_one(path)
- count += 1
-        print(f'Attacked {count} images in total')
-
- @torch.no_grad()
- def test_image(self, img, pre_fix):
- outputs = self.model(img)
- logits = outputs.logits
- predicted_class_idx = logits.argmax(-1).item()
- print(pre_fix, "class:", self.model.config.id2label[predicted_class_idx], 'logits:', logits)
-
-if __name__ == '__main__':
- args=make_args()
-    # Attacker's constructor expects a PGD progress callback; a no-op lambda is enough for CLI use.
-    attacker = Attacker(args, lambda *cb_args, **cb_kwargs: None)
- attacker.attack(args.inputs)
\ No newline at end of file
diff --git a/spaces/A00001/bingothoo/Dockerfile b/spaces/A00001/bingothoo/Dockerfile
deleted file mode 100644
index 3aa2b29b5fc4fa8b8238955acd7f1fde13ce5e1a..0000000000000000000000000000000000000000
--- a/spaces/A00001/bingothoo/Dockerfile
+++ /dev/null
@@ -1,36 +0,0 @@
-FROM node:18
-
-
-ARG DEBIAN_FRONTEND=noninteractive
-
-ENV BING_HEADER ""
-
-# Set home to the user's home directory
-ENV HOME=/home/user \
- PATH=/home/user/.local/bin:$PATH
-
-# Set up a new user named "user" with user ID 1000
-RUN useradd -o -u 1000 user && mkdir -p $HOME/app && chown -R user $HOME
-
-# Switch to the "user" user
-USER user
-
-# Set the working directory to the user's home directory
-WORKDIR $HOME/app
-
-# Install app dependencies
-# A wildcard is used to ensure both package.json AND package-lock.json are copied
-# where available (npm@5+)
-COPY --chown=user package*.json $HOME/app/
-
-RUN npm install
-
-# Copy the current directory contents into the container at $HOME/app setting the owner to the user
-COPY --chown=user . $HOME/app/
-
-RUN npm run build
-
-ENV PORT 7860
-EXPOSE 7860
-
-CMD npm start
diff --git a/spaces/AI-Hobbyist/Hoyo-RVC/infer_uvr5.py b/spaces/AI-Hobbyist/Hoyo-RVC/infer_uvr5.py
deleted file mode 100644
index 884c841dd6179677bd0a6d5f5f639954a206a77e..0000000000000000000000000000000000000000
--- a/spaces/AI-Hobbyist/Hoyo-RVC/infer_uvr5.py
+++ /dev/null
@@ -1,363 +0,0 @@
-import os, sys, torch, warnings, pdb
-
-now_dir = os.getcwd()
-sys.path.append(now_dir)
-from json import load as ll
-
-warnings.filterwarnings("ignore")
-import librosa
-import importlib
-import numpy as np
-import hashlib, math
-from tqdm import tqdm
-from uvr5_pack.lib_v5 import spec_utils
-from uvr5_pack.utils import _get_name_params, inference
-from uvr5_pack.lib_v5.model_param_init import ModelParameters
-import soundfile as sf
-from uvr5_pack.lib_v5.nets_new import CascadedNet
-from uvr5_pack.lib_v5 import nets_61968KB as nets
-
-
-class _audio_pre_:
- def __init__(self, agg, model_path, device, is_half):
- self.model_path = model_path
- self.device = device
- self.data = {
- # Processing Options
- "postprocess": False,
- "tta": False,
- # Constants
- "window_size": 512,
- "agg": agg,
- "high_end_process": "mirroring",
- }
- mp = ModelParameters("uvr5_pack/lib_v5/modelparams/4band_v2.json")
- model = nets.CascadedASPPNet(mp.param["bins"] * 2)
- cpk = torch.load(model_path, map_location="cpu")
- model.load_state_dict(cpk)
- model.eval()
- if is_half:
- model = model.half().to(device)
- else:
- model = model.to(device)
-
- self.mp = mp
- self.model = model
-
- def _path_audio_(self, music_file, ins_root=None, vocal_root=None, format="flac"):
- if ins_root is None and vocal_root is None:
- return "No save root."
- name = os.path.basename(music_file)
- if ins_root is not None:
- os.makedirs(ins_root, exist_ok=True)
- if vocal_root is not None:
- os.makedirs(vocal_root, exist_ok=True)
- X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {}
- bands_n = len(self.mp.param["band"])
- # print(bands_n)
- for d in range(bands_n, 0, -1):
- bp = self.mp.param["band"][d]
- if d == bands_n: # high-end band
- (
- X_wave[d],
- _,
-                ) = librosa.core.load(  # in theory librosa can mis-load some audio; ffmpeg would be more robust, but switching was judged too much hassle
- music_file,
- bp["sr"],
- False,
- dtype=np.float32,
- res_type=bp["res_type"],
- )
- if X_wave[d].ndim == 1:
- X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]])
- else: # lower bands
- X_wave[d] = librosa.core.resample(
- X_wave[d + 1],
- self.mp.param["band"][d + 1]["sr"],
- bp["sr"],
- res_type=bp["res_type"],
- )
- # Stft of wave source
- X_spec_s[d] = spec_utils.wave_to_spectrogram_mt(
- X_wave[d],
- bp["hl"],
- bp["n_fft"],
- self.mp.param["mid_side"],
- self.mp.param["mid_side_b2"],
- self.mp.param["reverse"],
- )
- # pdb.set_trace()
- if d == bands_n and self.data["high_end_process"] != "none":
- input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + (
- self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"]
- )
- input_high_end = X_spec_s[d][
- :, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, :
- ]
-
- X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp)
- aggresive_set = float(self.data["agg"] / 100)
- aggressiveness = {
- "value": aggresive_set,
- "split_bin": self.mp.param["band"][1]["crop_stop"],
- }
- with torch.no_grad():
- pred, X_mag, X_phase = inference(
- X_spec_m, self.device, self.model, aggressiveness, self.data
- )
- # Postprocess
- if self.data["postprocess"]:
- pred_inv = np.clip(X_mag - pred, 0, np.inf)
- pred = spec_utils.mask_silence(pred, pred_inv)
- y_spec_m = pred * X_phase
- v_spec_m = X_spec_m - y_spec_m
-
- if ins_root is not None:
- if self.data["high_end_process"].startswith("mirroring"):
- input_high_end_ = spec_utils.mirroring(
- self.data["high_end_process"], y_spec_m, input_high_end, self.mp
- )
- wav_instrument = spec_utils.cmb_spectrogram_to_wave(
- y_spec_m, self.mp, input_high_end_h, input_high_end_
- )
- else:
- wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp)
- print("%s instruments done" % name)
- if format in ["wav", "flac"]:
- sf.write(
- os.path.join(
- ins_root,
- "instrument_{}_{}.{}".format(name, self.data["agg"], format),
- ),
- (np.array(wav_instrument) * 32768).astype("int16"),
- self.mp.param["sr"],
- ) #
- else:
- path = os.path.join(
- ins_root, "instrument_{}_{}.wav".format(name, self.data["agg"])
- )
- sf.write(
- path,
- (np.array(wav_instrument) * 32768).astype("int16"),
- self.mp.param["sr"],
- )
- if os.path.exists(path):
- os.system(
- "ffmpeg -i %s -vn %s -q:a 2 -y"
- % (path, path[:-4] + ".%s" % format)
- )
- if vocal_root is not None:
- if self.data["high_end_process"].startswith("mirroring"):
- input_high_end_ = spec_utils.mirroring(
- self.data["high_end_process"], v_spec_m, input_high_end, self.mp
- )
- wav_vocals = spec_utils.cmb_spectrogram_to_wave(
- v_spec_m, self.mp, input_high_end_h, input_high_end_
- )
- else:
- wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp)
- print("%s vocals done" % name)
- if format in ["wav", "flac"]:
- sf.write(
- os.path.join(
- vocal_root,
- "vocal_{}_{}.{}".format(name, self.data["agg"], format),
- ),
- (np.array(wav_vocals) * 32768).astype("int16"),
- self.mp.param["sr"],
- )
- else:
- path = os.path.join(
- vocal_root, "vocal_{}_{}.wav".format(name, self.data["agg"])
- )
- sf.write(
- path,
- (np.array(wav_vocals) * 32768).astype("int16"),
- self.mp.param["sr"],
- )
- if os.path.exists(path):
- os.system(
- "ffmpeg -i %s -vn %s -q:a 2 -y"
- % (path, path[:-4] + ".%s" % format)
- )
-
-
-class _audio_pre_new:
- def __init__(self, agg, model_path, device, is_half):
- self.model_path = model_path
- self.device = device
- self.data = {
- # Processing Options
- "postprocess": False,
- "tta": False,
- # Constants
- "window_size": 512,
- "agg": agg,
- "high_end_process": "mirroring",
- }
- mp = ModelParameters("uvr5_pack/lib_v5/modelparams/4band_v3.json")
- nout = 64 if "DeReverb" in model_path else 48
- model = CascadedNet(mp.param["bins"] * 2, nout)
- cpk = torch.load(model_path, map_location="cpu")
- model.load_state_dict(cpk)
- model.eval()
- if is_half:
- model = model.half().to(device)
- else:
- model = model.to(device)
-
- self.mp = mp
- self.model = model
-
- def _path_audio_(
- self, music_file, vocal_root=None, ins_root=None, format="flac"
-    ):  # for the 3 VR models, the vocal and instrument outputs are swapped
- if ins_root is None and vocal_root is None:
- return "No save root."
- name = os.path.basename(music_file)
- if ins_root is not None:
- os.makedirs(ins_root, exist_ok=True)
- if vocal_root is not None:
- os.makedirs(vocal_root, exist_ok=True)
- X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {}
- bands_n = len(self.mp.param["band"])
- # print(bands_n)
- for d in range(bands_n, 0, -1):
- bp = self.mp.param["band"][d]
- if d == bands_n: # high-end band
- (
- X_wave[d],
- _,
-                ) = librosa.core.load(  # in theory librosa can mis-load some audio; ffmpeg would be more robust, but switching was judged too much hassle
- music_file,
- bp["sr"],
- False,
- dtype=np.float32,
- res_type=bp["res_type"],
- )
- if X_wave[d].ndim == 1:
- X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]])
- else: # lower bands
- X_wave[d] = librosa.core.resample(
- X_wave[d + 1],
- self.mp.param["band"][d + 1]["sr"],
- bp["sr"],
- res_type=bp["res_type"],
- )
- # Stft of wave source
- X_spec_s[d] = spec_utils.wave_to_spectrogram_mt(
- X_wave[d],
- bp["hl"],
- bp["n_fft"],
- self.mp.param["mid_side"],
- self.mp.param["mid_side_b2"],
- self.mp.param["reverse"],
- )
- # pdb.set_trace()
- if d == bands_n and self.data["high_end_process"] != "none":
- input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + (
- self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"]
- )
- input_high_end = X_spec_s[d][
- :, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, :
- ]
-
- X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp)
- aggresive_set = float(self.data["agg"] / 100)
- aggressiveness = {
- "value": aggresive_set,
- "split_bin": self.mp.param["band"][1]["crop_stop"],
- }
- with torch.no_grad():
- pred, X_mag, X_phase = inference(
- X_spec_m, self.device, self.model, aggressiveness, self.data
- )
- # Postprocess
- if self.data["postprocess"]:
- pred_inv = np.clip(X_mag - pred, 0, np.inf)
- pred = spec_utils.mask_silence(pred, pred_inv)
- y_spec_m = pred * X_phase
- v_spec_m = X_spec_m - y_spec_m
-
- if ins_root is not None:
- if self.data["high_end_process"].startswith("mirroring"):
- input_high_end_ = spec_utils.mirroring(
- self.data["high_end_process"], y_spec_m, input_high_end, self.mp
- )
- wav_instrument = spec_utils.cmb_spectrogram_to_wave(
- y_spec_m, self.mp, input_high_end_h, input_high_end_
- )
- else:
- wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp)
- print("%s instruments done" % name)
- if format in ["wav", "flac"]:
- sf.write(
- os.path.join(
- ins_root,
- "instrument_{}_{}.{}".format(name, self.data["agg"], format),
- ),
- (np.array(wav_instrument) * 32768).astype("int16"),
- self.mp.param["sr"],
- ) #
- else:
- path = os.path.join(
- ins_root, "instrument_{}_{}.wav".format(name, self.data["agg"])
- )
- sf.write(
- path,
- (np.array(wav_instrument) * 32768).astype("int16"),
- self.mp.param["sr"],
- )
- if os.path.exists(path):
- os.system(
- "ffmpeg -i %s -vn %s -q:a 2 -y"
- % (path, path[:-4] + ".%s" % format)
- )
- if vocal_root is not None:
- if self.data["high_end_process"].startswith("mirroring"):
- input_high_end_ = spec_utils.mirroring(
- self.data["high_end_process"], v_spec_m, input_high_end, self.mp
- )
- wav_vocals = spec_utils.cmb_spectrogram_to_wave(
- v_spec_m, self.mp, input_high_end_h, input_high_end_
- )
- else:
- wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp)
- print("%s vocals done" % name)
- if format in ["wav", "flac"]:
- sf.write(
- os.path.join(
- vocal_root,
- "vocal_{}_{}.{}".format(name, self.data["agg"], format),
- ),
- (np.array(wav_vocals) * 32768).astype("int16"),
- self.mp.param["sr"],
- )
- else:
- path = os.path.join(
- vocal_root, "vocal_{}_{}.wav".format(name, self.data["agg"])
- )
- sf.write(
- path,
- (np.array(wav_vocals) * 32768).astype("int16"),
- self.mp.param["sr"],
- )
- if os.path.exists(path):
- os.system(
- "ffmpeg -i %s -vn %s -q:a 2 -y"
- % (path, path[:-4] + ".%s" % format)
- )
-
-
-if __name__ == "__main__":
- device = "cuda"
- is_half = True
- # model_path = "uvr5_weights/2_HP-UVR.pth"
- # model_path = "uvr5_weights/VR-DeEchoDeReverb.pth"
- # model_path = "uvr5_weights/VR-DeEchoNormal.pth"
- model_path = "uvr5_weights/DeEchoNormal.pth"
- # pre_fun = _audio_pre_(model_path=model_path, device=device, is_half=True,agg=10)
- pre_fun = _audio_pre_new(model_path=model_path, device=device, is_half=True, agg=10)
- audio_path = "雪雪伴奏对消HP5.wav"
- save_path = "opt"
- pre_fun._path_audio_(audio_path, save_path, save_path)
diff --git a/spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/losses_audio/contperceptual_dis.py b/spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/losses_audio/contperceptual_dis.py
deleted file mode 100644
index be47add559612aaf8c667afd554d88e23fd8fd56..0000000000000000000000000000000000000000
--- a/spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/losses_audio/contperceptual_dis.py
+++ /dev/null
@@ -1,137 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import sys
-
-sys.path.insert(0, '.') # nopep8
-from ldm.modules.losses_audio.vqperceptual import *
-from ldm.modules.discriminator.multi_window_disc import Discriminator
-
-class LPAPSWithDiscriminator(nn.Module):  # adds a MultiWindowDiscriminator on top of contperceptual.py
- def __init__(self, disc_start, logvar_init=0.0, kl_weight=1.0, pixelloss_weight=1.0,
- disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0,
- perceptual_weight=1.0, use_actnorm=False, disc_conditional=False,
- disc_loss="hinge"):
-
- super().__init__()
- assert disc_loss in ["hinge", "vanilla"]
- self.kl_weight = kl_weight
- self.pixel_weight = pixelloss_weight
- self.perceptual_loss = LPAPS().eval()
- self.perceptual_weight = perceptual_weight
- # output log variance
- self.logvar = nn.Parameter(torch.ones(size=()) * logvar_init)
-
- self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels,
- n_layers=disc_num_layers,
- use_actnorm=use_actnorm,
- ).apply(weights_init)
- self.discriminator_iter_start = disc_start
- if disc_loss == "hinge":
- self.disc_loss = hinge_d_loss
- elif disc_loss == "vanilla":
- self.disc_loss = vanilla_d_loss
- else:
- raise ValueError(f"Unknown GAN loss '{disc_loss}'.")
- print(f"LPAPSWithDiscriminator running with {disc_loss} loss.")
- self.disc_factor = disc_factor
- self.discriminator_weight = disc_weight
- self.disc_conditional = disc_conditional
-
- disc_win_num = 3
- mel_disc_hidden_size = 128
- self.discriminator_multi = Discriminator(time_lengths=[32, 64, 128][:disc_win_num],
- freq_length=80, hidden_size=mel_disc_hidden_size, kernel=(3, 3),
- cond_size=0, norm_type="in", reduction="stack")
-
- def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
- if last_layer is not None:
- nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
- g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
- else:
- nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
- g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]
-
- d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
- d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
- d_weight = d_weight * self.discriminator_weight
- return d_weight
-
- def forward(self, inputs, reconstructions, posteriors, optimizer_idx,
- global_step, last_layer=None, cond=None, split="train", weights=None):
- rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous())
- if self.perceptual_weight > 0:
- p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous())
- rec_loss = rec_loss + self.perceptual_weight * p_loss
- else:
- p_loss = torch.tensor([0.0])
-
- nll_loss = rec_loss / torch.exp(self.logvar) + self.logvar
- weighted_nll_loss = nll_loss
- if weights is not None:
- weighted_nll_loss = weights*nll_loss
- weighted_nll_loss = torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0]
- nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]
- kl_loss = posteriors.kl()
- kl_loss = torch.sum(kl_loss) / kl_loss.shape[0]
-
- # now the GAN part
- if optimizer_idx == 0:
- # generator update
- if cond is None:
- assert not self.disc_conditional
- logits_fake = self.discriminator(reconstructions.contiguous())
- else:
- assert self.disc_conditional
- logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1))
-
- logits_fake_multi = self.discriminator_multi(reconstructions.contiguous().squeeze(1).transpose(1, 2))
-
- g_loss = -torch.mean(logits_fake)
- g_loss_multi = -torch.mean(logits_fake_multi['y'])
-
- try:
- d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)
- d_weight_multi = self.calculate_adaptive_weight(nll_loss, g_loss_multi, last_layer=last_layer)
- except RuntimeError:
- assert not self.training
- d_weight = d_weight_multi = torch.tensor(0.0)
-
- disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
- loss = weighted_nll_loss + self.kl_weight * kl_loss + d_weight * disc_factor * g_loss + d_weight_multi * disc_factor * g_loss_multi
-
- log = {"{}/total_loss".format(split): loss.clone().detach().mean(),
- "{}/logvar".format(split): self.logvar.detach(),
- "{}/kl_loss".format(split): kl_loss.detach().mean(),
- "{}/nll_loss".format(split): nll_loss.detach().mean(),
- "{}/rec_loss".format(split): rec_loss.detach().mean(),
- "{}/d_weight".format(split): d_weight.detach(),
- "{}/disc_factor".format(split): torch.tensor(disc_factor),
- "{}/g_loss".format(split): g_loss.detach().mean(),
- "{}/g_loss_multi".format(split): g_loss_multi.detach().mean(),
- }
- return loss, log
-
- if optimizer_idx == 1:
- # second pass for discriminator update
- if cond is None:
- logits_real = self.discriminator(inputs.contiguous().detach())
- logits_fake = self.discriminator(reconstructions.contiguous().detach())
- else:
- logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1))
- logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1))
-
- logits_real_multi = self.discriminator_multi(inputs.contiguous().detach().squeeze(1).transpose(1, 2))
- logits_fake_multi = self.discriminator_multi(reconstructions.contiguous().detach().squeeze(1).transpose(1, 2))
-
- disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
- d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)
- d_loss_multi = disc_factor * self.disc_loss(logits_real_multi['y'], logits_fake_multi['y'])
-
- log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(),
- "{}/disc_loss_multi".format(split): d_loss_multi.clone().detach().mean(),
- "{}/logits_real".format(split): logits_real.detach().mean(),
- "{}/logits_fake".format(split): logits_fake.detach().mean()
- }
- return d_loss+d_loss_multi, log
-
diff --git a/spaces/AIKey/TestStatic/README.md b/spaces/AIKey/TestStatic/README.md
deleted file mode 100644
index 808bbef59304b078567ebeb15b381e555570e942..0000000000000000000000000000000000000000
--- a/spaces/AIKey/TestStatic/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: TestStatic
-emoji: 📉
-colorFrom: yellow
-colorTo: red
-sdk: static
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/ASJMO/freegpt/g4f/__init__.py b/spaces/ASJMO/freegpt/g4f/__init__.py
deleted file mode 100644
index a0b4bac6aa4de9c0449095a3874c2cb9716169d7..0000000000000000000000000000000000000000
--- a/spaces/ASJMO/freegpt/g4f/__init__.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import sys
-from . import Provider
-from g4f.models import Model, ModelUtils
-
-
-class ChatCompletion:
- @staticmethod
- def create(model: Model.model or str, messages: list, provider: Provider.Provider = None, stream: bool = False, auth: str = False, **kwargs):
- kwargs['auth'] = auth
-
- if provider and provider.needs_auth and not auth:
- print(
- f'ValueError: {provider.__name__} requires authentication (use auth="cookie or token or jwt ..." param)', file=sys.stderr)
- sys.exit(1)
-
- try:
- if isinstance(model, str):
- try:
- model = ModelUtils.convert[model]
- except KeyError:
- raise Exception(f'The model: {model} does not exist')
-
- engine = model.best_provider if not provider else provider
-
- if not engine.supports_stream and stream == True:
- print(
- f"ValueError: {engine.__name__} does not support 'stream' argument", file=sys.stderr)
- sys.exit(1)
-
- print(f'Using {engine.__name__} provider')
-
- return (engine._create_completion(model.name, messages, stream, **kwargs)
- if stream else ''.join(engine._create_completion(model.name, messages, stream, **kwargs)))
- except TypeError as e:
- print(e)
- arg: str = str(e).split("'")[1]
- print(
- f"ValueError: {engine.__name__} does not support '{arg}' argument", file=sys.stderr)
- sys.exit(1)
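A short usage sketch of the `ChatCompletion.create` entry point above. The model key is an assumption (it must exist in `ModelUtils.convert`); `stream=True` yields chunks from a generator, while `stream=False` returns the joined string:

```python
from g4f import ChatCompletion

# Non-streaming: returns the full completion as one string.
reply = ChatCompletion.create(
    model="gpt-3.5-turbo",  # assumed key; must be registered in ModelUtils.convert
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    stream=False,
)
print(reply)

# Streaming: returns a generator of text chunks.
for chunk in ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Count to three."}],
    stream=True,
):
    print(chunk, end="")
```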
diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-210e_deepfashion2_shorts_256x192/__init__.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-210e_deepfashion2_shorts_256x192/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/AchyuthGamer/OpenGPT/server/backend.py b/spaces/AchyuthGamer/OpenGPT/server/backend.py
deleted file mode 100644
index 5f30c673e729f529cb448ca9ad33d9e80945d6c6..0000000000000000000000000000000000000000
--- a/spaces/AchyuthGamer/OpenGPT/server/backend.py
+++ /dev/null
@@ -1,188 +0,0 @@
-import re
-from datetime import datetime
-import asyncio
-
-import sys
-sys.path.insert(0, '../g4f')
-from g4f import __init__, ChatCompletion
-
-from flask import request, Response, stream_with_context
-from requests import get
-from server.config import special_instructions
-import json
-import subprocess
-import platform
-
-class Backend_Api:
- def __init__(self, bp, config: dict) -> None:
- """
- Initialize the Backend_Api class.
- :param app: Flask application instance
- :param config: Configuration dictionary
- """
- self.bp = bp
- self.routes = {
- '/backend-api/v2/conversation': {
- 'function': self._conversation,
- 'methods': ['POST']
- }
- }
-
- def _conversation(self):
- """
- Handles the conversation route.
-
- :return: Response object containing the generated conversation stream
- """
- conversation_id = request.json['conversation_id']
-
- try:
- jailbreak = request.json['jailbreak']
- model = request.json['model']
- messages = build_messages(jailbreak)
-
- #The error "There is no current event loop in thread" was fixed in 0.1.4.3
- #its fix for Windows
- #if platform.system() == "Windows":
- # asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
-
- response = ChatCompletion.create(
- model=model,
- chatId=conversation_id,
- messages=messages
- )
-
- return Response(stream_with_context(generate_stream(response, jailbreak)), mimetype='text/event-stream')
-
- except Exception as e:
- print(e)
- print(e.__traceback__.tb_next)
-
- return {
- '_action': '_ask',
- 'success': False,
- "error": f"an error occurred {str(e)}"
- }, 400
-
-
-def build_messages(jailbreak):
- """
- Build the messages for the conversation.
-
- :param jailbreak: Jailbreak instruction string
- :return: List of messages for the conversation
- """
- _conversation = request.json['meta']['content']['conversation']
- internet_access = request.json['meta']['content']['internet_access']
- prompt = request.json['meta']['content']['parts'][0]
-
- # Add the existing conversation
- conversation = _conversation
-
- #This API doesn't work!
- # Add web results if enabled
- #if internet_access:
- # current_date = datetime.now().strftime("%Y-%m-%d")
- # query = f'Current date: {current_date}. ' + prompt["content"]
- # search_results = fetch_search_results(query)
- # conversation.extend(search_results)
-
- # Add jailbreak instructions if enabled
- if jailbreak_instructions := getJailbreak(jailbreak):
- conversation.extend(jailbreak_instructions)
-
- # Add the prompt
- conversation.append(prompt)
-
- # Reduce conversation size to avoid API Token quantity error
- if len(conversation) > 3:
- conversation = conversation[-4:]
-
- return conversation
-
-
-def fetch_search_results(query):
- """
- Fetch search results for a given query.
-
- :param query: Search query string
- :return: List of search results
- """
- search = get('https://ddg-api.herokuapp.com/search',
- params={
- 'query': query,
- 'limit': 3,
- })
-
- snippets = ""
- for index, result in enumerate(search.json()):
- snippet = f'[{index + 1}] "{result["snippet"]}" URL:{result["link"]}.'
- snippets += snippet
-
- response = "Here are some updated web searches. Use this to improve user response:"
- response += snippets
-
- return [{'role': 'system', 'content': response}]
-
-
-def generate_stream(response, jailbreak):
- """
- Generate the conversation stream.
-
- :param response: Response object from ChatCompletion.create
- :param jailbreak: Jailbreak instruction string
- :return: Generator object yielding messages in the conversation
- """
- if getJailbreak(jailbreak):
- response_jailbreak = ''
- jailbroken_checked = False
- for message in response:
- response_jailbreak += message
- if jailbroken_checked:
- yield message
- else:
- if response_jailbroken_success(response_jailbreak):
- jailbroken_checked = True
- if response_jailbroken_failed(response_jailbreak):
- yield response_jailbreak
- jailbroken_checked = True
- else:
- yield from response
-
-
-def response_jailbroken_success(response: str) -> bool:
- """Check if the response has been jailbroken.
-
- :param response: Response string
- :return: Boolean indicating if the response has been jailbroken
- """
- act_match = re.search(r'ACT:', response, flags=re.DOTALL)
- return bool(act_match)
-
-
-def response_jailbroken_failed(response):
- """
- Check if the response has not been jailbroken.
-
- :param response: Response string
- :return: Boolean indicating if the response has not been jailbroken
- """
- return False if len(response) < 4 else not (response.startswith("GPT:") or response.startswith("ACT:"))
-
-
-def getJailbreak(jailbreak):
- """
- Check if jailbreak instructions are provided.
-
- :param jailbreak: Jailbreak instruction string
- :return: Jailbreak instructions if provided, otherwise None
- """
-    if jailbreak != "default":
-        if jailbreak in special_instructions:
-            special_instructions[jailbreak][0]['content'] += special_instructions['two_responses_instruction']
-            return special_instructions[jailbreak]
-        else:
-            return None
-    else:
-        return None
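For reference, `_conversation` above only reads a handful of fields from `request.json`. A hedged client-side sketch of that payload (the host/port and the model key are assumptions; only the payload shape is taken from the handler):

```python
import requests

payload = {
    "conversation_id": "demo-conversation",
    "jailbreak": "default",                # "default" skips the special instructions
    "model": "gpt-3.5-turbo",              # assumed key; must be known to g4f
    "meta": {
        "content": {
            "conversation": [],            # prior messages, if any
            "internet_access": False,
            "parts": [{"role": "user", "content": "Hello!"}],
        }
    },
}

# http://127.0.0.1:1338 is an assumed local address for the Flask app that mounts Backend_Api.
with requests.post(
    "http://127.0.0.1:1338/backend-api/v2/conversation",
    json=payload,
    stream=True,
) as resp:
    for chunk in resp.iter_content(chunk_size=None, decode_unicode=True):
        print(chunk, end="")
```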
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/swipe/Factory.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/swipe/Factory.js
deleted file mode 100644
index f060f1233ae56cbfeb81c1f2985cb40226bd635b..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/swipe/Factory.js
+++ /dev/null
@@ -1,16 +0,0 @@
-import Swipe from './Swipe.js';
-import ObjectFactory from '../ObjectFactory.js';
-import IsGameObject from '../../../plugins/utils/system/IsGameObject.js';
-import SetValue from '../../../plugins/utils/object/SetValue.js';
-
-ObjectFactory.register('swipe', function (gameObject, config) {
- if (!IsGameObject(gameObject)) {
- config = gameObject;
- gameObject = this.scene;
- }
- return new Swipe(gameObject, config);
-});
-
-SetValue(window, 'RexPlugins.UI.Swipe', Swipe);
-
-export default Swipe;
\ No newline at end of file
diff --git a/spaces/AkitoP/umamusume_bert_vits2/text/english.py b/spaces/AkitoP/umamusume_bert_vits2/text/english.py
deleted file mode 100644
index 0f9339c9ed771dab5136978eaaab194ec3fe2395..0000000000000000000000000000000000000000
--- a/spaces/AkitoP/umamusume_bert_vits2/text/english.py
+++ /dev/null
@@ -1,214 +0,0 @@
-import pickle
-import os
-import re
-from g2p_en import G2p
-
-from text import symbols
-
-current_file_path = os.path.dirname(__file__)
-CMU_DICT_PATH = os.path.join(current_file_path, "cmudict.rep")
-CACHE_PATH = os.path.join(current_file_path, "cmudict_cache.pickle")
-_g2p = G2p()
-
-arpa = {
- "AH0",
- "S",
- "AH1",
- "EY2",
- "AE2",
- "EH0",
- "OW2",
- "UH0",
- "NG",
- "B",
- "G",
- "AY0",
- "M",
- "AA0",
- "F",
- "AO0",
- "ER2",
- "UH1",
- "IY1",
- "AH2",
- "DH",
- "IY0",
- "EY1",
- "IH0",
- "K",
- "N",
- "W",
- "IY2",
- "T",
- "AA1",
- "ER1",
- "EH2",
- "OY0",
- "UH2",
- "UW1",
- "Z",
- "AW2",
- "AW1",
- "V",
- "UW2",
- "AA2",
- "ER",
- "AW0",
- "UW0",
- "R",
- "OW1",
- "EH1",
- "ZH",
- "AE0",
- "IH2",
- "IH",
- "Y",
- "JH",
- "P",
- "AY1",
- "EY0",
- "OY2",
- "TH",
- "HH",
- "D",
- "ER0",
- "CH",
- "AO1",
- "AE1",
- "AO2",
- "OY1",
- "AY2",
- "IH1",
- "OW0",
- "L",
- "SH",
-}
-
-
-def post_replace_ph(ph):
- rep_map = {
- ":": ",",
- ";": ",",
- ",": ",",
- "。": ".",
- "!": "!",
- "?": "?",
- "\n": ".",
- "·": ",",
- "、": ",",
- "...": "…",
- "v": "V",
- }
- if ph in rep_map.keys():
- ph = rep_map[ph]
- if ph in symbols:
- return ph
- if ph not in symbols:
- ph = "UNK"
- return ph
-
-
-def read_dict():
- g2p_dict = {}
- start_line = 49
- with open(CMU_DICT_PATH) as f:
- line = f.readline()
- line_index = 1
- while line:
- if line_index >= start_line:
- line = line.strip()
- word_split = line.split(" ")
- word = word_split[0]
-
- syllable_split = word_split[1].split(" - ")
- g2p_dict[word] = []
- for syllable in syllable_split:
- phone_split = syllable.split(" ")
- g2p_dict[word].append(phone_split)
-
- line_index = line_index + 1
- line = f.readline()
-
- return g2p_dict
-
-
-def cache_dict(g2p_dict, file_path):
- with open(file_path, "wb") as pickle_file:
- pickle.dump(g2p_dict, pickle_file)
-
-
-def get_dict():
- if os.path.exists(CACHE_PATH):
- with open(CACHE_PATH, "rb") as pickle_file:
- g2p_dict = pickle.load(pickle_file)
- else:
- g2p_dict = read_dict()
- cache_dict(g2p_dict, CACHE_PATH)
-
- return g2p_dict
-
-
-eng_dict = get_dict()
-
-
-def refine_ph(phn):
- tone = 0
- if re.search(r"\d$", phn):
- tone = int(phn[-1]) + 1
- phn = phn[:-1]
- return phn.lower(), tone
-
-
-def refine_syllables(syllables):
- tones = []
- phonemes = []
- for phn_list in syllables:
- for i in range(len(phn_list)):
- phn = phn_list[i]
- phn, tone = refine_ph(phn)
- phonemes.append(phn)
- tones.append(tone)
- return phonemes, tones
-
-
-def text_normalize(text):
- # todo: eng text normalize
- return text
-
-
-def g2p(text):
- phones = []
- tones = []
- words = re.split(r"([,;.\-\?\!\s+])", text)
- for w in words:
- if w.upper() in eng_dict:
- phns, tns = refine_syllables(eng_dict[w.upper()])
- phones += phns
- tones += tns
- else:
- phone_list = list(filter(lambda p: p != " ", _g2p(w)))
- for ph in phone_list:
- if ph in arpa:
- ph, tn = refine_ph(ph)
- phones.append(ph)
- tones.append(tn)
- else:
- phones.append(ph)
- tones.append(0)
- # todo: implement word2ph
- word2ph = [1 for i in phones]
-
- phones = [post_replace_ph(i) for i in phones]
- return phones, tones, word2ph
-
-
-if __name__ == "__main__":
- # print(get_dict())
- # print(eng_word_to_phoneme("hello"))
- print(g2p("In this paper, we propose 1 DSPGAN, a GAN-based universal vocoder."))
- # all_phones = set()
- # for k, syllables in eng_dict.items():
- # for group in syllables:
- # for ph in group:
- # all_phones.add(ph)
- # print(all_phones)
diff --git a/spaces/Alex89912/ai-code-v1/README.md b/spaces/Alex89912/ai-code-v1/README.md
deleted file mode 100644
index ed5a2f5040d37bb53b26e670120598bf0df5ec97..0000000000000000000000000000000000000000
--- a/spaces/Alex89912/ai-code-v1/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: CodeGenerator-v1
-emoji: 💻
-colorFrom: red
-colorTo: pink
-sdk: gradio
-sdk_version: 3.41.2
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/AlexZou/Deploy_Restoration/Dehazing.py b/spaces/AlexZou/Deploy_Restoration/Dehazing.py
deleted file mode 100644
index 8a97d7068c0996127fa3ba391555092344813248..0000000000000000000000000000000000000000
--- a/spaces/AlexZou/Deploy_Restoration/Dehazing.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import os
-import torch
-import numpy as np
-from torchvision import transforms
-from PIL import Image
-import time
-import torchvision
-import cv2
-import torchvision.utils as tvu
-import torch.functional as F
-import argparse
-
-def inference_img(haze_path,Net):
-
- haze_image = Image.open(haze_path).convert('RGB')
- enhance_transforms = transforms.Compose([
- transforms.Resize((400,400)),
- transforms.ToTensor()
- ])
-
- print(haze_image.size)
- with torch.no_grad():
- haze_image = enhance_transforms(haze_image)
- #print(haze_image)
- haze_image = haze_image.unsqueeze(0)
- start = time.time()
- restored2 = Net(haze_image)
- end = time.time()
-
-
- return restored2,end-start
-
-if __name__ == '__main__':
- parser=argparse.ArgumentParser()
- parser.add_argument('--test_path',type=str,required=True,help='Path to test')
- parser.add_argument('--save_path',type=str,required=True,help='Path to save')
- parser.add_argument('--pk_path',type=str,default='model_zoo/Haze4k.tjm',help='Path of the checkpoint')
- opt = parser.parse_args()
- if not os.path.isdir(opt.save_path):
- os.mkdir(opt.save_path)
- Net=torch.jit.load(opt.pk_path,map_location=torch.device('cpu')).eval()
- image = opt.test_path
- print(image)
- restored2,time_num = inference_img(image,Net)
- torchvision.utils.save_image(restored2,opt.save_path+'output.png')
diff --git a/spaces/Alfasign/remove-background-on-image/README.md b/spaces/Alfasign/remove-background-on-image/README.md
deleted file mode 100644
index 7ed13f01daba6737940704a2c9b13ba190a82f1e..0000000000000000000000000000000000000000
--- a/spaces/Alfasign/remove-background-on-image/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Remove Background
-emoji: 🌖
-colorFrom: purple
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.40.1
-app_file: app.py
-pinned: false
-duplicated_from: openskyml/remove-background-on-image
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/Amitesh007/elevenlabs-stt/app.py b/spaces/Amitesh007/elevenlabs-stt/app.py
deleted file mode 100644
index 48e3cd79844310154af1b24afee47027b38234b5..0000000000000000000000000000000000000000
--- a/spaces/Amitesh007/elevenlabs-stt/app.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import streamlit as st
-import numpy as np
-from elevenlabs import voices, generate, set_api_key, UnauthenticatedRateLimitError
-
-def pad_buffer(audio):
- # Pad buffer to multiple of 2 bytes
- buffer_size = len(audio)
- element_size = np.dtype(np.int16).itemsize
- if buffer_size % element_size != 0:
- audio = audio + b'\0' * (element_size - (buffer_size % element_size))
- return audio
-
-def generate_voice(text, voice_name, model_name):
- audio = generate(
- text[:250], # Limit to 250 characters
- voice=voice_name,
- model=model_name
- )
- audio_data = np.frombuffer(pad_buffer(audio), dtype=np.int16)
- audio_bytes = audio_data.tobytes()
- return audio_bytes
-
-st.title("🎤 World's most advanced Text-to-Speech")
-
-description = """
-A demo of the world's most advanced TTS systems, made by [ElevenLabs](https://elevenlabs.io). Eleven Monolingual is designed to generate highly realistic voices in English, while Eleven Multilingual is a single model supporting multiple languages including English, German, Polish, Spanish, Italian, French, Portuguese, and Hindi. Sign up on [ElevenLabs](https://elevenlabs.io) to get fast access, long-form generation, voice cloning, API keys, and more!
-Credit goes to "1little coder".
-"""
-
-
-st.markdown(description)
-
-input_text = st.text_area(
- "Input Text (250 characters max)",
- value="Hahaha OHH MY GOD! This is SOOO funny, I-I am Eleven a text-to-speech system!",
- max_chars=250
-)
-
-all_voices = voices()
-input_voice = st.selectbox(
- "Voice",
- options=[voice.name for voice in all_voices],
- index=0
-)
-
-input_model = st.radio(
- "Model",
- options=["eleven_monolingual_v1", "eleven_multilingual_v1"],
- index=0
-)
-
-if st.button("Generate Voice"):
- try:
- audio = generate_voice(input_text, input_voice, input_model)
- st.audio(audio, format='audio/wav')
- except UnauthenticatedRateLimitError:
- st.error("Thanks for trying out ElevenLabs TTS! You've reached the free tier limit. Please provide an API key to continue.")
- except Exception as e:
- st.error(str(e))
\ No newline at end of file
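The Streamlit app above runs on the free, unauthenticated tier, which is what triggers the `UnauthenticatedRateLimitError` branch. A hedged sketch of the same calls outside Streamlit, using only the functions the app already imports (`set_api_key`, `voices`, `generate`); the API key and output filename are placeholders:

```python
# Hedged sketch: exercise the same elevenlabs helpers the deleted app.py imports.
from elevenlabs import generate, set_api_key, voices

set_api_key("YOUR_API_KEY")                 # placeholder; lifts the unauthenticated rate limit
first_voice = list(voices())[0]             # pick any available voice, as the app's selectbox does
audio = generate(
    text="Hello from a plain Python script!"[:250],  # the app caps input at 250 characters
    voice=first_voice.name,
    model="eleven_monolingual_v1",
)
with open("sample_output.mp3", "wb") as f:  # generate() returns raw audio bytes
    f.write(audio)
```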
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/multi_subject_dreambooth/train_multi_subject_dreambooth.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/multi_subject_dreambooth/train_multi_subject_dreambooth.py
deleted file mode 100644
index 4e03e23fc1284419e57d6922ed77e6bf85e57212..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/multi_subject_dreambooth/train_multi_subject_dreambooth.py
+++ /dev/null
@@ -1,1185 +0,0 @@
-import argparse
-import hashlib
-import itertools
-import json
-import logging
-import math
-import uuid
-import warnings
-from os import environ, listdir, makedirs
-from os.path import basename, join
-from pathlib import Path
-from typing import List
-
-import datasets
-import numpy as np
-import torch
-import torch.nn.functional as F
-import torch.utils.checkpoint
-import transformers
-from accelerate import Accelerator
-from accelerate.logging import get_logger
-from accelerate.utils import ProjectConfiguration, set_seed
-from huggingface_hub import create_repo, upload_folder
-from PIL import Image
-from torch import dtype
-from torch.nn import Module
-from torch.utils.data import Dataset
-from torchvision import transforms
-from tqdm.auto import tqdm
-from transformers import AutoTokenizer, PretrainedConfig
-
-import diffusers
-from diffusers import (
- AutoencoderKL,
- DDPMScheduler,
- DiffusionPipeline,
- DPMSolverMultistepScheduler,
- UNet2DConditionModel,
-)
-from diffusers.optimization import get_scheduler
-from diffusers.utils import check_min_version, is_wandb_available
-from diffusers.utils.import_utils import is_xformers_available
-
-
-if is_wandb_available():
- import wandb
-
-# Will error if the minimal version of diffusers is not installed. Remove at your own risk.
-check_min_version("0.13.0.dev0")
-
-logger = get_logger(__name__)
-
-
-def log_validation_images_to_tracker(
-    images: List[np.ndarray], label: str, validation_prompt: str, accelerator: Accelerator, epoch: int
-):
- logger.info(f"Logging images to tracker for validation prompt: {validation_prompt}.")
-
- for tracker in accelerator.trackers:
- if tracker.name == "tensorboard":
- np_images = np.stack([np.asarray(img) for img in images])
- tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
- if tracker.name == "wandb":
- tracker.log(
- {
- "validation": [
- wandb.Image(image, caption=f"{label}_{epoch}_{i}: {validation_prompt}")
- for i, image in enumerate(images)
- ]
- }
- )
-
-
-# TODO: Add `prompt_embeds` and `negative_prompt_embeds` parameters to the function when `pre_compute_text_embeddings`
-# argument is implemented.
-def generate_validation_images(
- text_encoder: Module,
- tokenizer: Module,
- unet: Module,
- vae: Module,
- arguments: argparse.Namespace,
- accelerator: Accelerator,
- weight_dtype: dtype,
-):
- logger.info("Running validation images.")
-
- pipeline_args = {}
-
- if text_encoder is not None:
- pipeline_args["text_encoder"] = accelerator.unwrap_model(text_encoder)
-
- if vae is not None:
- pipeline_args["vae"] = vae
-
- # create pipeline (note: unet and vae are loaded again in float32)
- pipeline = DiffusionPipeline.from_pretrained(
- arguments.pretrained_model_name_or_path,
- tokenizer=tokenizer,
- unet=accelerator.unwrap_model(unet),
- revision=arguments.revision,
- torch_dtype=weight_dtype,
- **pipeline_args,
- )
-
- # We train on the simplified learning objective. If we were previously predicting a variance, we need the
- # scheduler to ignore it
- scheduler_args = {}
-
- if "variance_type" in pipeline.scheduler.config:
- variance_type = pipeline.scheduler.config.variance_type
-
- if variance_type in ["learned", "learned_range"]:
- variance_type = "fixed_small"
-
- scheduler_args["variance_type"] = variance_type
-
- pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config, **scheduler_args)
- pipeline = pipeline.to(accelerator.device)
- pipeline.set_progress_bar_config(disable=True)
-
- generator = (
- None if arguments.seed is None else torch.Generator(device=accelerator.device).manual_seed(arguments.seed)
- )
-
- images_sets = []
- for vp, nvi, vnp, vis, vgs in zip(
- arguments.validation_prompt,
- arguments.validation_number_images,
- arguments.validation_negative_prompt,
- arguments.validation_inference_steps,
- arguments.validation_guidance_scale,
- ):
- images = []
- if vp is not None:
- logger.info(
- f"Generating {nvi} images with prompt: '{vp}', negative prompt: '{vnp}', inference steps: {vis}, "
- f"guidance scale: {vgs}."
- )
-
- pipeline_args = {"prompt": vp, "negative_prompt": vnp, "num_inference_steps": vis, "guidance_scale": vgs}
-
- # run inference
- # TODO: it would be good to measure whether it's faster to run inference on all images at once, one at a
- # time or in small batches
- for _ in range(nvi):
- with torch.autocast("cuda"):
- image = pipeline(**pipeline_args, num_images_per_prompt=1, generator=generator).images[0]
- images.append(image)
-
- images_sets.append(images)
-
- del pipeline
- if torch.cuda.is_available():
- torch.cuda.empty_cache()
-
- return images_sets
-
-
-def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str):
- text_encoder_config = PretrainedConfig.from_pretrained(
- pretrained_model_name_or_path,
- subfolder="text_encoder",
- revision=revision,
- )
- model_class = text_encoder_config.architectures[0]
-
- if model_class == "CLIPTextModel":
- from transformers import CLIPTextModel
-
- return CLIPTextModel
- elif model_class == "RobertaSeriesModelWithTransformation":
- from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation
-
- return RobertaSeriesModelWithTransformation
- else:
- raise ValueError(f"{model_class} is not supported.")
-
-
-def parse_args(input_args=None):
- parser = argparse.ArgumentParser(description="Simple example of a training script.")
- parser.add_argument(
- "--pretrained_model_name_or_path",
- type=str,
- default=None,
- required=True,
- help="Path to pretrained model or model identifier from huggingface.co/models.",
- )
- parser.add_argument(
- "--revision",
- type=str,
- default=None,
- required=False,
- help="Revision of pretrained model identifier from huggingface.co/models.",
- )
- parser.add_argument(
- "--tokenizer_name",
- type=str,
- default=None,
- help="Pretrained tokenizer name or path if not the same as model_name",
- )
- parser.add_argument(
- "--instance_data_dir",
- type=str,
- default=None,
- required=False,
- help="A folder containing the training data of instance images.",
- )
- parser.add_argument(
- "--class_data_dir",
- type=str,
- default=None,
- required=False,
- help="A folder containing the training data of class images.",
- )
- parser.add_argument(
- "--instance_prompt",
- type=str,
- default=None,
- required=False,
- help="The prompt with identifier specifying the instance",
- )
- parser.add_argument(
- "--class_prompt",
- type=str,
- default=None,
- help="The prompt to specify images in the same class as provided instance images.",
- )
- parser.add_argument(
- "--with_prior_preservation",
- default=False,
- action="store_true",
- help="Flag to add prior preservation loss.",
- )
- parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
- parser.add_argument(
- "--num_class_images",
- type=int,
- default=100,
- help=(
- "Minimal class images for prior preservation loss. If there are not enough images already present in"
- " class_data_dir, additional images will be sampled with class_prompt."
- ),
- )
- parser.add_argument(
- "--output_dir",
- type=str,
- default="text-inversion-model",
- help="The output directory where the model predictions and checkpoints will be written.",
- )
- parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
- parser.add_argument(
- "--resolution",
- type=int,
- default=512,
- help=(
-            "The resolution for input images; all images in the train/validation dataset will be resized to this"
-            " resolution."
- ),
- )
- parser.add_argument(
- "--center_crop",
- default=False,
- action="store_true",
- help=(
- "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
- " cropped. The images will be resized to the resolution first before cropping."
- ),
- )
- parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder")
- parser.add_argument(
- "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
- )
- parser.add_argument(
- "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
- )
- parser.add_argument("--num_train_epochs", type=int, default=1)
- parser.add_argument(
- "--max_train_steps",
- type=int,
- default=None,
- help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
- )
- parser.add_argument(
- "--checkpointing_steps",
- type=int,
- default=500,
- help=(
-            "Save a checkpoint of the training state every X updates. These checkpoints can be used as final"
- " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming"
- " training using `--resume_from_checkpoint`."
- ),
- )
- parser.add_argument(
- "--checkpoints_total_limit",
- type=int,
- default=None,
- help=(
- "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`."
- " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state"
- " for more docs"
- ),
- )
- parser.add_argument(
- "--resume_from_checkpoint",
- type=str,
- default=None,
- help=(
- "Whether training should be resumed from a previous checkpoint. Use a path saved by"
- ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
- ),
- )
- parser.add_argument(
- "--gradient_accumulation_steps",
- type=int,
- default=1,
-        help="Number of update steps to accumulate before performing a backward/update pass.",
- )
- parser.add_argument(
- "--gradient_checkpointing",
- action="store_true",
- help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
- )
- parser.add_argument(
- "--learning_rate",
- type=float,
- default=5e-6,
- help="Initial learning rate (after the potential warmup period) to use.",
- )
- parser.add_argument(
- "--scale_lr",
- action="store_true",
- default=False,
- help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
- )
- parser.add_argument(
- "--lr_scheduler",
- type=str,
- default="constant",
- help=(
- 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
- ' "constant", "constant_with_warmup"]'
- ),
- )
- parser.add_argument(
- "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
- )
- parser.add_argument(
- "--lr_num_cycles",
- type=int,
- default=1,
- help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
- )
- parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
- parser.add_argument(
- "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
- )
- parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
- parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
- parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
- parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
- parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
- parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
- parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
- parser.add_argument(
- "--hub_model_id",
- type=str,
- default=None,
- help="The name of the repository to keep in sync with the local `output_dir`.",
- )
- parser.add_argument(
- "--logging_dir",
- type=str,
- default="logs",
- help=(
- "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
- " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
- ),
- )
- parser.add_argument(
- "--allow_tf32",
- action="store_true",
- help=(
- "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
- " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
- ),
- )
- parser.add_argument(
- "--report_to",
- type=str,
- default="tensorboard",
- help=(
- 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
- ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
- ),
- )
- parser.add_argument(
- "--validation_steps",
- type=int,
- default=None,
- help=(
- "Run validation every X steps. Validation consists of running the prompt(s) `validation_prompt` "
- "multiple times (`validation_number_images`) and logging the images."
- ),
- )
- parser.add_argument(
- "--validation_prompt",
- type=str,
- default=None,
-        help="A prompt that is used during validation to verify that the model is learning. You can use commas to "
-        "define multiple prompts. This parameter can also be defined within the file given by the "
-        "`concepts_list` parameter for the respective subject.",
- )
- parser.add_argument(
- "--validation_number_images",
- type=int,
- default=4,
- help="Number of images that should be generated during validation with the validation parameters given. This "
- "can be defined within the file given by `concepts_list` parameter in the respective subject.",
- )
- parser.add_argument(
- "--validation_negative_prompt",
- type=str,
- default=None,
- help="A negative prompt that is used during validation to verify that the model is learning. You can use commas"
- " to define multiple negative prompts, each one corresponding to a validation prompt. This parameter can "
- "be defined also within the file given by `concepts_list` parameter in the respective subject.",
- )
- parser.add_argument(
- "--validation_inference_steps",
- type=int,
- default=25,
- help="Number of inference steps (denoising steps) to run during validation. This can be defined within the "
- "file given by `concepts_list` parameter in the respective subject.",
- )
- parser.add_argument(
- "--validation_guidance_scale",
- type=float,
- default=7.5,
- help="To control how much the image generation process follows the text prompt. This can be defined within the "
- "file given by `concepts_list` parameter in the respective subject.",
- )
- parser.add_argument(
- "--mixed_precision",
- type=str,
- default=None,
- choices=["no", "fp16", "bf16"],
- help=(
- "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
-            " 1.10 and an Nvidia Ampere GPU. Defaults to the value of the accelerate config of the current system or the"
- " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
- ),
- )
- parser.add_argument(
- "--prior_generation_precision",
- type=str,
- default=None,
- choices=["no", "fp32", "fp16", "bf16"],
- help=(
- "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
-            " 1.10 and an Nvidia Ampere GPU. Defaults to fp16 if a GPU is available, else fp32."
- ),
- )
- parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
- parser.add_argument(
- "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
- )
- parser.add_argument(
- "--set_grads_to_none",
- action="store_true",
- help=(
-            "Save more memory by setting grads to None instead of zero. Be aware that this changes certain"
- " behaviors, so disable this argument if it causes any problems. More info:"
- " https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html"
- ),
- )
- parser.add_argument(
- "--concepts_list",
- type=str,
- default=None,
-        help="Path to a JSON file containing a list of multiple concepts; it will overwrite parameters like instance_prompt,"
-        " class_prompt, etc.",
- )
-
- if input_args:
- args = parser.parse_args(input_args)
- else:
- args = parser.parse_args()
-
- if not args.concepts_list and (not args.instance_data_dir or not args.instance_prompt):
- raise ValueError(
- "You must specify either instance parameters (data directory, prompt, etc.) or use "
-            "the `concepts_list` parameter and specify them within the file."
- )
-
- if args.concepts_list:
- if args.instance_prompt:
- raise ValueError("If you are using `concepts_list` parameter, define the instance prompt within the file.")
- if args.instance_data_dir:
- raise ValueError(
- "If you are using `concepts_list` parameter, define the instance data directory within the file."
- )
- if args.validation_steps and (args.validation_prompt or args.validation_negative_prompt):
- raise ValueError(
- "If you are using `concepts_list` parameter, define validation parameters for "
- "each subject within the file:\n - `validation_prompt`."
- "\n - `validation_negative_prompt`.\n - `validation_guidance_scale`."
-                "\n - `validation_number_images`."
- "\n - `validation_inference_steps`.\nThe `validation_steps` parameter is the only one "
- "that needs to be defined outside the file."
- )
-
- env_local_rank = int(environ.get("LOCAL_RANK", -1))
- if env_local_rank != -1 and env_local_rank != args.local_rank:
- args.local_rank = env_local_rank
-
- if args.with_prior_preservation:
- if not args.concepts_list:
- if not args.class_data_dir:
- raise ValueError("You must specify a data directory for class images.")
- if not args.class_prompt:
- raise ValueError("You must specify prompt for class images.")
- else:
- if args.class_data_dir:
- raise ValueError(
- "If you are using `concepts_list` parameter, define the class data directory within the file."
- )
- if args.class_prompt:
- raise ValueError(
- "If you are using `concepts_list` parameter, define the class prompt within the file."
- )
- else:
- # logger is not available yet
-        if args.class_data_dir:
- warnings.warn(
- "Ignoring `class_data_dir` parameter, you need to use it together with `with_prior_preservation`."
- )
-        if args.class_prompt:
- warnings.warn(
- "Ignoring `class_prompt` parameter, you need to use it together with `with_prior_preservation`."
- )
-
- return args
-
-
-class DreamBoothDataset(Dataset):
- """
- A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
- It pre-processes the images and then tokenizes prompts.
- """
-
- def __init__(
- self,
- instance_data_root,
- instance_prompt,
- tokenizer,
- class_data_root=None,
- class_prompt=None,
- size=512,
- center_crop=False,
- ):
- self.size = size
- self.center_crop = center_crop
- self.tokenizer = tokenizer
-
- self.instance_data_root = []
- self.instance_images_path = []
- self.num_instance_images = []
- self.instance_prompt = []
- self.class_data_root = [] if class_data_root is not None else None
- self.class_images_path = []
- self.num_class_images = []
- self.class_prompt = []
- self._length = 0
-
- for i in range(len(instance_data_root)):
- self.instance_data_root.append(Path(instance_data_root[i]))
- if not self.instance_data_root[i].exists():
-                raise ValueError("Instance images root doesn't exist.")
-
- self.instance_images_path.append(list(Path(instance_data_root[i]).iterdir()))
- self.num_instance_images.append(len(self.instance_images_path[i]))
- self.instance_prompt.append(instance_prompt[i])
- self._length += self.num_instance_images[i]
-
- if class_data_root is not None:
- self.class_data_root.append(Path(class_data_root[i]))
- self.class_data_root[i].mkdir(parents=True, exist_ok=True)
- self.class_images_path.append(list(self.class_data_root[i].iterdir()))
-                self.num_class_images.append(len(self.class_images_path[i]))
- if self.num_class_images[i] > self.num_instance_images[i]:
- self._length -= self.num_instance_images[i]
- self._length += self.num_class_images[i]
- self.class_prompt.append(class_prompt[i])
-
- self.image_transforms = transforms.Compose(
- [
- transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
- transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
- transforms.ToTensor(),
- transforms.Normalize([0.5], [0.5]),
- ]
- )
-
- def __len__(self):
- return self._length
-
- def __getitem__(self, index):
- example = {}
- for i in range(len(self.instance_images_path)):
- instance_image = Image.open(self.instance_images_path[i][index % self.num_instance_images[i]])
- if not instance_image.mode == "RGB":
- instance_image = instance_image.convert("RGB")
- example[f"instance_images_{i}"] = self.image_transforms(instance_image)
- example[f"instance_prompt_ids_{i}"] = self.tokenizer(
- self.instance_prompt[i],
- truncation=True,
- padding="max_length",
- max_length=self.tokenizer.model_max_length,
- return_tensors="pt",
- ).input_ids
-
- if self.class_data_root:
- for i in range(len(self.class_data_root)):
- class_image = Image.open(self.class_images_path[i][index % self.num_class_images[i]])
- if not class_image.mode == "RGB":
- class_image = class_image.convert("RGB")
- example[f"class_images_{i}"] = self.image_transforms(class_image)
- example[f"class_prompt_ids_{i}"] = self.tokenizer(
- self.class_prompt[i],
- truncation=True,
- padding="max_length",
- max_length=self.tokenizer.model_max_length,
- return_tensors="pt",
- ).input_ids
-
- return example
-
-
-def collate_fn(num_instances, examples, with_prior_preservation=False):
- input_ids = []
- pixel_values = []
-
- for i in range(num_instances):
- input_ids += [example[f"instance_prompt_ids_{i}"] for example in examples]
- pixel_values += [example[f"instance_images_{i}"] for example in examples]
-
- # Concat class and instance examples for prior preservation.
- # We do this to avoid doing two forward passes.
- if with_prior_preservation:
- for i in range(num_instances):
- input_ids += [example[f"class_prompt_ids_{i}"] for example in examples]
- pixel_values += [example[f"class_images_{i}"] for example in examples]
-
- pixel_values = torch.stack(pixel_values)
- pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
-
- input_ids = torch.cat(input_ids, dim=0)
-
- batch = {
- "input_ids": input_ids,
- "pixel_values": pixel_values,
- }
- return batch
-
-
-class PromptDataset(Dataset):
- """A simple dataset to prepare the prompts to generate class images on multiple GPUs."""
-
- def __init__(self, prompt, num_samples):
- self.prompt = prompt
- self.num_samples = num_samples
-
- def __len__(self):
- return self.num_samples
-
- def __getitem__(self, index):
- example = {}
- example["prompt"] = self.prompt
- example["index"] = index
- return example
-
-
-def main(args):
- logging_dir = Path(args.output_dir, args.logging_dir)
- accelerator_project_config = ProjectConfiguration(
- total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
- )
- accelerator = Accelerator(
- gradient_accumulation_steps=args.gradient_accumulation_steps,
- mixed_precision=args.mixed_precision,
- log_with=args.report_to,
- project_config=accelerator_project_config,
- )
-
- if args.report_to == "wandb":
- if not is_wandb_available():
- raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
-
- # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate
- # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models.
- # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate.
- if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1:
- raise ValueError(
- "Gradient accumulation is not supported when training the text encoder in distributed training. "
- "Please set gradient_accumulation_steps to 1. This feature will be supported in the future."
- )
-
- instance_data_dir = []
- instance_prompt = []
- class_data_dir = [] if args.with_prior_preservation else None
- class_prompt = [] if args.with_prior_preservation else None
- if args.concepts_list:
- with open(args.concepts_list, "r") as f:
- concepts_list = json.load(f)
-
- if args.validation_steps:
- args.validation_prompt = []
- args.validation_number_images = []
- args.validation_negative_prompt = []
- args.validation_inference_steps = []
- args.validation_guidance_scale = []
-
- for concept in concepts_list:
- instance_data_dir.append(concept["instance_data_dir"])
- instance_prompt.append(concept["instance_prompt"])
-
- if args.with_prior_preservation:
- try:
- class_data_dir.append(concept["class_data_dir"])
- class_prompt.append(concept["class_prompt"])
- except KeyError:
- raise KeyError(
- "`class_data_dir` or `class_prompt` not found in concepts_list while using "
- "`with_prior_preservation`."
- )
- else:
- if "class_data_dir" in concept:
- warnings.warn(
- "Ignoring `class_data_dir` key, to use it you need to enable `with_prior_preservation`."
- )
- if "class_prompt" in concept:
- warnings.warn(
- "Ignoring `class_prompt` key, to use it you need to enable `with_prior_preservation`."
- )
-
- if args.validation_steps:
- args.validation_prompt.append(concept.get("validation_prompt", None))
- args.validation_number_images.append(concept.get("validation_number_images", 4))
- args.validation_negative_prompt.append(concept.get("validation_negative_prompt", None))
- args.validation_inference_steps.append(concept.get("validation_inference_steps", 25))
- args.validation_guidance_scale.append(concept.get("validation_guidance_scale", 7.5))
- else:
- # Parse instance and class inputs, and double check that lengths match
- instance_data_dir = args.instance_data_dir.split(",")
- instance_prompt = args.instance_prompt.split(",")
- assert all(
- x == len(instance_data_dir) for x in [len(instance_data_dir), len(instance_prompt)]
- ), "Instance data dir and prompt inputs are not of the same length."
-
- if args.with_prior_preservation:
- class_data_dir = args.class_data_dir.split(",")
- class_prompt = args.class_prompt.split(",")
- assert all(
- x == len(instance_data_dir)
- for x in [len(instance_data_dir), len(instance_prompt), len(class_data_dir), len(class_prompt)]
- ), "Instance & class data dir or prompt inputs are not of the same length."
-
- if args.validation_steps:
- validation_prompts = args.validation_prompt.split(",")
- num_of_validation_prompts = len(validation_prompts)
- args.validation_prompt = validation_prompts
- args.validation_number_images = [args.validation_number_images] * num_of_validation_prompts
-
- negative_validation_prompts = [None] * num_of_validation_prompts
- if args.validation_negative_prompt:
- negative_validation_prompts = args.validation_negative_prompt.split(",")
- while len(negative_validation_prompts) < num_of_validation_prompts:
- negative_validation_prompts.append(None)
- args.validation_negative_prompt = negative_validation_prompts
-
- assert num_of_validation_prompts == len(
- negative_validation_prompts
-            ), "The number of validation negative prompts must not exceed the number of validation prompts."
- args.validation_inference_steps = [args.validation_inference_steps] * num_of_validation_prompts
- args.validation_guidance_scale = [args.validation_guidance_scale] * num_of_validation_prompts
-
- # Make one log on every process with the configuration for debugging.
- logging.basicConfig(
- format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
- datefmt="%m/%d/%Y %H:%M:%S",
- level=logging.INFO,
- )
- logger.info(accelerator.state, main_process_only=False)
- if accelerator.is_local_main_process:
- datasets.utils.logging.set_verbosity_warning()
- transformers.utils.logging.set_verbosity_warning()
- diffusers.utils.logging.set_verbosity_info()
- else:
- datasets.utils.logging.set_verbosity_error()
- transformers.utils.logging.set_verbosity_error()
- diffusers.utils.logging.set_verbosity_error()
-
- # If passed along, set the training seed now.
- if args.seed is not None:
- set_seed(args.seed)
-
- # Generate class images if prior preservation is enabled.
- if args.with_prior_preservation:
- for i in range(len(class_data_dir)):
- class_images_dir = Path(class_data_dir[i])
- if not class_images_dir.exists():
- class_images_dir.mkdir(parents=True)
- cur_class_images = len(list(class_images_dir.iterdir()))
-
- if cur_class_images < args.num_class_images:
- torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32
- if args.prior_generation_precision == "fp32":
- torch_dtype = torch.float32
- elif args.prior_generation_precision == "fp16":
- torch_dtype = torch.float16
- elif args.prior_generation_precision == "bf16":
- torch_dtype = torch.bfloat16
- pipeline = DiffusionPipeline.from_pretrained(
- args.pretrained_model_name_or_path,
- torch_dtype=torch_dtype,
- safety_checker=None,
- revision=args.revision,
- )
- pipeline.set_progress_bar_config(disable=True)
-
- num_new_images = args.num_class_images - cur_class_images
- logger.info(f"Number of class images to sample: {num_new_images}.")
-
- sample_dataset = PromptDataset(class_prompt[i], num_new_images)
- sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)
-
- sample_dataloader = accelerator.prepare(sample_dataloader)
- pipeline.to(accelerator.device)
-
- for example in tqdm(
- sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process
- ):
- images = pipeline(example["prompt"]).images
-
- for ii, image in enumerate(images):
- hash_image = hashlib.sha1(image.tobytes()).hexdigest()
- image_filename = (
- class_images_dir / f"{example['index'][ii] + cur_class_images}-{hash_image}.jpg"
- )
- image.save(image_filename)
-
- # Clean up the memory deleting one-time-use variables.
- del pipeline
- del sample_dataloader
- del sample_dataset
- if torch.cuda.is_available():
- torch.cuda.empty_cache()
-
- # Handle the repository creation
- if accelerator.is_main_process:
- if args.output_dir is not None:
- makedirs(args.output_dir, exist_ok=True)
-
- if args.push_to_hub:
- repo_id = create_repo(
- repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
- ).repo_id
-
- # Load the tokenizer
- tokenizer = None
- if args.tokenizer_name:
- tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False)
- elif args.pretrained_model_name_or_path:
- tokenizer = AutoTokenizer.from_pretrained(
- args.pretrained_model_name_or_path,
- subfolder="tokenizer",
- revision=args.revision,
- use_fast=False,
- )
-
- # import correct text encoder class
- text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision)
-
- # Load scheduler and models
- noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
- text_encoder = text_encoder_cls.from_pretrained(
- args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
- )
- vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision)
- unet = UNet2DConditionModel.from_pretrained(
- args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
- )
-
- vae.requires_grad_(False)
- if not args.train_text_encoder:
- text_encoder.requires_grad_(False)
-
- if args.enable_xformers_memory_efficient_attention:
- if is_xformers_available():
- unet.enable_xformers_memory_efficient_attention()
- else:
- raise ValueError("xformers is not available. Make sure it is installed correctly")
-
- if args.gradient_checkpointing:
- unet.enable_gradient_checkpointing()
- if args.train_text_encoder:
- text_encoder.gradient_checkpointing_enable()
-
- # Enable TF32 for faster training on Ampere GPUs,
- # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
- if args.allow_tf32:
- torch.backends.cuda.matmul.allow_tf32 = True
-
- if args.scale_lr:
- args.learning_rate = (
- args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
- )
-
- # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs
- if args.use_8bit_adam:
- try:
- import bitsandbytes as bnb
- except ImportError:
- raise ImportError(
- "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
- )
-
- optimizer_class = bnb.optim.AdamW8bit
- else:
- optimizer_class = torch.optim.AdamW
-
- # Optimizer creation
- params_to_optimize = (
- itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters()
- )
- optimizer = optimizer_class(
- params_to_optimize,
- lr=args.learning_rate,
- betas=(args.adam_beta1, args.adam_beta2),
- weight_decay=args.adam_weight_decay,
- eps=args.adam_epsilon,
- )
-
- # Dataset and DataLoaders creation:
- train_dataset = DreamBoothDataset(
- instance_data_root=instance_data_dir,
- instance_prompt=instance_prompt,
- class_data_root=class_data_dir,
- class_prompt=class_prompt,
- tokenizer=tokenizer,
- size=args.resolution,
- center_crop=args.center_crop,
- )
-
- train_dataloader = torch.utils.data.DataLoader(
- train_dataset,
- batch_size=args.train_batch_size,
- shuffle=True,
- collate_fn=lambda examples: collate_fn(len(instance_data_dir), examples, args.with_prior_preservation),
- num_workers=1,
- )
-
- # Scheduler and math around the number of training steps.
- overrode_max_train_steps = False
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
- if args.max_train_steps is None:
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
- overrode_max_train_steps = True
-
- lr_scheduler = get_scheduler(
- args.lr_scheduler,
- optimizer=optimizer,
- num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
- num_training_steps=args.max_train_steps * accelerator.num_processes,
- num_cycles=args.lr_num_cycles,
- power=args.lr_power,
- )
-
- # Prepare everything with our `accelerator`.
- if args.train_text_encoder:
- unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
- unet, text_encoder, optimizer, train_dataloader, lr_scheduler
- )
- else:
- unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
- unet, optimizer, train_dataloader, lr_scheduler
- )
-
- # For mixed precision training we cast the text_encoder and vae weights to half-precision
- # as these models are only used for inference, keeping weights in full precision is not required.
- weight_dtype = torch.float32
- if accelerator.mixed_precision == "fp16":
- weight_dtype = torch.float16
- elif accelerator.mixed_precision == "bf16":
- weight_dtype = torch.bfloat16
-
- # Move vae and text_encoder to device and cast to weight_dtype
- vae.to(accelerator.device, dtype=weight_dtype)
- if not args.train_text_encoder:
- text_encoder.to(accelerator.device, dtype=weight_dtype)
-
- # We need to recalculate our total training steps as the size of the training dataloader may have changed.
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
- if overrode_max_train_steps:
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
- # Afterwards we recalculate our number of training epochs
- args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
-
- # We need to initialize the trackers we use, and also store our configuration.
- # The trackers initialize automatically on the main process.
- if accelerator.is_main_process:
- accelerator.init_trackers("dreambooth", config=vars(args))
-
- # Train!
- total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
-
- logger.info("***** Running training *****")
- logger.info(f" Num examples = {len(train_dataset)}")
- logger.info(f" Num batches each epoch = {len(train_dataloader)}")
- logger.info(f" Num Epochs = {args.num_train_epochs}")
- logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
- logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
- logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
- logger.info(f" Total optimization steps = {args.max_train_steps}")
- global_step = 0
- first_epoch = 0
-
- # Potentially load in the weights and states from a previous save
- if args.resume_from_checkpoint:
- if args.resume_from_checkpoint != "latest":
- path = basename(args.resume_from_checkpoint)
- else:
-            # Get the most recent checkpoint
- dirs = listdir(args.output_dir)
- dirs = [d for d in dirs if d.startswith("checkpoint")]
- dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
- path = dirs[-1] if len(dirs) > 0 else None
-
- if path is None:
- accelerator.print(
- f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
- )
- args.resume_from_checkpoint = None
- else:
- accelerator.print(f"Resuming from checkpoint {path}")
- accelerator.load_state(join(args.output_dir, path))
- global_step = int(path.split("-")[1])
-
- resume_global_step = global_step * args.gradient_accumulation_steps
- first_epoch = global_step // num_update_steps_per_epoch
- resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps)
-
- # Only show the progress bar once on each machine.
- progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process)
- progress_bar.set_description("Steps")
-
- for epoch in range(first_epoch, args.num_train_epochs):
- unet.train()
- if args.train_text_encoder:
- text_encoder.train()
- for step, batch in enumerate(train_dataloader):
- # Skip steps until we reach the resumed step
- if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step:
- if step % args.gradient_accumulation_steps == 0:
- progress_bar.update(1)
- continue
-
- with accelerator.accumulate(unet):
- # Convert images to latent space
- latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample()
- latents = latents * vae.config.scaling_factor
-
- # Sample noise that we'll add to the latents
- noise = torch.randn_like(latents)
- bsz = latents.shape[0]
- # Sample a random timestep for each image
- time_steps = torch.randint(
- 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device
- )
- time_steps = time_steps.long()
-
- # Add noise to the latents according to the noise magnitude at each timestep
- # (this is the forward diffusion process)
- noisy_latents = noise_scheduler.add_noise(latents, noise, time_steps)
-
- # Get the text embedding for conditioning
- encoder_hidden_states = text_encoder(batch["input_ids"])[0]
-
- # Predict the noise residual
- model_pred = unet(noisy_latents, time_steps, encoder_hidden_states).sample
-
- # Get the target for loss depending on the prediction type
- if noise_scheduler.config.prediction_type == "epsilon":
- target = noise
- elif noise_scheduler.config.prediction_type == "v_prediction":
- target = noise_scheduler.get_velocity(latents, noise, time_steps)
- else:
- raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
-
- if args.with_prior_preservation:
- # Chunk the noise and model_pred into two parts and compute the loss on each part separately.
- model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
- target, target_prior = torch.chunk(target, 2, dim=0)
-
- # Compute instance loss
- loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
-
- # Compute prior loss
- prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")
-
- # Add the prior loss to the instance loss.
- loss = loss + args.prior_loss_weight * prior_loss
- else:
- loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
-
- accelerator.backward(loss)
- if accelerator.sync_gradients:
- params_to_clip = (
- itertools.chain(unet.parameters(), text_encoder.parameters())
- if args.train_text_encoder
- else unet.parameters()
- )
- accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
- optimizer.step()
- lr_scheduler.step()
- optimizer.zero_grad(set_to_none=args.set_grads_to_none)
-
- # Checks if the accelerator has performed an optimization step behind the scenes
- if accelerator.sync_gradients:
- progress_bar.update(1)
- global_step += 1
-
- if accelerator.is_main_process:
- if global_step % args.checkpointing_steps == 0:
- save_path = join(args.output_dir, f"checkpoint-{global_step}")
- accelerator.save_state(save_path)
- logger.info(f"Saved state to {save_path}")
-
- if (
- args.validation_steps
- and any(args.validation_prompt)
- and global_step % args.validation_steps == 0
- ):
- images_set = generate_validation_images(
- text_encoder, tokenizer, unet, vae, args, accelerator, weight_dtype
- )
- for images, validation_prompt in zip(images_set, args.validation_prompt):
- if len(images) > 0:
- label = str(uuid.uuid1())[:8] # generate an id for different set of images
- log_validation_images_to_tracker(
- images, label, validation_prompt, accelerator, global_step
- )
-
- logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
- progress_bar.set_postfix(**logs)
- accelerator.log(logs, step=global_step)
-
- if global_step >= args.max_train_steps:
- break
-
- # Create the pipeline using the trained modules and save it.
- accelerator.wait_for_everyone()
- if accelerator.is_main_process:
- pipeline = DiffusionPipeline.from_pretrained(
- args.pretrained_model_name_or_path,
- unet=accelerator.unwrap_model(unet),
- text_encoder=accelerator.unwrap_model(text_encoder),
- revision=args.revision,
- )
- pipeline.save_pretrained(args.output_dir)
-
- if args.push_to_hub:
- upload_folder(
- repo_id=repo_id,
- folder_path=args.output_dir,
- commit_message="End of training",
- ignore_patterns=["step_*", "epoch_*"],
- )
-
- accelerator.end_training()
-
-
-if __name__ == "__main__":
- args = parse_args()
- main(args)
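The `--concepts_list` option above expects a JSON file whose entries carry the per-subject keys that `main()` reads (`instance_data_dir`, `instance_prompt`, optional `class_data_dir`/`class_prompt`, and the optional per-subject validation settings). A hedged sketch of writing such a file; every path, prompt, and rare token below is an illustrative placeholder:

```python
# Hedged sketch: build a concepts_list JSON matching the keys parsed in main().
# All directories, prompts, and rare tokens ("sks", "zwx") are illustrative placeholders.
import json

concepts = [
    {
        "instance_data_dir": "data/dog",
        "instance_prompt": "a photo of sks dog",
        "class_data_dir": "data/class_dog",              # used only with --with_prior_preservation
        "class_prompt": "a photo of a dog",
        "validation_prompt": "a photo of sks dog on the beach",  # used only with --validation_steps
        "validation_number_images": 4,
        "validation_negative_prompt": "blurry, low quality",
        "validation_inference_steps": 25,
        "validation_guidance_scale": 7.5,
    },
    {
        "instance_data_dir": "data/cat",
        "instance_prompt": "a photo of zwx cat",
        "class_data_dir": "data/class_cat",
        "class_prompt": "a photo of a cat",
    },
]

with open("concepts_list.json", "w") as f:
    json.dump(concepts, f, indent=2)
```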
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/yolo/yolov3_d53_mstrain-416_273e_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/yolo/yolov3_d53_mstrain-416_273e_coco.py
deleted file mode 100644
index d029b5cdd6b3dad09b16a6f2a23e66be684a6412..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/yolo/yolov3_d53_mstrain-416_273e_coco.py
+++ /dev/null
@@ -1,42 +0,0 @@
-_base_ = './yolov3_d53_mstrain-608_273e_coco.py'
-# dataset settings
-img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True)
-train_pipeline = [
- dict(type='LoadImageFromFile', to_float32=True),
- dict(type='LoadAnnotations', with_bbox=True),
- dict(type='PhotoMetricDistortion'),
- dict(
- type='Expand',
- mean=img_norm_cfg['mean'],
- to_rgb=img_norm_cfg['to_rgb'],
- ratio_range=(1, 2)),
- dict(
- type='MinIoURandomCrop',
- min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
- min_crop_size=0.3),
- dict(type='Resize', img_scale=[(320, 320), (416, 416)], keep_ratio=True),
- dict(type='RandomFlip', flip_ratio=0.5),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(416, 416),
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img'])
- ])
-]
-data = dict(
- train=dict(pipeline=train_pipeline),
- val=dict(pipeline=test_pipeline),
- test=dict(pipeline=test_pipeline))
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr48_512x512_20k_voc12aug.py b/spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr48_512x512_20k_voc12aug.py
deleted file mode 100644
index a8d1deb98659d05755c6316c2aff2295afb0bb9c..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr48_512x512_20k_voc12aug.py
+++ /dev/null
@@ -1,10 +0,0 @@
-_base_ = './fcn_hr18_512x512_20k_voc12aug.py'
-model = dict(
- pretrained='open-mmlab://msra/hrnetv2_w48',
- backbone=dict(
- extra=dict(
- stage2=dict(num_channels=(48, 96)),
- stage3=dict(num_channels=(48, 96, 192)),
- stage4=dict(num_channels=(48, 96, 192, 384)))),
- decode_head=dict(
- in_channels=[48, 96, 192, 384], channels=sum([48, 96, 192, 384])))
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr48_512x512_160k_ade20k.py b/spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr48_512x512_160k_ade20k.py
deleted file mode 100644
index 3b3e8af9538e6ce3c929a902e3d1ee5be53469a5..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr48_512x512_160k_ade20k.py
+++ /dev/null
@@ -1,39 +0,0 @@
-_base_ = './ocrnet_hr18_512x512_160k_ade20k.py'
-norm_cfg = dict(type='SyncBN', requires_grad=True)
-model = dict(
- pretrained='open-mmlab://msra/hrnetv2_w48',
- backbone=dict(
- extra=dict(
- stage2=dict(num_channels=(48, 96)),
- stage3=dict(num_channels=(48, 96, 192)),
- stage4=dict(num_channels=(48, 96, 192, 384)))),
- decode_head=[
- dict(
- type='FCNHead',
- in_channels=[48, 96, 192, 384],
- channels=sum([48, 96, 192, 384]),
- input_transform='resize_concat',
- in_index=(0, 1, 2, 3),
- kernel_size=1,
- num_convs=1,
- norm_cfg=norm_cfg,
- concat_input=False,
- dropout_ratio=-1,
- num_classes=150,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
- dict(
- type='OCRHead',
- in_channels=[48, 96, 192, 384],
- channels=512,
- ocr_channels=256,
- input_transform='resize_concat',
- in_index=(0, 1, 2, 3),
- norm_cfg=norm_cfg,
- dropout_ratio=-1,
- num_classes=150,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
- ])
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py b/spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py
deleted file mode 100644
index 86584573a3d1afac73041b85516112ac21f1f17c..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py
+++ /dev/null
@@ -1,6 +0,0 @@
-_base_ = [
- '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/ade20k.py',
- '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
-]
-model = dict(
- decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/__init__.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/__init__.py
deleted file mode 100644
index a0b6b345640a895368ac8a647afef6f24333d90e..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from .base import LoggerHook
-from .dvclive import DvcliveLoggerHook
-from .mlflow import MlflowLoggerHook
-from .neptune import NeptuneLoggerHook
-from .pavi import PaviLoggerHook
-from .tensorboard import TensorboardLoggerHook
-from .text import TextLoggerHook
-from .wandb import WandbLoggerHook
-
-__all__ = [
- 'LoggerHook', 'MlflowLoggerHook', 'PaviLoggerHook',
- 'TensorboardLoggerHook', 'TextLoggerHook', 'WandbLoggerHook',
- 'NeptuneLoggerHook', 'DvcliveLoggerHook'
-]
diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/gradio_scribble2image_interactive.py b/spaces/Anonymous-sub/Rerender/ControlNet/gradio_scribble2image_interactive.py
deleted file mode 100644
index 7308bcc1bb8387bba10c026495e0dcddae91c2db..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-sub/Rerender/ControlNet/gradio_scribble2image_interactive.py
+++ /dev/null
@@ -1,102 +0,0 @@
-from share import *
-import config
-
-import cv2
-import einops
-import gradio as gr
-import numpy as np
-import torch
-import random
-
-from pytorch_lightning import seed_everything
-from annotator.util import resize_image, HWC3
-from cldm.model import create_model, load_state_dict
-from cldm.ddim_hacked import DDIMSampler
-
-
-model = create_model('./models/cldm_v15.yaml').cpu()
-model.load_state_dict(load_state_dict('./models/control_sd15_scribble.pth', location='cuda'))
-model = model.cuda()
-ddim_sampler = DDIMSampler(model)
-
-
-def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, guess_mode, strength, scale, seed, eta):
- with torch.no_grad():
- img = resize_image(HWC3(input_image['mask'][:, :, 0]), image_resolution)
- H, W, C = img.shape
-
- detected_map = np.zeros_like(img, dtype=np.uint8)
- detected_map[np.min(img, axis=2) > 127] = 255
-
- control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0
- control = torch.stack([control for _ in range(num_samples)], dim=0)
- control = einops.rearrange(control, 'b h w c -> b c h w').clone()
-
- if seed == -1:
- seed = random.randint(0, 65535)
- seed_everything(seed)
-
- if config.save_memory:
- model.low_vram_shift(is_diffusing=False)
-
- cond = {"c_concat": [control], "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)]}
- un_cond = {"c_concat": None if guess_mode else [control], "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
- shape = (4, H // 8, W // 8)
-
- if config.save_memory:
- model.low_vram_shift(is_diffusing=True)
-
- model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else ([strength] * 13) # Magic number. IDK why. Perhaps because 0.825**12<0.01 but 0.826**12>0.01
- samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
- shape, cond, verbose=False, eta=eta,
- unconditional_guidance_scale=scale,
- unconditional_conditioning=un_cond)
-
- if config.save_memory:
- model.low_vram_shift(is_diffusing=False)
-
- x_samples = model.decode_first_stage(samples)
- x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)
-
- results = [x_samples[i] for i in range(num_samples)]
- return [255 - detected_map] + results
-
-
-def create_canvas(w, h):
- return np.zeros(shape=(h, w, 3), dtype=np.uint8) + 255
-
-
-block = gr.Blocks().queue()
-with block:
- with gr.Row():
- gr.Markdown("## Control Stable Diffusion with Interactive Scribbles")
- with gr.Row():
- with gr.Column():
- canvas_width = gr.Slider(label="Canvas Width", minimum=256, maximum=1024, value=512, step=1)
- canvas_height = gr.Slider(label="Canvas Height", minimum=256, maximum=1024, value=512, step=1)
- create_button = gr.Button(label="Start", value='Open drawing canvas!')
- input_image = gr.Image(source='upload', type='numpy', tool='sketch')
-            gr.Markdown(value='Do not forget to change your brush width to make it thinner. (Gradio does not allow developers to set the brush width, so you need to do it manually.) '
-                              'Just click on the small pencil icon in the upper-right corner of the block above.')
- create_button.click(fn=create_canvas, inputs=[canvas_width, canvas_height], outputs=[input_image])
- prompt = gr.Textbox(label="Prompt")
- run_button = gr.Button(label="Run")
- with gr.Accordion("Advanced options", open=False):
- num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
- image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512, step=64)
- strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
- guess_mode = gr.Checkbox(label='Guess Mode', value=False)
- ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
- scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
- seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, randomize=True)
- eta = gr.Number(label="eta (DDIM)", value=0.0)
- a_prompt = gr.Textbox(label="Added Prompt", value='best quality, extremely detailed')
- n_prompt = gr.Textbox(label="Negative Prompt",
- value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
- with gr.Column():
- result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto')
- ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, guess_mode, strength, scale, seed, eta]
- run_button.click(fn=process, inputs=ips, outputs=[result_gallery])
-
-
-block.launch(server_name='0.0.0.0')
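The key preprocessing step in `process()` above turns the sketched mask into a binary control map: any pixel whose channel-wise minimum exceeds 127 becomes white. A self-contained sketch of just that step, with a toy array standing in for Gradio's `input_image['mask']`:

```python
# Hedged sketch of the scribble binarization used in process(); the toy mask is illustrative.
import numpy as np

def scribble_to_control_map(mask_rgb: np.ndarray) -> np.ndarray:
    """mask_rgb: HxWx3 uint8 sketch (stand-in for the HWC3-expanded Gradio mask)."""
    detected_map = np.zeros_like(mask_rgb, dtype=np.uint8)
    detected_map[np.min(mask_rgb, axis=2) > 127] = 255   # bright strokes become white control pixels
    return detected_map

# Toy usage: an 8x8 canvas with a drawn square.
mask = np.zeros((8, 8, 3), dtype=np.uint8)
mask[2:6, 2:6] = 200
print(scribble_to_control_map(mask)[:, :, 0])
```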
diff --git a/spaces/ArtyomKhyan/Detection/utils/utils.py b/spaces/ArtyomKhyan/Detection/utils/utils.py
deleted file mode 100644
index 249707680b9414f6c18b37b6a01470d5adff6eca..0000000000000000000000000000000000000000
--- a/spaces/ArtyomKhyan/Detection/utils/utils.py
+++ /dev/null
@@ -1,1200 +0,0 @@
-import glob
-import math
-import os
-import random
-import shutil
-import subprocess
-import time
-from copy import copy
-from pathlib import Path
-from sys import platform
-
-import cv2
-import matplotlib
-import matplotlib.pyplot as plt
-import numpy as np
-import torch
-import torch.nn as nn
-import torchvision
-import yaml
-from scipy.signal import butter, filtfilt
-from tqdm import tqdm
-
-from . import torch_utils # torch_utils, google_utils
-
-# Set printoptions
-torch.set_printoptions(linewidth=320, precision=5, profile='long')
-np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5
-matplotlib.rc('font', **{'size': 11})
-
-# Prevent OpenCV from multithreading (to use PyTorch DataLoader)
-cv2.setNumThreads(0)
-
-
-def init_seeds(seed=0):
- random.seed(seed)
- np.random.seed(seed)
- torch_utils.init_seeds(seed=seed)
-
-
-def check_git_status():
- # Suggest 'git pull' if repo is out of date
- if platform in ['linux', 'darwin']:
- s = subprocess.check_output('if [ -d .git ]; then git fetch && git status -uno; fi', shell=True).decode('utf-8')
- if 'Your branch is behind' in s:
- print(s[s.find('Your branch is behind'):s.find('\n\n')] + '\n')
-
-
-def check_img_size(img_size, s=32):
- # Verify img_size is a multiple of stride s
- new_size = make_divisible(img_size, int(s)) # ceil gs-multiple
- if new_size != img_size:
- print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size))
- return new_size
-
-
-def check_anchors(dataset, model, thr=4.0, imgsz=640):
- # Check anchor fit to data, recompute if necessary
- print('\nAnalyzing anchors... ', end='')
- m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect()
- shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
- scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale
- wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh
-
- def metric(k): # compute metric
- r = wh[:, None] / k[None]
- x = torch.min(r, 1. / r).min(2)[0] # ratio metric
- best = x.max(1)[0] # best_x
- return (best > 1. / thr).float().mean() # best possible recall
-
- bpr = metric(m.anchor_grid.clone().cpu().view(-1, 2))
- print('Best Possible Recall (BPR) = %.4f' % bpr, end='')
- if bpr < 0.99: # threshold to recompute
-        print('. Attempting to generate improved anchors, please wait...')
- na = m.anchor_grid.numel() // 2 # number of anchors
- new_anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)
- new_bpr = metric(new_anchors.reshape(-1, 2))
- if new_bpr > bpr: # replace anchors
- new_anchors = torch.tensor(new_anchors, device=m.anchors.device).type_as(m.anchors)
- m.anchor_grid[:] = new_anchors.clone().view_as(m.anchor_grid) # for inference
- m.anchors[:] = new_anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss
- check_anchor_order(m)
- print('New anchors saved to model. Update model *.yaml to use these anchors in the future.')
- else:
- print('Original anchors better than new anchors. Proceeding with original anchors.')
- print('') # newline
-
-
-def check_anchor_order(m):
- # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary
- a = m.anchor_grid.prod(-1).view(-1) # anchor area
- da = a[-1] - a[0] # delta a
- ds = m.stride[-1] - m.stride[0] # delta s
-    if da.sign() != ds.sign():  # anchor order does not match stride order
- m.anchors[:] = m.anchors.flip(0)
- m.anchor_grid[:] = m.anchor_grid.flip(0)
-
-
-def check_file(file):
- # Searches for file if not found locally
- if os.path.isfile(file):
- return file
- else:
- files = glob.glob('./**/' + file, recursive=True) # find file
- assert len(files), 'File Not Found: %s' % file # assert file was found
- return files[0] # return first file if multiple found
-
-
-def make_divisible(x, divisor):
-    # Returns x evenly divisible by divisor
- return math.ceil(x / divisor) * divisor
-
-
-def labels_to_class_weights(labels, nc=80):
- # Get class weights (inverse frequency) from training labels
- if labels[0] is None: # no labels loaded
- return torch.Tensor()
-
- labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO
-    classes = labels[:, 0].astype(int)  # labels = [class xywh]
-    weights = np.bincount(classes, minlength=nc)  # occurrences per class
-
-    # Prepend gridpoint count (for uCE training)
- # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image
- # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start
-
- weights[weights == 0] = 1 # replace empty bins with 1
- weights = 1 / weights # number of targets per class
- weights /= weights.sum() # normalize
- return torch.from_numpy(weights)
-
-
-def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
- # Produces image weights based on class mAPs
- n = len(labels)
-    class_counts = np.array([np.bincount(labels[i][:, 0].astype(int), minlength=nc) for i in range(n)])
- image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
- # index = random.choices(range(n), weights=image_weights, k=1) # weight image sample
- return image_weights
-
-
-def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)
- # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
- # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
- # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
- # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco
- # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet
- x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
- 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
- 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
- return x
-
-
-def xyxy2xywh(x):
- # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
- y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
- y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center
- y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center
- y[:, 2] = x[:, 2] - x[:, 0] # width
- y[:, 3] = x[:, 3] - x[:, 1] # height
- return y
-
-
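-def xywh2xyxy(x):
-    # NOTE: this helper appears to be missing from this copy of the file even though
-    # non_max_suppression(), apply_classifier() and plot_images() below call it; restored
-    # here as the standard YOLOv5-style inverse of xyxy2xywh() above.
-    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
-    y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
-    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
-    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
-    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
-    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
-    return y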
-
-
-def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
- # Rescale coords (xyxy) from img1_shape to img0_shape
- if ratio_pad is None: # calculate from img0_shape
- gain = max(img1_shape) / max(img0_shape) # gain = old / new
- pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding
- else:
- gain = ratio_pad[0][0]
- pad = ratio_pad[1]
-
- coords[:, [0, 2]] -= pad[0] # x padding
- coords[:, [1, 3]] -= pad[1] # y padding
- coords[:, :4] /= gain
- clip_coords(coords, img0_shape)
- return coords
-
-
-def clip_coords(boxes, img_shape):
-    # Clip xyxy bounding boxes to image shape (height, width)
- boxes[:, 0].clamp_(0, img_shape[1]) # x1
- boxes[:, 1].clamp_(0, img_shape[0]) # y1
- boxes[:, 2].clamp_(0, img_shape[1]) # x2
- boxes[:, 3].clamp_(0, img_shape[0]) # y2
-
-
-def ap_per_class(tp, conf, pred_cls, target_cls):
- """ Compute the average precision, given the recall and precision curves.
- Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
- # Arguments
- tp: True positives (nparray, nx1 or nx10).
- conf: Objectness value from 0-1 (nparray).
- pred_cls: Predicted object classes (nparray).
- target_cls: True object classes (nparray).
- # Returns
- The average precision as computed in py-faster-rcnn.
- """
-
- # Sort by objectness
- i = np.argsort(-conf)
- tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
-
- # Find unique classes
- unique_classes = np.unique(target_cls)
-
- # Create Precision-Recall curve and compute AP for each class
- pr_score = 0.1 # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898
- s = [unique_classes.shape[0], tp.shape[1]] # number class, number iou thresholds (i.e. 10 for mAP0.5...0.95)
- ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s)
- for ci, c in enumerate(unique_classes):
- i = pred_cls == c
- n_gt = (target_cls == c).sum() # Number of ground truth objects
- n_p = i.sum() # Number of predicted objects
-
- if n_p == 0 or n_gt == 0:
- continue
- else:
- # Accumulate FPs and TPs
- fpc = (1 - tp[i]).cumsum(0)
- tpc = tp[i].cumsum(0)
-
- # Recall
- recall = tpc / (n_gt + 1e-16) # recall curve
- r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0]) # r at pr_score, negative x, xp because xp decreases
-
- # Precision
- precision = tpc / (tpc + fpc) # precision curve
- p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0]) # p at pr_score
-
- # AP from recall-precision curve
- for j in range(tp.shape[1]):
- ap[ci, j] = compute_ap(recall[:, j], precision[:, j])
-
- # Plot
- # fig, ax = plt.subplots(1, 1, figsize=(5, 5))
- # ax.plot(recall, precision)
- # ax.set_xlabel('Recall')
- # ax.set_ylabel('Precision')
- # ax.set_xlim(0, 1.01)
- # ax.set_ylim(0, 1.01)
- # fig.tight_layout()
- # fig.savefig('PR_curve.png', dpi=300)
-
- # Compute F1 score (harmonic mean of precision and recall)
- f1 = 2 * p * r / (p + r + 1e-16)
-
- return p, r, ap, f1, unique_classes.astype('int32')
-
-
-def compute_ap(recall, precision):
- """ Compute the average precision, given the recall and precision curves.
- Source: https://github.com/rbgirshick/py-faster-rcnn.
- # Arguments
- recall: The recall curve (list).
- precision: The precision curve (list).
- # Returns
- The average precision as computed in py-faster-rcnn.
- """
-
- # Append sentinel values to beginning and end
- mrec = np.concatenate(([0.], recall, [min(recall[-1] + 1E-3, 1.)]))
- mpre = np.concatenate(([0.], precision, [0.]))
-
- # Compute the precision envelope
- mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))
-
- # Integrate area under curve
- method = 'interp' # methods: 'continuous', 'interp'
- if method == 'interp':
- x = np.linspace(0, 1, 101) # 101-point interp (COCO)
- ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate
- else: # 'continuous'
- i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes
- ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve
-
- return ap
-
-
-def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False):
- # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
- box2 = box2.t()
-
- # Get the coordinates of bounding boxes
- if x1y1x2y2: # x1, y1, x2, y2 = box1
- b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
- b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
- else: # transform from xywh to xyxy
- b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
- b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
- b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
- b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2
-
- # Intersection area
- inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
- (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
-
- # Union Area
- w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1
- w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1
- union = (w1 * h1 + 1e-16) + w2 * h2 - inter
-
- iou = inter / union # iou
- if GIoU or DIoU or CIoU:
- cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width
- ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height
- if GIoU: # Generalized IoU https://arxiv.org/pdf/1902.09630.pdf
- c_area = cw * ch + 1e-16 # convex area
- return iou - (c_area - union) / c_area # GIoU
- if DIoU or CIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
- # convex diagonal squared
- c2 = cw ** 2 + ch ** 2 + 1e-16
- # centerpoint distance squared
- rho2 = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2)) ** 2 / 4 + ((b2_y1 + b2_y2) - (b1_y1 + b1_y2)) ** 2 / 4
- if DIoU:
- return iou - rho2 / c2 # DIoU
- elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
- v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
- with torch.no_grad():
- alpha = v / (1 - iou + v)
- return iou - (rho2 / c2 + v * alpha) # CIoU
-
- return iou
-
-
-def box_iou(box1, box2):
- # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
- """
- Return intersection-over-union (Jaccard index) of boxes.
- Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
- Arguments:
- box1 (Tensor[N, 4])
- box2 (Tensor[M, 4])
- Returns:
- iou (Tensor[N, M]): the NxM matrix containing the pairwise
- IoU values for every element in boxes1 and boxes2
- """
-
- def box_area(box):
- # box = 4xn
- return (box[2] - box[0]) * (box[3] - box[1])
-
- area1 = box_area(box1.t())
- area2 = box_area(box2.t())
-
- # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
- inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
- return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter)
-
-
-def wh_iou(wh1, wh2):
- # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
- wh1 = wh1[:, None] # [N,1,2]
- wh2 = wh2[None] # [1,M,2]
- inter = torch.min(wh1, wh2).prod(2) # [N,M]
- return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter)
-
-
-class FocalLoss(nn.Module):
- # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
- def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
- super(FocalLoss, self).__init__()
- self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
- self.gamma = gamma
- self.alpha = alpha
- self.reduction = loss_fcn.reduction
- self.loss_fcn.reduction = 'none' # required to apply FL to each element
-
- def forward(self, pred, true):
- loss = self.loss_fcn(pred, true)
- # p_t = torch.exp(-loss)
- # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability
-
- # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
- pred_prob = torch.sigmoid(pred) # prob from logits
- p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
- alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
- modulating_factor = (1.0 - p_t) ** self.gamma
- loss *= alpha_factor * modulating_factor
-
- if self.reduction == 'mean':
- return loss.mean()
- elif self.reduction == 'sum':
- return loss.sum()
- else: # 'none'
- return loss
-
-
-def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
- # return positive, negative label smoothing BCE targets
- return 1.0 - 0.5 * eps, 0.5 * eps
-
-
-class BCEBlurWithLogitsLoss(nn.Module):
- # BCEwithLogitLoss() with reduced missing label effects.
- def __init__(self, alpha=0.05):
- super(BCEBlurWithLogitsLoss, self).__init__()
- self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss()
- self.alpha = alpha
-
- def forward(self, pred, true):
- loss = self.loss_fcn(pred, true)
- pred = torch.sigmoid(pred) # prob from logits
- dx = pred - true # reduce only missing label effects
- # dx = (pred - true).abs() # reduce missing label and false label effects
- alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
- loss *= alpha_factor
- return loss.mean()
-
-
-def compute_loss(p, targets, model): # predictions, targets, model
- ft = torch.cuda.FloatTensor if p[0].is_cuda else torch.Tensor
- lcls, lbox, lobj = ft([0]), ft([0]), ft([0])
- tcls, tbox, indices, anchors = build_targets(p, targets, model) # targets
- h = model.hyp # hyperparameters
- red = 'mean' # Loss reduction (sum or mean)
-
- # Define criteria
- BCEcls = nn.BCEWithLogitsLoss(pos_weight=ft([h['cls_pw']]), reduction=red)
- BCEobj = nn.BCEWithLogitsLoss(pos_weight=ft([h['obj_pw']]), reduction=red)
-
- # class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
- cp, cn = smooth_BCE(eps=0.0)
-
- # focal loss
- g = h['fl_gamma'] # focal loss gamma
- if g > 0:
- BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
-
- # per output
- nt = 0 # number of targets
- np = len(p) # number of outputs
- balance = [1.0, 1.0, 1.0]
- for i, pi in enumerate(p): # layer index, layer predictions
- b, a, gj, gi = indices[i] # image, anchor, gridy, gridx
- tobj = torch.zeros_like(pi[..., 0]) # target obj
-
- nb = b.shape[0] # number of targets
- if nb:
- nt += nb # cumulative targets
- ps = pi[b, a, gj, gi] # prediction subset corresponding to targets
-
- # GIoU
- pxy = ps[:, :2].sigmoid() * 2. - 0.5
- pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
- pbox = torch.cat((pxy, pwh), 1) # predicted box
- giou = bbox_iou(pbox.t(), tbox[i], x1y1x2y2=False, GIoU=True) # giou(prediction, target)
- lbox += (1.0 - giou).sum() if red == 'sum' else (1.0 - giou).mean() # giou loss
-
- # Obj
- tobj[b, a, gj, gi] = (1.0 - model.gr) + model.gr * giou.detach().clamp(0).type(tobj.dtype) # giou ratio
-
- # Class
- if model.nc > 1: # cls loss (only if multiple classes)
- t = torch.full_like(ps[:, 5:], cn) # targets
- t[range(nb), tcls[i]] = cp
- lcls += BCEcls(ps[:, 5:], t) # BCE
-
- # Append targets to text file
- # with open('targets.txt', 'a') as file:
- # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]
-
- lobj += BCEobj(pi[..., 4], tobj) * balance[i] # obj loss
-
- s = 3 / np # output count scaling
- lbox *= h['giou'] * s
- lobj *= h['obj'] * s
- lcls *= h['cls'] * s
- bs = tobj.shape[0] # batch size
- if red == 'sum':
- g = 3.0 # loss gain
- lobj *= g / bs
- if nt:
- lcls *= g / nt / model.nc
- lbox *= g / nt
-
- loss = lbox + lobj + lcls
- return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()
-
-
-def build_targets(p, targets, model):
- # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
- det = model.module.model[-1] if type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) \
- else model.model[-1] # Detect() module
- na, nt = det.na, targets.shape[0] # number of anchors, targets
- tcls, tbox, indices, anch = [], [], [], []
- gain = torch.ones(6, device=targets.device) # normalized to gridspace gain
- off = torch.tensor([[1, 0], [0, 1], [-1, 0], [0, -1]], device=targets.device).float() # overlap offsets
- at = torch.arange(na).view(na, 1).repeat(1, nt) # anchor tensor, same as .repeat_interleave(nt)
-
- style = 'rect4'
- for i in range(det.nl):
- anchors = det.anchors[i]
- gain[2:] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain
-
- # Match targets to anchors
- a, t, offsets = [], targets * gain, 0
- if nt:
- r = t[None, :, 4:6] / anchors[:, None] # wh ratio
- j = torch.max(r, 1. / r).max(2)[0] < model.hyp['anchor_t'] # compare
- # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n) = wh_iou(anchors(3,2), gwh(n,2))
- a, t = at[j], t.repeat(na, 1, 1)[j] # filter
-
- # overlaps
- g = 0.5 # offset
- gxy = t[:, 2:4] # grid xy
- z = torch.zeros_like(gxy)
- if style == 'rect2':
- j, k = ((gxy % 1. < g) & (gxy > 1.)).T
- a, t = torch.cat((a, a[j], a[k]), 0), torch.cat((t, t[j], t[k]), 0)
- offsets = torch.cat((z, z[j] + off[0], z[k] + off[1]), 0) * g
- elif style == 'rect4':
- j, k = ((gxy % 1. < g) & (gxy > 1.)).T
- l, m = ((gxy % 1. > (1 - g)) & (gxy < (gain[[2, 3]] - 1.))).T
- a, t = torch.cat((a, a[j], a[k], a[l], a[m]), 0), torch.cat((t, t[j], t[k], t[l], t[m]), 0)
- offsets = torch.cat((z, z[j] + off[0], z[k] + off[1], z[l] + off[2], z[m] + off[3]), 0) * g
-
- # Define
- b, c = t[:, :2].long().T # image, class
- gxy = t[:, 2:4] # grid xy
- gwh = t[:, 4:6] # grid wh
- gij = (gxy - offsets).long()
- gi, gj = gij.T # grid xy indices
-
- # Append
- indices.append((b, a, gj, gi)) # image, anchor, grid indices
- tbox.append(torch.cat((gxy - gij, gwh), 1)) # box
- anch.append(anchors[a]) # anchors
- tcls.append(c) # class
-
- return tcls, tbox, indices, anch
-
-
-def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, merge=False, classes=None, agnostic=False):
- """Performs Non-Maximum Suppression (NMS) on inference results
-
- Returns:
- detections with shape: nx6 (x1, y1, x2, y2, conf, cls)
- """
- if prediction.dtype is torch.float16:
- prediction = prediction.float() # to FP32
-
- nc = prediction[0].shape[1] - 5 # number of classes
- xc = prediction[..., 4] > conf_thres # candidates
-
- # Settings
- min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height
- max_det = 300 # maximum number of detections per image
- time_limit = 10.0 # seconds to quit after
- redundant = True # require redundant detections
- multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img)
-
- t = time.time()
- output = [None] * prediction.shape[0]
- for xi, x in enumerate(prediction): # image index, image inference
- # Apply constraints
- # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height
- x = x[xc[xi]] # confidence
-
- # If none remain process next image
- if not x.shape[0]:
- continue
-
- # Compute conf
- x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf
-
- # Box (center x, center y, width, height) to (x1, y1, x2, y2)
- box = xywh2xyxy(x[:, :4])
-
- # Detections matrix nx6 (xyxy, conf, cls)
- if multi_label:
- i, j = (x[:, 5:] > conf_thres).nonzero().t()
- x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
- else: # best class only
- conf, j = x[:, 5:].max(1, keepdim=True)
- x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
-
- # Filter by class
- if classes:
- x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
-
- # Apply finite constraint
- # if not torch.isfinite(x).all():
- # x = x[torch.isfinite(x).all(1)]
-
- # If none remain process next image
- n = x.shape[0] # number of boxes
- if not n:
- continue
-
- # Sort by confidence
- # x = x[x[:, 4].argsort(descending=True)]
-
- # Batched NMS
- c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
- boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores
- i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)
- if i.shape[0] > max_det: # limit detections
- i = i[:max_det]
- if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)
- try: # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
- iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix
- weights = iou * scores[None] # box weights
- x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes
- if redundant:
- i = i[iou.sum(1) > 1] # require redundancy
- except: # possible CUDA error https://github.com/ultralytics/yolov3/issues/1139
- print(x, i, x.shape, i.shape)
- pass
-
- output[xi] = x[i]
- if (time.time() - t) > time_limit:
- break # time limit exceeded
-
- return output
-
-
-def strip_optimizer(f='weights/best.pt'): # from utils.utils import *; strip_optimizer()
- # Strip optimizer from *.pt files for lighter files (reduced by 1/2 size)
- x = torch.load(f, map_location=torch.device('cpu'))
- x['optimizer'] = None
- x['model'].half() # to FP16
- torch.save(x, f)
- print('Optimizer stripped from %s' % f)
-
-
-def create_pretrained(f='weights/best.pt', s='weights/pretrained.pt'): # from utils.utils import *; create_pretrained()
- # create pretrained checkpoint 's' from 'f' (create_pretrained(x, x) for x in glob.glob('./*.pt'))
- device = torch.device('cpu')
-    x = torch.load(f, map_location=device)  # load checkpoint 'f', to be saved as 's'
-
- x['optimizer'] = None
- x['training_results'] = None
- x['epoch'] = -1
- x['model'].half() # to FP16
- for p in x['model'].parameters():
- p.requires_grad = True
- torch.save(x, s)
- print('%s saved as pretrained checkpoint %s' % (f, s))
-
-
-def coco_class_count(path='../coco/labels/train2014/'):
- # Histogram of occurrences per class
- nc = 80 # number classes
- x = np.zeros(nc, dtype='int32')
- files = sorted(glob.glob('%s/*.*' % path))
- for i, file in enumerate(files):
- labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
- x += np.bincount(labels[:, 0].astype('int32'), minlength=nc)
- print(i, len(files))
-
-
-def coco_only_people(path='../coco/labels/train2017/'): # from utils.utils import *; coco_only_people()
- # Find images with only people
- files = sorted(glob.glob('%s/*.*' % path))
- for i, file in enumerate(files):
- labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
- if all(labels[:, 0] == 0):
- print(labels.shape[0], file)
-
-
-def crop_images_random(path='../images/', scale=0.50): # from utils.utils import *; crop_images_random()
- # crops images into random squares up to scale fraction
- # WARNING: overwrites images!
- for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
- img = cv2.imread(file) # BGR
- if img is not None:
- h, w = img.shape[:2]
-
- # create random mask
- a = 30 # minimum size (pixels)
- mask_h = random.randint(a, int(max(a, h * scale))) # mask height
- mask_w = mask_h # mask width
-
- # box
- xmin = max(0, random.randint(0, w) - mask_w // 2)
- ymin = max(0, random.randint(0, h) - mask_h // 2)
- xmax = min(w, xmin + mask_w)
- ymax = min(h, ymin + mask_h)
-
-            # save the random crop (overwrites the original image)
- cv2.imwrite(file, img[ymin:ymax, xmin:xmax])
-
-
-def coco_single_class_labels(path='../coco/labels/train2014/', label_class=43):
- # Makes single-class coco datasets. from utils.utils import *; coco_single_class_labels()
- if os.path.exists('new/'):
- shutil.rmtree('new/') # delete output folder
- os.makedirs('new/') # make new output folder
- os.makedirs('new/labels/')
- os.makedirs('new/images/')
- for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
- with open(file, 'r') as f:
- labels = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
- i = labels[:, 0] == label_class
- if any(i):
- img_file = file.replace('labels', 'images').replace('txt', 'jpg')
- labels[:, 0] = 0 # reset class to 0
- with open('new/images.txt', 'a') as f: # add image to dataset list
- f.write(img_file + '\n')
- with open('new/labels/' + Path(file).name, 'a') as f: # write label
- for l in labels[i]:
- f.write('%g %.6f %.6f %.6f %.6f\n' % tuple(l))
- shutil.copyfile(src=img_file, dst='new/images/' + Path(file).name.replace('txt', 'jpg')) # copy images
-
-
-def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
- """ Creates kmeans-evolved anchors from training dataset
-
- Arguments:
- path: path to dataset *.yaml, or a loaded dataset
- n: number of anchors
- img_size: image size used for training
- thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
- gen: generations to evolve anchors using genetic algorithm
-
- Return:
- k: kmeans evolved anchors
-
- Usage:
- from utils.utils import *; _ = kmean_anchors()
- """
- thr = 1. / thr
-
- def metric(k, wh): # compute metrics
- r = wh[:, None] / k[None]
- x = torch.min(r, 1. / r).min(2)[0] # ratio metric
- # x = wh_iou(wh, torch.tensor(k)) # iou metric
- return x, x.max(1)[0] # x, best_x
-
- def fitness(k): # mutation fitness
- _, best = metric(torch.tensor(k, dtype=torch.float32), wh)
- return (best * (best > thr).float()).mean() # fitness
-
- def print_results(k):
- k = k[np.argsort(k.prod(1))] # sort small to large
- x, best = metric(k, wh0)
- bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr
- print('thr=%.2f: %.4f best possible recall, %.2f anchors past thr' % (thr, bpr, aat))
- print('n=%g, img_size=%s, metric_all=%.3f/%.3f-mean/best, past_thr=%.3f-mean: ' %
- (n, img_size, x.mean(), best.mean(), x[x > thr].mean()), end='')
- for i, x in enumerate(k):
- print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg
- return k
-
- if isinstance(path, str): # *.yaml file
- with open(path) as f:
- data_dict = yaml.load(f, Loader=yaml.FullLoader) # model dict
- from utils.datasets import LoadImagesAndLabels
- dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
- else:
- dataset = path # dataset
-
- # Get label wh
- shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
- wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh
-
- # Filter
- i = (wh0 < 3.0).any(1).sum()
- if i:
- print('WARNING: Extremely small objects found. '
- '%g of %g labels are < 3 pixels in width or height.' % (i, len(wh0)))
- wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels
-
- # Kmeans calculation
- from scipy.cluster.vq import kmeans
- print('Running kmeans for %g anchors on %g points...' % (n, len(wh)))
- s = wh.std(0) # sigmas for whitening
- k, dist = kmeans(wh / s, n, iter=30) # points, mean distance
- k *= s
- wh = torch.tensor(wh, dtype=torch.float32) # filtered
-    wh0 = torch.tensor(wh0, dtype=torch.float32)  # unfiltered
- k = print_results(k)
-
- # Plot
- # k, d = [None] * 20, [None] * 20
- # for i in tqdm(range(1, 21)):
- # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance
- # fig, ax = plt.subplots(1, 2, figsize=(14, 7))
- # ax = ax.ravel()
- # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
- # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh
- # ax[0].hist(wh[wh[:, 0]<100, 0],400)
- # ax[1].hist(wh[wh[:, 1]<100, 1],400)
- # fig.tight_layout()
- # fig.savefig('wh.png', dpi=200)
-
- # Evolve
- npr = np.random
- f, sh, mp, s = fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma
- pbar = tqdm(range(gen), desc='Evolving anchors with Genetic Algorithm') # progress bar
- for _ in pbar:
- v = np.ones(sh)
- while (v == 1).all(): # mutate until a change occurs (prevent duplicates)
- v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)
- kg = (k.copy() * v).clip(min=2.0)
- fg = fitness(kg)
- if fg > f:
- f, k = fg, kg.copy()
- pbar.desc = 'Evolving anchors with Genetic Algorithm: fitness = %.4f' % f
- if verbose:
- print_results(k)
-
- return print_results(k)
-
-
-def print_mutation(hyp, results, bucket=''):
- # Print mutation results to evolve.txt (for use with train.py --evolve)
- a = '%10s' * len(hyp) % tuple(hyp.keys()) # hyperparam keys
- b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values
- c = '%10.4g' * len(results) % results # results (P, R, mAP, F1, test_loss)
- print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))
-
- if bucket:
- os.system('gsutil cp gs://%s/evolve.txt .' % bucket) # download evolve.txt
-
- with open('evolve.txt', 'a') as f: # append result
- f.write(c + b + '\n')
- x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0) # load unique rows
- np.savetxt('evolve.txt', x[np.argsort(-fitness(x))], '%10.3g') # save sort by fitness
-
- if bucket:
- os.system('gsutil cp evolve.txt gs://%s' % bucket) # upload evolve.txt
-
-
-def apply_classifier(x, model, img, im0):
- # applies a second stage classifier to yolo outputs
- im0 = [im0] if isinstance(im0, np.ndarray) else im0
- for i, d in enumerate(x): # per image
- if d is not None and len(d):
- d = d.clone()
-
- # Reshape and pad cutouts
- b = xyxy2xywh(d[:, :4]) # boxes
- b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square
- b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad
- d[:, :4] = xywh2xyxy(b).long()
-
- # Rescale boxes from img_size to im0 size
- scale_coords(img.shape[2:], d[:, :4], im0[i].shape)
-
- # Classes
- pred_cls1 = d[:, 5].long()
- ims = []
- for j, a in enumerate(d): # per item
- cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
- im = cv2.resize(cutout, (224, 224)) # BGR
- # cv2.imwrite('test%i.jpg' % j, cutout)
-
- im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
- im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32
- im /= 255.0 # 0 - 255 to 0.0 - 1.0
- ims.append(im)
-
- pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction
- x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections
-
- return x
-
-
-def fitness(x):
- # Returns fitness (for use with results.txt or evolve.txt)
- w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
- return (x[:, :4] * w).sum(1)
-
-
-def output_to_target(output, width, height):
- """
- Convert a YOLO model output to target format
- [batch_id, class_id, x, y, w, h, conf]
- """
- if isinstance(output, torch.Tensor):
- output = output.cpu().numpy()
-
- targets = []
- for i, o in enumerate(output):
- if o is not None:
- for pred in o:
- box = pred[:4]
- w = (box[2] - box[0]) / width
- h = (box[3] - box[1]) / height
- x = box[0] / width + w / 2
- y = box[1] / height + h / 2
- conf = pred[4]
- cls = int(pred[5])
-
- targets.append([i, cls, x, y, w, h, conf])
-
- return np.array(targets)
-
-
-# Plotting functions ---------------------------------------------------------------------------------------------------
-def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
- # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
- def butter_lowpass(cutoff, fs, order):
- nyq = 0.5 * fs
- normal_cutoff = cutoff / nyq
- b, a = butter(order, normal_cutoff, btype='low', analog=False)
- return b, a
-
- b, a = butter_lowpass(cutoff, fs, order=order)
- return filtfilt(b, a, data) # forward-backward filter
-
-
-def plot_one_box(x, img, color=None, label=None, line_thickness=None):
- # Plots one bounding box on image img
- tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness
- color = color or [random.randint(0, 255) for _ in range(3)]
- c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
- cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
- if label:
- tf = max(tl - 1, 1) # font thickness
- t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
- c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
- cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled
- cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
-
-
-def plot_wh_methods(): # from utils.utils import *; plot_wh_methods()
- # Compares the two methods for width-height anchor multiplication
- # https://github.com/ultralytics/yolov3/issues/168
- x = np.arange(-4.0, 4.0, .1)
- ya = np.exp(x)
- yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2
-
- fig = plt.figure(figsize=(6, 3), dpi=150)
- plt.plot(x, ya, '.-', label='yolo method')
- plt.plot(x, yb ** 2, '.-', label='^2 power method')
- plt.plot(x, yb ** 2.5, '.-', label='^2.5 power method')
- plt.xlim(left=-4, right=4)
- plt.ylim(bottom=0, top=6)
- plt.xlabel('input')
- plt.ylabel('output')
- plt.legend()
- fig.tight_layout()
- fig.savefig('comparison.png', dpi=200)
-
-
-def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):
- tl = 3 # line thickness
- tf = max(tl - 1, 1) # font thickness
- if os.path.isfile(fname): # do not overwrite
- return None
-
- if isinstance(images, torch.Tensor):
- images = images.cpu().float().numpy()
-
- if isinstance(targets, torch.Tensor):
- targets = targets.cpu().numpy()
-
- # un-normalise
- if np.max(images[0]) <= 1:
- images *= 255
-
- bs, _, h, w = images.shape # batch size, _, height, width
- bs = min(bs, max_subplots) # limit plot images
- ns = np.ceil(bs ** 0.5) # number of subplots (square)
-
- # Check if we should resize
- scale_factor = max_size / max(h, w)
- if scale_factor < 1:
- h = math.ceil(scale_factor * h)
- w = math.ceil(scale_factor * w)
-
- # Empty array for output
- mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8)
-
- # Fix class - colour map
- prop_cycle = plt.rcParams['axes.prop_cycle']
- # https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb
- hex2rgb = lambda h: tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
- color_lut = [hex2rgb(h) for h in prop_cycle.by_key()['color']]
-
- for i, img in enumerate(images):
- if i == max_subplots: # if last batch has fewer images than we expect
- break
-
- block_x = int(w * (i // ns))
- block_y = int(h * (i % ns))
-
- img = img.transpose(1, 2, 0)
- if scale_factor < 1:
- img = cv2.resize(img, (w, h))
-
- mosaic[block_y:block_y + h, block_x:block_x + w, :] = img
- if len(targets) > 0:
- image_targets = targets[targets[:, 0] == i]
- boxes = xywh2xyxy(image_targets[:, 2:6]).T
- classes = image_targets[:, 1].astype('int')
- gt = image_targets.shape[1] == 6 # ground truth if no conf column
- conf = None if gt else image_targets[:, 6] # check for confidence presence (gt vs pred)
-
- boxes[[0, 2]] *= w
- boxes[[0, 2]] += block_x
- boxes[[1, 3]] *= h
- boxes[[1, 3]] += block_y
- for j, box in enumerate(boxes.T):
- cls = int(classes[j])
- color = color_lut[cls % len(color_lut)]
- cls = names[cls] if names else cls
- if gt or conf[j] > 0.3: # 0.3 conf thresh
- label = '%s' % cls if gt else '%s %.1f' % (cls, conf[j])
- plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)
-
- # Draw image filename labels
- if paths is not None:
- label = os.path.basename(paths[i])[:40] # trim to 40 char
- t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
- cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,
- lineType=cv2.LINE_AA)
-
- # Image border
- cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)
-
- if fname is not None:
- mosaic = cv2.resize(mosaic, (int(ns * w * 0.5), int(ns * h * 0.5)), interpolation=cv2.INTER_AREA)
- cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB))
-
- return mosaic
-
-
-def plot_lr_scheduler(optimizer, scheduler, epochs=300):
- # Plot LR simulating training for full epochs
- optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals
- y = []
- for _ in range(epochs):
- scheduler.step()
- y.append(optimizer.param_groups[0]['lr'])
- plt.plot(y, '.-', label='LR')
- plt.xlabel('epoch')
- plt.ylabel('LR')
- plt.grid()
- plt.xlim(0, epochs)
- plt.ylim(0)
- plt.tight_layout()
- plt.savefig('LR.png', dpi=200)
-
-
-def plot_test_txt(): # from utils.utils import *; plot_test()
- # Plot test.txt histograms
- x = np.loadtxt('test.txt', dtype=np.float32)
- box = xyxy2xywh(x[:, :4])
- cx, cy = box[:, 0], box[:, 1]
-
- fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
- ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
- ax.set_aspect('equal')
- plt.savefig('hist2d.png', dpi=300)
-
- fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
- ax[0].hist(cx, bins=600)
- ax[1].hist(cy, bins=600)
- plt.savefig('hist1d.png', dpi=200)
-
-
-def plot_targets_txt(): # from utils.utils import *; plot_targets_txt()
- # Plot targets.txt histograms
- x = np.loadtxt('targets.txt', dtype=np.float32).T
- s = ['x targets', 'y targets', 'width targets', 'height targets']
- fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
- ax = ax.ravel()
- for i in range(4):
- ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std()))
- ax[i].legend()
- ax[i].set_title(s[i])
- plt.savefig('targets.jpg', dpi=200)
-
-
-def plot_study_txt(f='study.txt', x=None): # from utils.utils import *; plot_study_txt()
- # Plot study.txt generated by test.py
- fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)
- ax = ax.ravel()
-
- fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
- for f in ['coco_study/study_coco_yolov5%s.txt' % x for x in ['s', 'm', 'l', 'x']]:
- y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
- x = np.arange(y.shape[1]) if x is None else np.array(x)
- s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)']
- for i in range(7):
- ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
- ax[i].set_title(s[i])
-
- j = y[3].argmax() + 1
- ax2.plot(y[6, :j], y[3, :j] * 1E2, '.-', linewidth=2, markersize=8,
- label=Path(f).stem.replace('study_coco_', '').replace('yolo', 'YOLO'))
-
- ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [33.5, 39.1, 42.5, 45.9, 49., 50.5],
- 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')
-
- ax2.grid()
- ax2.set_xlim(0, 30)
- ax2.set_ylim(28, 50)
- ax2.set_yticks(np.arange(30, 55, 5))
- ax2.set_xlabel('GPU Speed (ms/img)')
- ax2.set_ylabel('COCO AP val')
- ax2.legend(loc='lower right')
- plt.savefig('study_mAP_latency.png', dpi=300)
- plt.savefig(f.replace('.txt', '.png'), dpi=200)
-
-
-def plot_labels(labels):
- # plot dataset labels
-    c, b = labels[:, 0], labels[:, 1:].transpose()  # classes, boxes
-
- def hist2d(x, y, n=100):
- xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
- hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
- xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
- yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
- return np.log(hist[xidx, yidx])
-
- fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
- ax = ax.ravel()
- ax[0].hist(c, bins=int(c.max() + 1))
- ax[0].set_xlabel('classes')
- ax[1].scatter(b[0], b[1], c=hist2d(b[0], b[1], 90), cmap='jet')
- ax[1].set_xlabel('x')
- ax[1].set_ylabel('y')
- ax[2].scatter(b[2], b[3], c=hist2d(b[2], b[3], 90), cmap='jet')
- ax[2].set_xlabel('width')
- ax[2].set_ylabel('height')
- plt.savefig('labels.png', dpi=200)
- plt.close()
-
-
-def plot_evolution_results(hyp): # from utils.utils import *; plot_evolution_results(hyp)
- # Plot hyperparameter evolution results in evolve.txt
- x = np.loadtxt('evolve.txt', ndmin=2)
- f = fitness(x)
- # weights = (f - f.min()) ** 2 # for weighted results
- plt.figure(figsize=(12, 10), tight_layout=True)
- matplotlib.rc('font', **{'size': 8})
- for i, (k, v) in enumerate(hyp.items()):
- y = x[:, i + 7]
- # mu = (y * weights).sum() / weights.sum() # best weighted result
- mu = y[f.argmax()] # best single result
- plt.subplot(4, 5, i + 1)
- plt.plot(mu, f.max(), 'o', markersize=10)
- plt.plot(y, f, '.')
- plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters
- print('%15s: %.3g' % (k, mu))
- plt.savefig('evolve.png', dpi=200)
-
-
-def plot_results_overlay(start=0, stop=0): # from utils.utils import *; plot_results_overlay()
- # Plot training 'results*.txt', overlaying train and val losses
- s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95'] # legends
- t = ['GIoU', 'Objectness', 'Classification', 'P-R', 'mAP-F1'] # titles
- for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
- results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
- n = results.shape[1] # number of rows
- x = range(start, min(stop, n) if stop else n)
- fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True)
- ax = ax.ravel()
- for i in range(5):
- for j in [i, i + 5]:
- y = results[j, x]
- ax[i].plot(x, y, marker='.', label=s[j])
- # y_smooth = butter_lowpass_filtfilt(y)
- # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j])
-
- ax[i].set_title(t[i])
- ax[i].legend()
- ax[i].set_ylabel(f) if i == 0 else None # add filename
- fig.savefig(f.replace('.txt', '.png'), dpi=200)
-
-
-def plot_results(start=0, stop=0, bucket='', id=(), labels=()): # from utils.utils import *; plot_results()
- # Plot training 'results*.txt' as seen in https://github.com/ultralytics/yolov5#reproduce-our-training
- fig, ax = plt.subplots(2, 5, figsize=(12, 6))
- ax = ax.ravel()
- s = ['GIoU', 'Objectness', 'Classification', 'Precision', 'Recall',
- 'val GIoU', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95']
- if bucket:
- os.system('rm -rf storage.googleapis.com')
- files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]
- else:
- files = glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')
- for fi, f in enumerate(files):
- try:
- results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
- n = results.shape[1] # number of rows
- x = range(start, min(stop, n) if stop else n)
- for i in range(10):
- y = results[i, x]
- if i in [0, 1, 2, 5, 6, 7]:
-                    y[y == 0] = np.nan  # don't show zero loss values
- # y /= y[0] # normalize
- label = labels[fi] if len(labels) else Path(f).stem
- ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8)
- ax[i].set_title(s[i])
- # if i in [5, 6, 7]: # share train and val loss y axes
- # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
- except:
- print('Warning: Plotting error for %s, skipping file' % f)
-
- fig.tight_layout()
- ax[1].legend()
- fig.savefig('results.png', dpi=200)
diff --git a/spaces/Audio-AGI/AudioSep/models/CLAP/training/__init__.py b/spaces/Audio-AGI/AudioSep/models/CLAP/training/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/tutorials/deployment.md b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/tutorials/deployment.md
deleted file mode 100644
index 173b9a0e1ec1e768d1b9dc5744c104578512d638..0000000000000000000000000000000000000000
--- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/tutorials/deployment.md
+++ /dev/null
@@ -1,137 +0,0 @@
-# Deployment
-
-Models written in Python need to go through an export process to become a deployable artifact.
-A few basic concepts about this process:
-
-__"Export method"__ is how a Python model is fully serialized to a deployable format.
-We support the following export methods:
-
-* `tracing`: see [pytorch documentation](https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html) to learn about it
-* `scripting`: see [pytorch documentation](https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html) to learn about it
-* `caffe2_tracing`: replace parts of the model by caffe2 operators, then use tracing.
-
-__"Format"__ is how a serialized model is described in a file, e.g.
-TorchScript, Caffe2 protobuf, ONNX format.
-__"Runtime"__ is an engine that loads a serialized model and executes it,
-e.g., PyTorch, Caffe2, TensorFlow, onnxruntime, TensorRT, etc.
-A runtime is often tied to a specific format
-(e.g. PyTorch needs TorchScript format, Caffe2 needs protobuf format).
-We currently support the following combination and each has some limitations:
-
-```eval_rst
-+----------------------------+-------------+-------------+-----------------------------+
-| Export Method | tracing | scripting | caffe2_tracing |
-+============================+=============+=============+=============================+
-| **Formats** | TorchScript | TorchScript | Caffe2, TorchScript, ONNX |
-+----------------------------+-------------+-------------+-----------------------------+
-| **Runtime** | PyTorch | PyTorch | Caffe2, PyTorch |
-+----------------------------+-------------+-------------+-----------------------------+
-| C++/Python inference | ✅ | ✅ | ✅ |
-+----------------------------+-------------+-------------+-----------------------------+
-| Dynamic resolution | ✅ | ✅ | ✅ |
-+----------------------------+-------------+-------------+-----------------------------+
-| Batch size requirement | Constant | Dynamic | Batch inference unsupported |
-+----------------------------+-------------+-------------+-----------------------------+
-| Extra runtime deps | torchvision | torchvision | Caffe2 ops (usually already |
-| | | | |
-| | | | included in PyTorch) |
-+----------------------------+-------------+-------------+-----------------------------+
-| Faster/Mask/Keypoint R-CNN | ✅ | ✅ | ✅ |
-+----------------------------+-------------+-------------+-----------------------------+
-| RetinaNet | ✅ | ✅ | ✅ |
-+----------------------------+-------------+-------------+-----------------------------+
-| PointRend R-CNN | ✅ | ❌ | ❌ |
-+----------------------------+-------------+-------------+-----------------------------+
-| Cascade R-CNN | ✅ | ❌ | ❌ |
-+----------------------------+-------------+-------------+-----------------------------+
-
-```
-
-`caffe2_tracing` is going to be deprecated.
-We don't plan to work on additional support for other formats/runtimes, but contributions are welcome.
-
-
-## Deployment with Tracing or Scripting
-
-Models can be exported to TorchScript format, by either
-[tracing or scripting](https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html).
-The output model file can be loaded without detectron2 dependency in either Python or C++.
-The exported model often requires torchvision (or its C++ library) dependency for some custom ops.
-
-This feature requires PyTorch ≥ 1.8.
-
-### Coverage
-Most official models under the meta architectures `GeneralizedRCNN` and `RetinaNet`
-are supported in both tracing and scripting mode.
-Cascade R-CNN and PointRend are currently supported in tracing.
-Users' custom extensions are supported if they are also scriptable or traceable.
-
-For models exported with tracing, dynamic input resolution is allowed, but batch size
-(number of input images) must be fixed.
-Scripting can support dynamic batch size.
-
-### Usage
-
-The main export APIs for tracing and scripting are [TracingAdapter](../modules/export.html#detectron2.export.TracingAdapter)
-and [scripting_with_instances](../modules/export.html#detectron2.export.scripting_with_instances).
-Their usage is currently demonstrated in [test_export_torchscript.py](../../tests/test_export_torchscript.py)
-(see `TestScripting` and `TestTracing`)
-as well as the [deployment example](../../tools/deploy).
-Please check that these examples can run, and then modify for your use cases.
-The usage currently requires some user effort and model-specific knowledge to work around the limitations of scripting and tracing.
-In the future we plan to wrap these under simpler APIs to lower the bar to use them.
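-
-As a rough orientation only (the config name, placeholder image and output path below are assumptions,
-not part of detectron2's documented example), a tracing export might look like the sketch below;
-the linked tests and `export_model.py` remain the authoritative reference:
-
-```python
-import torch
-from detectron2 import model_zoo
-from detectron2.export import TracingAdapter
-
-# Any traceable GeneralizedRCNN-style model, in eval mode.
-model = model_zoo.get("COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml", trained=True).eval()
-
-# Standard detectron2 input format: a list of dicts holding a CHW image tensor.
-image = torch.rand(3, 480, 640) * 255  # placeholder image; use a real image for meaningful tracing
-inputs = [{"image": image}]
-
-# TracingAdapter flattens dict-based inputs/outputs into tensors so torch.jit.trace can handle them.
-adapter = TracingAdapter(model, inputs)
-traced = torch.jit.trace(adapter, adapter.flattened_inputs)
-traced.save("traced_model.ts")
-```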
-
-## Deployment with Caffe2-tracing
-We provide [Caffe2Tracer](../modules/export.html#detectron2.export.Caffe2Tracer)
-that performs the export logic.
-It replaces parts of the model with Caffe2 operators,
-and then exports the model to Caffe2, TorchScript, or ONNX format.
-
-The converted model is able to run in either Python or C++ without detectron2/torchvision dependency, on CPU or GPUs.
-It has a runtime optimized for CPU & mobile inference, but not optimized for GPU inference.
-
-This feature requires 1.9 > ONNX ≥ 1.6.
-
-### Coverage
-
-Most official models under these 3 common meta architectures: `GeneralizedRCNN`, `RetinaNet`, `PanopticFPN`
-are supported. Cascade R-CNN is not supported. Batch inference is not supported.
-
-Users' custom extensions under these architectures (added through registration) are supported
-as long as they do not contain control flow or operators not available in Caffe2 (e.g. deformable convolution).
-For example, custom backbones and heads are often supported out of the box.
-
-### Usage
-
-The APIs are listed at [the API documentation](../modules/export).
-We provide [export_model.py](../../tools/deploy/) as an example that uses
-these APIs to convert a standard model. For custom models/datasets, you can add them to this script.
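-
-A compressed, illustrative sketch of roughly what the script does (the config name, placeholder
-tracing input and output directory below are assumptions, not the script's defaults):
-
-```python
-import torch
-from detectron2 import model_zoo
-from detectron2.checkpoint import DetectionCheckpointer
-from detectron2.export import Caffe2Tracer
-from detectron2.modeling import build_model
-
-# Build a standard model and load its weights on CPU.
-cfg = model_zoo.get_config("COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml", trained=True)
-cfg.MODEL.DEVICE = "cpu"
-model = build_model(cfg)
-DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
-model.eval()
-
-# Placeholder tracing input; export_model.py uses a real sample batch from the dataset instead.
-inputs = [{"image": torch.rand(3, 480, 640) * 255, "height": 480, "width": 640}]
-
-tracer = Caffe2Tracer(cfg, model, inputs)
-caffe2_model = tracer.export_caffe2()  # or tracer.export_onnx() / tracer.export_torchscript()
-caffe2_model.save_protobuf("./caffe2_model")  # output directory is an arbitrary choice here
-```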
-
-### Use the model in C++/Python
-
-The model can be loaded in C++ and deployed with
-either the Caffe2 or PyTorch runtime. [C++ examples](../../tools/deploy/) for Mask R-CNN
-are given as a reference. Note that:
-
-* Models exported with `caffe2_tracing` method take a special input format
- described in [documentation](../modules/export.html#detectron2.export.Caffe2Tracer).
- This was taken care of in the C++ example.
-
-* The converted models do not contain post-processing operations that
- transform raw layer outputs into formatted predictions.
- For example, the C++ examples only produce raw outputs (28x28 masks) from the final
- layers that are not post-processed, because in actual deployment, an application often needs
- its custom lightweight post-processing, so this step is left for users.
-
-To help use the Caffe2-format model in python,
-we provide a python wrapper around the converted model, in the
-[Caffe2Model.\_\_call\_\_](../modules/export.html#detectron2.export.Caffe2Model.__call__) method.
-This method has an interface that's identical to the [pytorch versions of models](./models.md),
-and it internally applies pre/post-processing code to match the formats.
-This wrapper can serve as a reference for how to use Caffe2's python API,
-or for how to implement pre/post-processing in actual deployment.
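-
-As a minimal sketch (the `./caffe2_model` directory is an assumed output location from the export
-step above, and the random image is a placeholder), using this wrapper from Python could look like:
-
-```python
-import torch
-from detectron2.export import Caffe2Model
-
-# Load the protobuf files produced by the caffe2_tracing export.
-model = Caffe2Model.load_protobuf("./caffe2_model")
-
-# Same list-of-dict input format as the original pytorch model (see Caffe2Model.__call__).
-image = torch.rand(3, 480, 640) * 255  # placeholder image
-outputs = model([{"image": image}])
-```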
-
-## Conversion to TensorFlow
-[tensorpack Faster R-CNN](https://github.com/tensorpack/tensorpack/tree/master/examples/FasterRCNN/convert_d2)
-provides scripts to convert a few standard detectron2 R-CNN models to TensorFlow's pb format.
-It works by translating configs and weights, and therefore supports only a few models.
diff --git a/spaces/Benson/text-generation/Examples/Apk Xplore File Manager.md b/spaces/Benson/text-generation/Examples/Apk Xplore File Manager.md
deleted file mode 100644
index e0cded3ece09d3a34f993348c09f72757be08439..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Apk Xplore File Manager.md
+++ /dev/null
@@ -1,83 +0,0 @@
-
-
-# APK Xplore File Manager: A Powerful and Versatile Tool for Android Users
-
-If you are looking for a file manager app that can do more than just browse and organize your files, you may want to take a look at APK Xplore File Manager. This app is a powerful and versatile tool that lets you explore your phone's contents, view images and videos, play music, connect to remote locations, open archives, view PDF files, and much more. In this article, we will show you what APK Xplore File Manager can do, how to download and install it, how to use it, and what its pros and cons are.
-
-## Features of APK Xplore File Manager
-
-### Explore your phone's contents
-
-APK Xplore File Manager lets you access every folder and file on your device, including internal storage, external storage, the root directory, system folders, and hidden files. You can also view the details of each file, such as size, date, permissions, and checksum, and sort files by name, size, date, or type.
-
-### View images and videos
-
-APK Xplore File Manager has a built-in image viewer that can display thumbnails and full-screen images in formats such as JPG, PNG, GIF, BMP, WEBP, etc. You can also zoom in and out, rotate, crop, share, or delete images. The app also includes a video player for formats such as MP4, MKV, AVI, FLV, etc., with controls for playback speed, volume, brightness, aspect ratio, subtitles, and more.
-
-### Play music
-
-APK Xplore File Manager has a built-in music player that can play audio files in formats such as MP3, WAV, OGG, FLAC, etc. You can also create playlists, shuffle songs, repeat tracks, or change the equalizer settings. The app also supports notification controls and background playback.
-
-### Connect to remote locations
-
-### Open archives
-
-APK Xplore File Manager can open archives in formats such as ZIP, RAR, 7ZIP, TAR, GZIP, BZIP2, etc. You can also create, extract, or modify archives with password protection or encryption, split or join files, and compress or decompress them.
-
-### View PDF files
-
-APK Xplore File Manager has a built-in PDF viewer that can display PDF files of various sizes and orientations. You can also zoom in and out, scroll, search, bookmark, or share PDF files.
-
-### Much, much more
-
-APK Xplore File Manager has many more features that make it a powerful and versatile tool for Android users. Some of these features are:
-
-* Text editor: edit text files in formats such as TXT, HTML, XML, JSON, etc., and change the font size, color, style, or encoding.
-* Hex editor: edit binary files in hex mode; search, replace, or insert bytes.
-* SQLite editor: view and edit SQLite databases, run SQL queries, or export data.
-* Root explorer: access your device's root directory if you have root privileges, and change file permissions or ownership.
-* Recycle bin: restore deleted files from the recycle bin, empty it, or exclude certain folders from it.
-* Search: find files or folders by name, size, date, or content, using regular expressions or filters.
-* Batch operations: perform operations on multiple files or folders at once, and use the clipboard to copy, cut, paste, or move them.
-* Themes: customize the app's appearance by changing the theme, color, icons, font, or layout.
-
-
-## How to download and install APK Xplore File Manager
-
-### Download the APK file from a trusted source
-
-### Enable unknown sources on your device
-
-To install APK Xplore File Manager, you need to enable unknown sources on your device, because the app is not available on the Google Play Store and your device must be allowed to install apps from sources other than the Play Store. To enable unknown sources, follow these steps:
-
-* Go to Settings > Security > Unknown sources and turn it on.
-* A warning message will appear. Tap OK to confirm.
-
-### Install the APK file and launch the app
-
-To install APK Xplore File Manager, locate the APK file on your device and tap on it. A prompt will appear asking you to install the app. Tap Install and wait for the installation to complete. Once installed, you can launch the app by tapping Open or by finding it in your app drawer.
-
-## How to use APK Xplore File Manager
-
-### Browse folders and files
-
-APK Xplore File Manager has a simple, intuitive interface that shows your folders and files in a grid or list view. You can switch between views by tapping the icons in the top-right corner of the screen, and swipe left or right to move between tabs such as Home, Favorites, History, Recycle Bin, etc. To open a folder or file, simply tap on it. To go back to the previous folder, tap the back button in the top-left corner of the screen or swipe right from the left edge of the screen.
-
-### Perform various actions on files
-
-### Access the settings and preferences
-
-To access the settings and preferences of APK Xplore File Manager, tap the menu icon in the top-left corner of the screen and then tap Settings. Here you can customize various aspects of the app, such as the theme, color, icons, font, layout, language, etc. You can also enable or disable features such as the root explorer, recycle bin, hidden files, checksums, and encryption, back up or restore your settings, and clear the cache or history.
-
Pros and cons of APK Xplore File Manager
-
Pros
-
APK Xplore File Manager has many advantages that make it a powerful and versatile tool for Android users. Some of these advantages are:
-
-
It is free and ad-free.
-
It supports a wide range of file formats and protocols.
-
It has a simple, intuitive interface that is easy to use.
-
It has many features that enhance the app's functionality and usability.
-
It is customizable and flexible enough to suit your preferences and needs.
-
-
Cons
-
APK Xplore File Manager also has some drawbacks that may limit its performance or compatibility. Some of these drawbacks are:
-
-
It requires Android 4.0 or higher to run.
-
It may not work well with some devices or systems that have different file structures or permissions.
-
It may consume a lot of memory or battery if you use it for a long time or with large files.
-
It may have some bugs or glitches that can affect its functionality or stability.
-
-
Conclusion and FAQs
-
-
Here are some frequently asked questions about APK Xplore File Manager:
Q: How do I update APK Xplore File Manager?
A: You can update APK Xplore File Manager by downloading the latest version of the APK file from its official website or from other reputable sources such as APKPure or APKMirror. You can then install it over the existing app without losing your data or settings.
Q: How do I uninstall APK Xplore File Manager?
A: You can uninstall APK Xplore File Manager by going to Settings > Apps > APK Xplore File Manager and tapping Uninstall. Alternatively, you can long-press the app icon in the app drawer and drag it to the Uninstall option.
-
Q: How can I contact the developer of APK Xplore File Manager?
-
A: You can contact the developer of APK Xplore File Manager by sending an email to apkxplorefilemanager@gmail.com. You can also visit their website at https://apkxplorefilemanager.com/ for more information.
-
Q: How can I support the development of APK Xplore File Manager?
-
A: You can support the development of APK Xplore File Manager by rating and reviewing it on its official website or on other platforms such as APKPure or APKMirror. You can also share it with friends and family who might find it useful, or donate to the developer via PayPal at https://paypal.me/apkxplorefilemanager.
-
Q: How can I report a bug or suggest a feature for APK Xplore File Manager?
-
A: You can report a bug or suggest a feature for APK Xplore File Manager by sending an email to apkxplorefilemanager@gmail.com. You can also leave a comment on its website or on other platforms such as APKPure or APKMirror. The developer appreciates your feedback and will try to fix bugs or implement features as soon as possible.
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Cmo Descargar La Tarjeta Aadhar En Lnea.md b/spaces/Benson/text-generation/Examples/Cmo Descargar La Tarjeta Aadhar En Lnea.md
deleted file mode 100644
index 56fa1a5da50de71dcba78141582d0b176e2b9920..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Cmo Descargar La Tarjeta Aadhar En Lnea.md
+++ /dev/null
@@ -1,51 +0,0 @@
-
-
How to download your Aadhaar card online
-
The Aadhaar card is a unique identification number issued by the Indian government to every resident of India. It is a 12-digit number linked to your biometric and demographic information, such as your name, date of birth, address, gender, photo, fingerprint, and iris scan. The Aadhaar card is used as proof of identity and address for various purposes, such as opening a bank account, applying for a passport, getting a mobile connection, receiving government subsidies, and more.
-
If you have enrolled for an Aadhaar card or updated your details at an enrollment center, you can download an electronic version of your Aadhaar card online. This is also known as e-Aadhaar or digital Aadhaar. It is just as valid as the original Aadhaar letter you receive by mail. You can also order a PVC (polyvinyl chloride) card, which is more durable and convenient to carry.
In this article, we will show you how to download your Aadhaar card online in simple steps. We will also explain the benefits of the online Aadhaar card and answer some frequently asked questions about it.
-
Steps to download your Aadhaar card online
-
To download your Aadhaar card online, you need your Aadhaar number or your Enrollment ID (EID). You also need access to your registered mobile number or email address to receive a one-time password (OTP), or you can use your biometric authentication. Follow these steps to download your Aadhaar card online:
-
Step 1: Visit the official UIDAI website
-
UIDAI (Unique Identification Authority of India) is the government agency that issues and manages Aadhaar cards. You can visit its official website at https://myaadhaar.uidai.gov.in/. This is the portal for all online Aadhaar services.
-
Step 2: Select the option to download Aadhaar
-
-
Step 3: Enter your Aadhaar number or Enrollment ID
-
You can download your Aadhaar card using any of these three methods:
-
-
Aadhaar number (UID): This is the 12-digit number printed on your Aadhaar card or letter.
-
Enrollment ID (EID): This is the 14-digit number and 14-digit date-and-time stamp printed on the enrollment slip you received when you enrolled for the Aadhaar card.
-
Virtual ID (VID): This is a temporary 16-digit number that you can generate from the UIDAI website. It can be used instead of your Aadhaar number for privacy and security reasons.
-
-
Select the method you prefer and enter the corresponding number in the box. You also need to enter the captcha code for verification. Then click "Send OTP" or "Enter TOTP" depending on whether you want to receive an OTP on your registered mobile number or email address, or use a time-based OTP generated by an app such as mAadhaar or Google Authenticator.
-
Step 4: Verify your identity with OTP or biometrics
-
If you chose to receive an OTP, you will get a six-digit code on your registered mobile number or email address. Enter this code in the box and click "Verify and Download". If you chose to use a TOTP, enter the eight-digit code generated by the app and click "Verify and Download". If you chose to use biometric authentication, you need a registered biometric device that can scan your fingerprint or iris. Connect the device to your computer, follow the on-screen instructions to scan your biometrics, and click "Verify and Download".
-
-
Step 5: Download and print your Aadhaar card
-
-
Benefits of the online Aadhaar card
-
Downloading your Aadhaar card online has many benefits over waiting for the physical Aadhaar letter to arrive by mail. Here are some of them:
-
Convenience and accessibility
-
You can download your Aadhaar card online at any time and from anywhere via the UIDAI website. You do not need to visit an enrollment center or post office to get your Aadhaar card. You can also download multiple copies of your Aadhaar card if you lose or damage the original.
-
Security and validity
-
The online Aadhaar card is just as valid as the physical Aadhaar letter issued by UIDAI. It carries a digital signature that verifies its authenticity. It also includes a QR code containing your demographic and biometric information that can be scanned by authorized agencies. You can also use the online Aadhaar card as an e-KYC document for various services.
-
Eco-friendly and cost-effective
-
By downloading your Aadhaar card online, you save paper and reduce environmental impact. You also save money on postage and printing. The online Aadhaar card is free and can be downloaded an unlimited number of times.
-
Frequently asked questions about the online Aadhaar card
-
Here are some common questions and answers about the online Aadhaar card:
-
-
Q: Is the online Aadhaar card valid for travel?
-
A: Yes, the online Aadhaar card is accepted as valid proof of identity for domestic travel by airlines, railways, and buses. For international travel, however, you need a passport or other documents as required by the destination country.
-
Q: How can I update my details on the online Aadhaar card?
-
A: You can update details such as your name, address, mobile number, email address, gender, date of birth, photo, fingerprint, and iris scan by visiting an enrollment center or by using the online Self Service Update Portal (SSUP) on the UIDAI website.
-
Q: How can I check the status of my online Aadhaar card?
-
A: You can check the status of your online Aadhaar card by using the "Check Aadhaar Status" option on the UIDAI website. You need to enter your Enrollment ID or Aadhaar number and the captcha code to check the status.
-
Q: How can I lock or unlock my online Aadhaar card?
-
A: You can lock or unlock your online Aadhaar card by using the "Lock/Unlock Biometrics" option on the UIDAI website. You need to enter your Aadhaar number or Virtual ID and an OTP or TOTP to lock or unlock your biometric data. This prevents unauthorized access to your biometric information.
-
Q: How can I share my online Aadhaar card with others?
-
A: You can share your online Aadhaar card with others by using the "Share QR Code" option on the UIDAI website. You need to enter your Aadhaar number or Virtual ID and an OTP or TOTP to generate a QR code containing your demographic and biometric information. You can then download or print the QR code and share it with others, who can scan it using a QR code reader app.
-
-
Conclusion
-
The online Aadhaar card is a convenient and secure way to obtain your unique identification number from the Indian government. It is just as valid as the physical Aadhaar letter and can be used for various purposes. You can download your Aadhaar card online in simple steps from the UIDAI website, using your Aadhaar number or Enrollment ID and verifying your identity with an OTP or biometrics. You can also request a PVC card or generate a QR code to share your Aadhaar card with others. The online Aadhaar card offers convenience, accessibility, security, validity, eco-friendliness, and cost savings. We hope this article has helped you understand how to download your Aadhaar card online and has answered your questions. If you have further questions, feel free to contact us or visit the UIDAI website for more information.
-
-
\ No newline at end of file
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/vcs/bazaar.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/vcs/bazaar.py
deleted file mode 100644
index 20a17ed09272a09a5b3c0bfbd0e6c43f78db4c1e..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/vcs/bazaar.py
+++ /dev/null
@@ -1,112 +0,0 @@
-import logging
-from typing import List, Optional, Tuple
-
-from pip._internal.utils.misc import HiddenText, display_path
-from pip._internal.utils.subprocess import make_command
-from pip._internal.utils.urls import path_to_url
-from pip._internal.vcs.versioncontrol import (
- AuthInfo,
- RemoteNotFoundError,
- RevOptions,
- VersionControl,
- vcs,
-)
-
-logger = logging.getLogger(__name__)
-
-
-class Bazaar(VersionControl):
- name = "bzr"
- dirname = ".bzr"
- repo_name = "branch"
- schemes = (
- "bzr+http",
- "bzr+https",
- "bzr+ssh",
- "bzr+sftp",
- "bzr+ftp",
- "bzr+lp",
- "bzr+file",
- )
-
- @staticmethod
- def get_base_rev_args(rev: str) -> List[str]:
- return ["-r", rev]
-
- def fetch_new(
- self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int
- ) -> None:
- rev_display = rev_options.to_display()
- logger.info(
- "Checking out %s%s to %s",
- url,
- rev_display,
- display_path(dest),
- )
- if verbosity <= 0:
- flag = "--quiet"
- elif verbosity == 1:
- flag = ""
- else:
- flag = f"-{'v'*verbosity}"
- cmd_args = make_command(
- "checkout", "--lightweight", flag, rev_options.to_args(), url, dest
- )
- self.run_command(cmd_args)
-
- def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
- self.run_command(make_command("switch", url), cwd=dest)
-
- def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
- output = self.run_command(
- make_command("info"), show_stdout=False, stdout_only=True, cwd=dest
- )
- if output.startswith("Standalone "):
- # Older versions of pip used to create standalone branches.
- # Convert the standalone branch to a checkout by calling "bzr bind".
- cmd_args = make_command("bind", "-q", url)
- self.run_command(cmd_args, cwd=dest)
-
- cmd_args = make_command("update", "-q", rev_options.to_args())
- self.run_command(cmd_args, cwd=dest)
-
- @classmethod
- def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]:
- # hotfix the URL scheme after removing bzr+ from bzr+ssh:// re-add it
- url, rev, user_pass = super().get_url_rev_and_auth(url)
- if url.startswith("ssh://"):
- url = "bzr+" + url
- return url, rev, user_pass
-
- @classmethod
- def get_remote_url(cls, location: str) -> str:
- urls = cls.run_command(
- ["info"], show_stdout=False, stdout_only=True, cwd=location
- )
- for line in urls.splitlines():
- line = line.strip()
- for x in ("checkout of branch: ", "parent branch: "):
- if line.startswith(x):
- repo = line.split(x)[1]
- if cls._is_local_repository(repo):
- return path_to_url(repo)
- return repo
- raise RemoteNotFoundError
-
- @classmethod
- def get_revision(cls, location: str) -> str:
- revision = cls.run_command(
- ["revno"],
- show_stdout=False,
- stdout_only=True,
- cwd=location,
- )
- return revision.splitlines()[-1]
-
- @classmethod
- def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool:
- """Always assume the versions don't match"""
- return False
-
-
-vcs.register(Bazaar)
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/tenacity/__init__.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/tenacity/__init__.py
deleted file mode 100644
index 4f1603adeb6fcf9bc1c4a16a9b6e16223c6534f3..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/tenacity/__init__.py
+++ /dev/null
@@ -1,608 +0,0 @@
-# Copyright 2016-2018 Julien Danjou
-# Copyright 2017 Elisey Zanko
-# Copyright 2016 Étienne Bersac
-# Copyright 2016 Joshua Harlow
-# Copyright 2013-2014 Ray Holder
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import functools
-import sys
-import threading
-import time
-import typing as t
-import warnings
-from abc import ABC, abstractmethod
-from concurrent import futures
-from inspect import iscoroutinefunction
-
-# Import all built-in retry strategies for easier usage.
-from .retry import retry_base # noqa
-from .retry import retry_all # noqa
-from .retry import retry_always # noqa
-from .retry import retry_any # noqa
-from .retry import retry_if_exception # noqa
-from .retry import retry_if_exception_type # noqa
-from .retry import retry_if_exception_cause_type # noqa
-from .retry import retry_if_not_exception_type # noqa
-from .retry import retry_if_not_result # noqa
-from .retry import retry_if_result # noqa
-from .retry import retry_never # noqa
-from .retry import retry_unless_exception_type # noqa
-from .retry import retry_if_exception_message # noqa
-from .retry import retry_if_not_exception_message # noqa
-
-# Import all nap strategies for easier usage.
-from .nap import sleep # noqa
-from .nap import sleep_using_event # noqa
-
-# Import all built-in stop strategies for easier usage.
-from .stop import stop_after_attempt # noqa
-from .stop import stop_after_delay # noqa
-from .stop import stop_all # noqa
-from .stop import stop_any # noqa
-from .stop import stop_never # noqa
-from .stop import stop_when_event_set # noqa
-
-# Import all built-in wait strategies for easier usage.
-from .wait import wait_chain # noqa
-from .wait import wait_combine # noqa
-from .wait import wait_exponential # noqa
-from .wait import wait_fixed # noqa
-from .wait import wait_incrementing # noqa
-from .wait import wait_none # noqa
-from .wait import wait_random # noqa
-from .wait import wait_random_exponential # noqa
-from .wait import wait_random_exponential as wait_full_jitter # noqa
-from .wait import wait_exponential_jitter # noqa
-
-# Import all built-in before strategies for easier usage.
-from .before import before_log # noqa
-from .before import before_nothing # noqa
-
-# Import all built-in after strategies for easier usage.
-from .after import after_log # noqa
-from .after import after_nothing # noqa
-
-# Import all built-in after strategies for easier usage.
-from .before_sleep import before_sleep_log # noqa
-from .before_sleep import before_sleep_nothing # noqa
-
-# Replace a conditional import with a hard-coded None so that pip does
-# not attempt to use tornado even if it is present in the environment.
-# If tornado is non-None, tenacity will attempt to execute some code
-# that is sensitive to the version of tornado, which could break pip
-# if an old version is found.
-tornado = None # type: ignore
-
-if t.TYPE_CHECKING:
- import types
-
- from .retry import RetryBaseT
- from .stop import StopBaseT
- from .wait import WaitBaseT
-
-
-WrappedFnReturnT = t.TypeVar("WrappedFnReturnT")
-WrappedFn = t.TypeVar("WrappedFn", bound=t.Callable[..., t.Any])
-
-
-class TryAgain(Exception):
- """Always retry the executed function when raised."""
-
-
-NO_RESULT = object()
-
-
-class DoAttempt:
- pass
-
-
-class DoSleep(float):
- pass
-
-
-class BaseAction:
- """Base class for representing actions to take by retry object.
-
- Concrete implementations must define:
- - __init__: to initialize all necessary fields
- - REPR_FIELDS: class variable specifying attributes to include in repr(self)
- - NAME: for identification in retry object methods and callbacks
- """
-
- REPR_FIELDS: t.Sequence[str] = ()
- NAME: t.Optional[str] = None
-
- def __repr__(self) -> str:
- state_str = ", ".join(f"{field}={getattr(self, field)!r}" for field in self.REPR_FIELDS)
- return f"{self.__class__.__name__}({state_str})"
-
- def __str__(self) -> str:
- return repr(self)
-
-
-class RetryAction(BaseAction):
- REPR_FIELDS = ("sleep",)
- NAME = "retry"
-
- def __init__(self, sleep: t.SupportsFloat) -> None:
- self.sleep = float(sleep)
-
-
-_unset = object()
-
-
-def _first_set(first: t.Union[t.Any, object], second: t.Any) -> t.Any:
- return second if first is _unset else first
-
-
-class RetryError(Exception):
- """Encapsulates the last attempt instance right before giving up."""
-
- def __init__(self, last_attempt: "Future") -> None:
- self.last_attempt = last_attempt
- super().__init__(last_attempt)
-
- def reraise(self) -> "t.NoReturn":
- if self.last_attempt.failed:
- raise self.last_attempt.result()
- raise self
-
- def __str__(self) -> str:
- return f"{self.__class__.__name__}[{self.last_attempt}]"
-
-
-class AttemptManager:
- """Manage attempt context."""
-
- def __init__(self, retry_state: "RetryCallState"):
- self.retry_state = retry_state
-
- def __enter__(self) -> None:
- pass
-
- def __exit__(
- self,
- exc_type: t.Optional[t.Type[BaseException]],
- exc_value: t.Optional[BaseException],
- traceback: t.Optional["types.TracebackType"],
- ) -> t.Optional[bool]:
- if exc_type is not None and exc_value is not None:
- self.retry_state.set_exception((exc_type, exc_value, traceback))
- return True # Swallow exception.
- else:
- # We don't have the result, actually.
- self.retry_state.set_result(None)
- return None
-
-
-class BaseRetrying(ABC):
- def __init__(
- self,
- sleep: t.Callable[[t.Union[int, float]], None] = sleep,
- stop: "StopBaseT" = stop_never,
- wait: "WaitBaseT" = wait_none(),
- retry: "RetryBaseT" = retry_if_exception_type(),
- before: t.Callable[["RetryCallState"], None] = before_nothing,
- after: t.Callable[["RetryCallState"], None] = after_nothing,
- before_sleep: t.Optional[t.Callable[["RetryCallState"], None]] = None,
- reraise: bool = False,
- retry_error_cls: t.Type[RetryError] = RetryError,
- retry_error_callback: t.Optional[t.Callable[["RetryCallState"], t.Any]] = None,
- ):
- self.sleep = sleep
- self.stop = stop
- self.wait = wait
- self.retry = retry
- self.before = before
- self.after = after
- self.before_sleep = before_sleep
- self.reraise = reraise
- self._local = threading.local()
- self.retry_error_cls = retry_error_cls
- self.retry_error_callback = retry_error_callback
-
- def copy(
- self,
- sleep: t.Union[t.Callable[[t.Union[int, float]], None], object] = _unset,
- stop: t.Union["StopBaseT", object] = _unset,
- wait: t.Union["WaitBaseT", object] = _unset,
- retry: t.Union[retry_base, object] = _unset,
- before: t.Union[t.Callable[["RetryCallState"], None], object] = _unset,
- after: t.Union[t.Callable[["RetryCallState"], None], object] = _unset,
- before_sleep: t.Union[t.Optional[t.Callable[["RetryCallState"], None]], object] = _unset,
- reraise: t.Union[bool, object] = _unset,
- retry_error_cls: t.Union[t.Type[RetryError], object] = _unset,
- retry_error_callback: t.Union[t.Optional[t.Callable[["RetryCallState"], t.Any]], object] = _unset,
- ) -> "BaseRetrying":
- """Copy this object with some parameters changed if needed."""
- return self.__class__(
- sleep=_first_set(sleep, self.sleep),
- stop=_first_set(stop, self.stop),
- wait=_first_set(wait, self.wait),
- retry=_first_set(retry, self.retry),
- before=_first_set(before, self.before),
- after=_first_set(after, self.after),
- before_sleep=_first_set(before_sleep, self.before_sleep),
- reraise=_first_set(reraise, self.reraise),
- retry_error_cls=_first_set(retry_error_cls, self.retry_error_cls),
- retry_error_callback=_first_set(retry_error_callback, self.retry_error_callback),
- )
-
- def __repr__(self) -> str:
- return (
- f"<{self.__class__.__name__} object at 0x{id(self):x} ("
- f"stop={self.stop}, "
- f"wait={self.wait}, "
- f"sleep={self.sleep}, "
- f"retry={self.retry}, "
- f"before={self.before}, "
- f"after={self.after})>"
- )
-
- @property
- def statistics(self) -> t.Dict[str, t.Any]:
- """Return a dictionary of runtime statistics.
-
-        This dictionary will be empty when the controller has never been
-        run. While it is running, or after it has run, it should have
-        (but may not have) useful and/or informational keys and values.
-
-        .. warning:: The keys in this dictionary **should** be somewhat
-                     stable (not changing), but their existence **may**
-                     change between major releases as new statistics are
-                     gathered or removed, so before accessing keys ensure
-                     that they actually exist and handle the case when
-                     they do not.
-
-        .. note:: The values in this dictionary are local to the thread
-                  running the call (so if multiple threads share the same
-                  retrying object - either directly or indirectly - they
-                  will each have their own view of the statistics they
-                  have collected; in the future we may provide a way to
-                  aggregate the various statistics from each thread).
- """
- try:
- return self._local.statistics # type: ignore[no-any-return]
- except AttributeError:
- self._local.statistics = t.cast(t.Dict[str, t.Any], {})
- return self._local.statistics
-
- def wraps(self, f: WrappedFn) -> WrappedFn:
- """Wrap a function for retrying.
-
- :param f: A function to wraps for retrying.
- """
-
- @functools.wraps(f)
- def wrapped_f(*args: t.Any, **kw: t.Any) -> t.Any:
- return self(f, *args, **kw)
-
- def retry_with(*args: t.Any, **kwargs: t.Any) -> WrappedFn:
- return self.copy(*args, **kwargs).wraps(f)
-
- wrapped_f.retry = self # type: ignore[attr-defined]
- wrapped_f.retry_with = retry_with # type: ignore[attr-defined]
-
- return wrapped_f # type: ignore[return-value]
-
- def begin(self) -> None:
- self.statistics.clear()
- self.statistics["start_time"] = time.monotonic()
- self.statistics["attempt_number"] = 1
- self.statistics["idle_for"] = 0
-
- def iter(self, retry_state: "RetryCallState") -> t.Union[DoAttempt, DoSleep, t.Any]: # noqa
- fut = retry_state.outcome
- if fut is None:
- if self.before is not None:
- self.before(retry_state)
- return DoAttempt()
-
- is_explicit_retry = fut.failed and isinstance(fut.exception(), TryAgain)
- if not (is_explicit_retry or self.retry(retry_state)):
- return fut.result()
-
- if self.after is not None:
- self.after(retry_state)
-
- self.statistics["delay_since_first_attempt"] = retry_state.seconds_since_start
- if self.stop(retry_state):
- if self.retry_error_callback:
- return self.retry_error_callback(retry_state)
- retry_exc = self.retry_error_cls(fut)
- if self.reraise:
- raise retry_exc.reraise()
- raise retry_exc from fut.exception()
-
- if self.wait:
- sleep = self.wait(retry_state)
- else:
- sleep = 0.0
- retry_state.next_action = RetryAction(sleep)
- retry_state.idle_for += sleep
- self.statistics["idle_for"] += sleep
- self.statistics["attempt_number"] += 1
-
- if self.before_sleep is not None:
- self.before_sleep(retry_state)
-
- return DoSleep(sleep)
-
- def __iter__(self) -> t.Generator[AttemptManager, None, None]:
- self.begin()
-
- retry_state = RetryCallState(self, fn=None, args=(), kwargs={})
- while True:
- do = self.iter(retry_state=retry_state)
- if isinstance(do, DoAttempt):
- yield AttemptManager(retry_state=retry_state)
- elif isinstance(do, DoSleep):
- retry_state.prepare_for_next_attempt()
- self.sleep(do)
- else:
- break
-
- @abstractmethod
- def __call__(
- self,
- fn: t.Callable[..., WrappedFnReturnT],
- *args: t.Any,
- **kwargs: t.Any,
- ) -> WrappedFnReturnT:
- pass
-
-
-class Retrying(BaseRetrying):
- """Retrying controller."""
-
- def __call__(
- self,
- fn: t.Callable[..., WrappedFnReturnT],
- *args: t.Any,
- **kwargs: t.Any,
- ) -> WrappedFnReturnT:
- self.begin()
-
- retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)
- while True:
- do = self.iter(retry_state=retry_state)
- if isinstance(do, DoAttempt):
- try:
- result = fn(*args, **kwargs)
- except BaseException: # noqa: B902
- retry_state.set_exception(sys.exc_info()) # type: ignore[arg-type]
- else:
- retry_state.set_result(result)
- elif isinstance(do, DoSleep):
- retry_state.prepare_for_next_attempt()
- self.sleep(do)
- else:
- return do # type: ignore[no-any-return]
-
-
-if sys.version_info[1] >= 9:
- FutureGenericT = futures.Future[t.Any]
-else:
- FutureGenericT = futures.Future
-
-
-class Future(FutureGenericT):
- """Encapsulates a (future or past) attempted call to a target function."""
-
- def __init__(self, attempt_number: int) -> None:
- super().__init__()
- self.attempt_number = attempt_number
-
- @property
- def failed(self) -> bool:
-        """Return whether an exception is being held in this future."""
- return self.exception() is not None
-
- @classmethod
- def construct(cls, attempt_number: int, value: t.Any, has_exception: bool) -> "Future":
- """Construct a new Future object."""
- fut = cls(attempt_number)
- if has_exception:
- fut.set_exception(value)
- else:
- fut.set_result(value)
- return fut
-
-
-class RetryCallState:
- """State related to a single call wrapped with Retrying."""
-
- def __init__(
- self,
- retry_object: BaseRetrying,
- fn: t.Optional[WrappedFn],
- args: t.Any,
- kwargs: t.Any,
- ) -> None:
- #: Retry call start timestamp
- self.start_time = time.monotonic()
- #: Retry manager object
- self.retry_object = retry_object
- #: Function wrapped by this retry call
- self.fn = fn
- #: Arguments of the function wrapped by this retry call
- self.args = args
- #: Keyword arguments of the function wrapped by this retry call
- self.kwargs = kwargs
-
- #: The number of the current attempt
- self.attempt_number: int = 1
- #: Last outcome (result or exception) produced by the function
- self.outcome: t.Optional[Future] = None
- #: Timestamp of the last outcome
- self.outcome_timestamp: t.Optional[float] = None
- #: Time spent sleeping in retries
- self.idle_for: float = 0.0
- #: Next action as decided by the retry manager
- self.next_action: t.Optional[RetryAction] = None
-
- @property
- def seconds_since_start(self) -> t.Optional[float]:
- if self.outcome_timestamp is None:
- return None
- return self.outcome_timestamp - self.start_time
-
- def prepare_for_next_attempt(self) -> None:
- self.outcome = None
- self.outcome_timestamp = None
- self.attempt_number += 1
- self.next_action = None
-
- def set_result(self, val: t.Any) -> None:
- ts = time.monotonic()
- fut = Future(self.attempt_number)
- fut.set_result(val)
- self.outcome, self.outcome_timestamp = fut, ts
-
- def set_exception(
- self, exc_info: t.Tuple[t.Type[BaseException], BaseException, "types.TracebackType| None"]
- ) -> None:
- ts = time.monotonic()
- fut = Future(self.attempt_number)
- fut.set_exception(exc_info[1])
- self.outcome, self.outcome_timestamp = fut, ts
-
- def __repr__(self) -> str:
- if self.outcome is None:
- result = "none yet"
- elif self.outcome.failed:
- exception = self.outcome.exception()
- result = f"failed ({exception.__class__.__name__} {exception})"
- else:
- result = f"returned {self.outcome.result()}"
-
- slept = float(round(self.idle_for, 2))
- clsname = self.__class__.__name__
- return f"<{clsname} {id(self)}: attempt #{self.attempt_number}; slept for {slept}; last result: {result}>"
-
-
-@t.overload
-def retry(func: WrappedFn) -> WrappedFn:
- ...
-
-
-@t.overload
-def retry(
- sleep: t.Callable[[t.Union[int, float]], None] = sleep,
- stop: "StopBaseT" = stop_never,
- wait: "WaitBaseT" = wait_none(),
- retry: "RetryBaseT" = retry_if_exception_type(),
- before: t.Callable[["RetryCallState"], None] = before_nothing,
- after: t.Callable[["RetryCallState"], None] = after_nothing,
- before_sleep: t.Optional[t.Callable[["RetryCallState"], None]] = None,
- reraise: bool = False,
- retry_error_cls: t.Type["RetryError"] = RetryError,
- retry_error_callback: t.Optional[t.Callable[["RetryCallState"], t.Any]] = None,
-) -> t.Callable[[WrappedFn], WrappedFn]:
- ...
-
-
-def retry(*dargs: t.Any, **dkw: t.Any) -> t.Any:
- """Wrap a function with a new `Retrying` object.
-
- :param dargs: positional arguments passed to Retrying object
- :param dkw: keyword arguments passed to the Retrying object
- """
- # support both @retry and @retry() as valid syntax
- if len(dargs) == 1 and callable(dargs[0]):
- return retry()(dargs[0])
- else:
-
- def wrap(f: WrappedFn) -> WrappedFn:
- if isinstance(f, retry_base):
- warnings.warn(
- f"Got retry_base instance ({f.__class__.__name__}) as callable argument, "
- f"this will probably hang indefinitely (did you mean retry={f.__class__.__name__}(...)?)"
- )
- r: "BaseRetrying"
- if iscoroutinefunction(f):
- r = AsyncRetrying(*dargs, **dkw)
- elif tornado and hasattr(tornado.gen, "is_coroutine_function") and tornado.gen.is_coroutine_function(f):
- r = TornadoRetrying(*dargs, **dkw)
- else:
- r = Retrying(*dargs, **dkw)
-
- return r.wraps(f)
-
- return wrap
-
-
-from pip._vendor.tenacity._asyncio import AsyncRetrying # noqa:E402,I100
-
-if tornado:
- from pip._vendor.tenacity.tornadoweb import TornadoRetrying
-
-
-__all__ = [
- "retry_base",
- "retry_all",
- "retry_always",
- "retry_any",
- "retry_if_exception",
- "retry_if_exception_type",
- "retry_if_exception_cause_type",
- "retry_if_not_exception_type",
- "retry_if_not_result",
- "retry_if_result",
- "retry_never",
- "retry_unless_exception_type",
- "retry_if_exception_message",
- "retry_if_not_exception_message",
- "sleep",
- "sleep_using_event",
- "stop_after_attempt",
- "stop_after_delay",
- "stop_all",
- "stop_any",
- "stop_never",
- "stop_when_event_set",
- "wait_chain",
- "wait_combine",
- "wait_exponential",
- "wait_fixed",
- "wait_incrementing",
- "wait_none",
- "wait_random",
- "wait_random_exponential",
- "wait_full_jitter",
- "wait_exponential_jitter",
- "before_log",
- "before_nothing",
- "after_log",
- "after_nothing",
- "before_sleep_log",
- "before_sleep_nothing",
- "retry",
- "WrappedFn",
- "TryAgain",
- "NO_RESULT",
- "DoAttempt",
- "DoSleep",
- "BaseAction",
- "RetryAction",
- "RetryError",
- "AttemptManager",
- "BaseRetrying",
- "Retrying",
- "Future",
- "RetryCallState",
- "AsyncRetrying",
-]
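The file above is pip's vendored copy of tenacity, so the names it re-exports follow the standard tenacity API. As a hedged illustration (not part of the diff), a minimal sketch of the decorator form and the iterator/context-manager form defined above might look like the following; it assumes the standalone `tenacity` package, since pip's internal `pip._vendor.tenacity` copy is not meant for direct use:

```python
# Hedged usage sketch for the tenacity API re-exported above (decorator form and
# the Retrying iterator form). Assumes the standalone `tenacity` package.
import random

from tenacity import RetryError, Retrying, retry, stop_after_attempt, wait_fixed


@retry(stop=stop_after_attempt(3), wait=wait_fixed(0.1), reraise=True)
def flaky_call() -> int:
    """Retried up to 3 times; with reraise=True the last error is re-raised."""
    if random.random() < 0.5:
        raise ConnectionError("transient failure")
    return 42


def flaky_call_inline() -> int:
    """Same control flow without a decorator, using Retrying as an iterator."""
    try:
        for attempt in Retrying(stop=stop_after_attempt(3), wait=wait_fixed(0.1)):
            with attempt:
                if random.random() < 0.5:
                    raise ConnectionError("transient failure")
                return 42
    except RetryError as exc:
        # RetryError wraps the last attempt (see the RetryError class above).
        print(f"gave up after {exc.last_attempt.attempt_number} attempts")
        raise
    return 0  # not reached here, since the attempt body always returns or raises


if __name__ == "__main__":
    print(flaky_call())
    print(flaky_call_inline())
```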
diff --git a/spaces/Borda90/Titanic_Esp/README.md b/spaces/Borda90/Titanic_Esp/README.md
deleted file mode 100644
index a6c3334063cdd77e1ee100ea099cf032283a17dd..0000000000000000000000000000000000000000
--- a/spaces/Borda90/Titanic_Esp/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Titanic_Esp
-emoji: 📉
-colorFrom: blue
-colorTo: purple
-sdk: gradio
-sdk_version: 2.8.13
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/__init__.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/__init__.py
deleted file mode 100644
index 51685f6233913e5c797ee1e3e235216ad33ebdf5..0000000000000000000000000000000000000000
--- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
-
-from .utils.env import setup_environment
-
-setup_environment()
-
-
-# This line will be programmatically read/written by setup.py.
-# Leave them at the bottom of this file and don't touch them.
-__version__ = "0.1.1"
diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/nms.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/nms.py
deleted file mode 100644
index 726a96323d08946206a64d203ed10866b2a42c9a..0000000000000000000000000000000000000000
--- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/nms.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-
-import torch
-from torchvision.ops import boxes as box_ops
-from torchvision.ops import nms # BC-compat
-
-
-def batched_nms(boxes, scores, idxs, iou_threshold):
- """
- Same as torchvision.ops.boxes.batched_nms, but safer.
- """
- assert boxes.shape[-1] == 4
- # TODO may need better strategy.
- # Investigate after having a fully-cuda NMS op.
- if len(boxes) < 40000:
- return box_ops.batched_nms(boxes, scores, idxs, iou_threshold)
-
- result_mask = scores.new_zeros(scores.size(), dtype=torch.bool)
- for id in torch.unique(idxs).cpu().tolist():
- mask = (idxs == id).nonzero().view(-1)
- keep = nms(boxes[mask], scores[mask], iou_threshold)
- result_mask[mask[keep]] = True
- keep = result_mask.nonzero().view(-1)
- keep = keep[scores[keep].argsort(descending=True)]
- return keep
-
-
-# Note: this function (nms_rotated) might be moved into
-# torchvision/ops/boxes.py in the future
-def nms_rotated(boxes, scores, iou_threshold):
- """
- Performs non-maximum suppression (NMS) on the rotated boxes according
- to their intersection-over-union (IoU).
-
- Rotated NMS iteratively removes lower scoring rotated boxes which have an
- IoU greater than iou_threshold with another (higher scoring) rotated box.
-
- Note that RotatedBox (5, 3, 4, 2, -90) covers exactly the same region as
- RotatedBox (5, 3, 4, 2, 90) does, and their IoU will be 1. However, they
- can be representing completely different objects in certain tasks, e.g., OCR.
-
- As for the question of whether rotated-NMS should treat them as faraway boxes
- even though their IOU is 1, it depends on the application and/or ground truth annotation.
-
- As an extreme example, consider a single character v and the square box around it.
-
- If the angle is 0 degree, the object (text) would be read as 'v';
-
- If the angle is 90 degrees, the object (text) would become '>';
-
- If the angle is 180 degrees, the object (text) would become '^';
-
- If the angle is 270/-90 degrees, the object (text) would become '<'
-
- All of these cases have IoU of 1 to each other, and rotated NMS that only
- uses IoU as criterion would only keep one of them with the highest score -
- which, practically, still makes sense in most cases because typically
- only one of theses orientations is the correct one. Also, it does not matter
- as much if the box is only used to classify the object (instead of transcribing
- them with a sequential OCR recognition model) later.
-
- On the other hand, when we use IoU to filter proposals that are close to the
- ground truth during training, we should definitely take the angle into account if
- we know the ground truth is labeled with the strictly correct orientation (as in,
- upside-down words are annotated with -180 degrees even though they can be covered
- with a 0/90/-90 degree box, etc.)
-
- The way the original dataset is annotated also matters. For example, if the dataset
- is a 4-point polygon dataset that does not enforce ordering of vertices/orientation,
- we can estimate a minimum rotated bounding box to this polygon, but there's no way
- we can tell the correct angle with 100% confidence (as shown above, there could be 4 different
- rotated boxes, with angles differed by 90 degrees to each other, covering the exactly
- same region). In that case we have to just use IoU to determine the box
- proximity (as many detection benchmarks (even for text) do) unless there're other
- assumptions we can make (like width is always larger than height, or the object is not
- rotated by more than 90 degrees CCW/CW, etc.)
-
- In summary, not considering angles in rotated NMS seems to be a good option for now,
- but we should be aware of its implications.
-
- Args:
- boxes (Tensor[N, 5]): Rotated boxes to perform NMS on. They are expected to be in
- (x_center, y_center, width, height, angle_degrees) format.
- scores (Tensor[N]): Scores for each one of the rotated boxes
-        iou_threshold (float): Discards all overlapping rotated boxes with IoU > iou_threshold
-
- Returns:
- keep (Tensor): int64 tensor with the indices of the elements that have been kept
- by Rotated NMS, sorted in decreasing order of scores
- """
- from detectron2 import _C
-
- return _C.nms_rotated(boxes, scores, iou_threshold)
-
-
-# Note: this function (batched_nms_rotated) might be moved into
-# torchvision/ops/boxes.py in the future
-def batched_nms_rotated(boxes, scores, idxs, iou_threshold):
- """
- Performs non-maximum suppression in a batched fashion.
-
- Each index value correspond to a category, and NMS
- will not be applied between elements of different categories.
-
- Args:
- boxes (Tensor[N, 5]):
- boxes where NMS will be performed. They
- are expected to be in (x_ctr, y_ctr, width, height, angle_degrees) format
- scores (Tensor[N]):
- scores for each one of the boxes
- idxs (Tensor[N]):
- indices of the categories for each one of the boxes.
- iou_threshold (float):
- discards all overlapping boxes
-            with IoU > iou_threshold
-
- Returns:
- Tensor:
- int64 tensor with the indices of the elements that have been kept
- by NMS, sorted in decreasing order of scores
- """
- assert boxes.shape[-1] == 5
-
- if boxes.numel() == 0:
- return torch.empty((0,), dtype=torch.int64, device=boxes.device)
- # Strategy: in order to perform NMS independently per class,
- # we add an offset to all the boxes. The offset is dependent
- # only on the class idx, and is large enough so that boxes
- # from different classes do not overlap
-
- # Note that batched_nms in torchvision/ops/boxes.py only uses max_coordinate,
- # which won't handle negative coordinates correctly.
- # Here by using min_coordinate we can make sure the negative coordinates are
- # correctly handled.
- max_coordinate = (
- torch.max(boxes[:, 0], boxes[:, 1]) + torch.max(boxes[:, 2], boxes[:, 3]) / 2
- ).max()
- min_coordinate = (
- torch.min(boxes[:, 0], boxes[:, 1]) - torch.min(boxes[:, 2], boxes[:, 3]) / 2
- ).min()
- offsets = idxs.to(boxes) * (max_coordinate - min_coordinate + 1)
- boxes_for_nms = boxes.clone() # avoid modifying the original values in boxes
- boxes_for_nms[:, :2] += offsets[:, None]
- keep = nms_rotated(boxes_for_nms, scores, iou_threshold)
- return keep
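As a quick, hedged illustration (not part of the original file), the class-aware `batched_nms` defined above can be exercised on a few hand-made boxes. This assumes detectron2 and torchvision are installed and that the function is importable as `detectron2.layers.batched_nms`, which is how the package normally re-exports it:

```python
# Hedged usage sketch for batched_nms (class-aware NMS) as defined above.
import torch

from detectron2.layers import batched_nms  # re-export of the function above

# Three XYXY boxes: box 1 overlaps box 0 heavily and shares its class, so it
# should be suppressed; box 2 is identical to box 0 but has a different class,
# so per-class NMS must keep it.
boxes = torch.tensor(
    [
        [0.0, 0.0, 10.0, 10.0],
        [1.0, 1.0, 11.0, 11.0],
        [0.0, 0.0, 10.0, 10.0],
    ]
)
scores = torch.tensor([0.9, 0.8, 0.7])
idxs = torch.tensor([0, 0, 1])  # per-box class index

keep = batched_nms(boxes, scores, idxs, iou_threshold=0.5)
print(keep)  # expected: tensor([0, 2]), sorted by decreasing score
```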
diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/inner_product.h b/spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/inner_product.h
deleted file mode 100644
index 71e1a92705b0570734aa544899e2fab7a681bb37..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/inner_product.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/system/detail/generic/tag.h>
-
-namespace thrust
-{
-namespace system
-{
-namespace detail
-{
-namespace generic
-{
-
-
-template<typename DerivedPolicy, typename InputIterator1, typename InputIterator2, typename OutputType>
-__host__ __device__
-  OutputType inner_product(thrust::execution_policy<DerivedPolicy> &exec,
-                           InputIterator1 first1,
-                           InputIterator1 last1,
-                           InputIterator2 first2,
-                           OutputType init);
-
-
-template<typename DerivedPolicy, typename InputIterator1, typename InputIterator2, typename OutputType, typename BinaryFunction1, typename BinaryFunction2>
-__host__ __device__
-OutputType inner_product(thrust::execution_policy<DerivedPolicy> &exec,
-                         InputIterator1 first1,
-                         InputIterator1 last1,
-                         InputIterator2 first2,
-                         OutputType init,
-                         BinaryFunction1 binary_op1,
-                         BinaryFunction2 binary_op2);
-
-
-} // end namespace generic
-} // end namespace detail
-} // end namespace system
-} // end namespace thrust
-
-#include <thrust/system/detail/generic/inner_product.inl>
-
diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/inner_product.h b/spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/inner_product.h
deleted file mode 100644
index e8cf941a1dc3df1a6a516eee54f92fa610fd35cc..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/inner_product.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-
-// this system inherits inner_product
-#include <thrust/system/cpp/detail/inner_product.h>
-
diff --git a/spaces/CVPR/MonoScene/monoscene/app.py b/spaces/CVPR/MonoScene/monoscene/app.py
deleted file mode 100644
index 8e70631e75313a28bc978ac3d3bd5df28b61a552..0000000000000000000000000000000000000000
--- a/spaces/CVPR/MonoScene/monoscene/app.py
+++ /dev/null
@@ -1,138 +0,0 @@
-from pytorch_lightning import Trainer
-from monoscene.models.monoscene import MonoScene
-from monoscene.data.NYU.nyu_dm import NYUDataModule
-from monoscene.data.semantic_kitti.kitti_dm import KittiDataModule
-from monoscene.data.kitti_360.kitti_360_dm import Kitti360DataModule
-# import hydra
-from omegaconf import DictConfig
-import torch
-import numpy as np
-import os
-from hydra.utils import get_original_cwd
-import gradio as gr
-import numpy as np
-import plotly.express as px
-import pandas as pd
-
-
-# @hydra.main(config_name="../config/monoscene.yaml")
-def plot(input_img):
- torch.set_grad_enabled(False)
-
- # Setup dataloader
- # if config.dataset == "kitti" or config.dataset == "kitti_360":
- feature = 64
- project_scale = 2
- full_scene_size = (256, 256, 32)
-
- # if config.dataset == "kitti":
- # data_module = KittiDataModule(
- # root=config.kitti_root,
- # preprocess_root=config.kitti_preprocess_root,
- # frustum_size=config.frustum_size,
- # batch_size=int(config.batch_size / config.n_gpus),
- # num_workers=int(config.num_workers_per_gpu * config.n_gpus),
- # )
- # data_module.setup()
- # data_loader = data_module.val_dataloader()
- # # data_loader = data_module.test_dataloader() # use this if you want to infer on test set
- # else:
- # data_module = Kitti360DataModule(
- # root=config.kitti_360_root,
- # sequences=[config.kitti_360_sequence],
- # n_scans=2000,
- # batch_size=1,
- # num_workers=3,
- # )
- # data_module.setup()
- # data_loader = data_module.dataloader()
-
- # elif config.dataset == "NYU":
- # project_scale = 1
- # feature = 200
- # full_scene_size = (60, 36, 60)
- # data_module = NYUDataModule(
- # root=config.NYU_root,
- # preprocess_root=config.NYU_preprocess_root,
- # n_relations=config.n_relations,
- # frustum_size=config.frustum_size,
- # batch_size=int(config.batch_size / config.n_gpus),
- # num_workers=int(config.num_workers_per_gpu * config.n_gpus),
- # )
- # data_module.setup()
- # data_loader = data_module.val_dataloader()
- # # data_loader = data_module.test_dataloader() # use this if you want to infer on test set
- # else:
- # print("dataset not support")
-
- # Load pretrained models
- # if config.dataset == "NYU":
- # model_path = os.path.join(
- # get_original_cwd(), "trained_models", "monoscene_nyu.ckpt"
- # )
- # else:
- # model_path = os.path.join(
- # get_original_cwd(), "trained_models", "monoscene_kitti.ckpt"
- # )
- model_path = "trained_models/monoscene_kitti.ckpt"
-
- model = MonoScene.load_from_checkpoint(
- model_path,
- feature=feature,
- project_scale=project_scale,
- fp_loss=False,
- full_scene_size=full_scene_size,
- )
- model.cuda()
- model.eval()
-
- print(input_img.shape)
-
- x = np.arange(12).reshape(4, 3) / 12
- data = pd.DataFrame(data=x, columns=['x', 'y', 'z'])
- fig = px.scatter_3d(data, x="x", y="y", z="z")
- return fig
-
-demo = gr.Interface(plot, gr.Image(shape=(200, 200)), gr.Plot())
-demo.launch()
-
-
-
- # Save prediction and additional data
- # to draw the viewing frustum and remove scene outside the room for NYUv2
- # output_path = os.path.join(config.output_path, config.dataset)
- # with torch.no_grad():
- # for batch in tqdm(data_loader):
- # batch["img"] = batch["img"].cuda()
- # pred = model(batch)
- # y_pred = torch.softmax(pred["ssc_logit"], dim=1).detach().cpu().numpy()
- # y_pred = np.argmax(y_pred, axis=1)
- # for i in range(config.batch_size):
- # out_dict = {"y_pred": y_pred[i].astype(np.uint16)}
- # if "target" in batch:
- # out_dict["target"] = (
- # batch["target"][i].detach().cpu().numpy().astype(np.uint16)
- # )
-
- # if config.dataset == "NYU":
- # write_path = output_path
- # filepath = os.path.join(write_path, batch["name"][i] + ".pkl")
- # out_dict["cam_pose"] = batch["cam_pose"][i].detach().cpu().numpy()
- # out_dict["vox_origin"] = (
- # batch["vox_origin"][i].detach().cpu().numpy()
- # )
- # else:
- # write_path = os.path.join(output_path, batch["sequence"][i])
- # filepath = os.path.join(write_path, batch["frame_id"][i] + ".pkl")
- # out_dict["fov_mask_1"] = (
- # batch["fov_mask_1"][i].detach().cpu().numpy()
- # )
- # out_dict["cam_k"] = batch["cam_k"][i].detach().cpu().numpy()
- # out_dict["T_velo_2_cam"] = (
- # batch["T_velo_2_cam"][i].detach().cpu().numpy()
- # )
-
- # os.makedirs(write_path, exist_ok=True)
- # with open(filepath, "wb") as handle:
- # pickle.dump(out_dict, handle)
- # print("wrote to", filepath)
\ No newline at end of file
diff --git a/spaces/CVPR/lama-example/bin/evaluator_example.py b/spaces/CVPR/lama-example/bin/evaluator_example.py
deleted file mode 100644
index 669e3c53c1218444a880dc78f19a565a406ff6dc..0000000000000000000000000000000000000000
--- a/spaces/CVPR/lama-example/bin/evaluator_example.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import os
-
-import cv2
-import numpy as np
-import torch
-from skimage import io
-from skimage.transform import resize
-from torch.utils.data import Dataset
-
-from saicinpainting.evaluation.evaluator import InpaintingEvaluator
-from saicinpainting.evaluation.losses.base_loss import SSIMScore, LPIPSScore, FIDScore
-
-
-class SimpleImageDataset(Dataset):
- def __init__(self, root_dir, image_size=(400, 600)):
- self.root_dir = root_dir
- self.files = sorted(os.listdir(root_dir))
- self.image_size = image_size
-
- def __getitem__(self, index):
- img_name = os.path.join(self.root_dir, self.files[index])
- image = io.imread(img_name)
- image = resize(image, self.image_size, anti_aliasing=True)
- image = torch.FloatTensor(image).permute(2, 0, 1)
- return image
-
- def __len__(self):
- return len(self.files)
-
-
-def create_rectangle_mask(height, width):
- mask = np.ones((height, width))
- up_left_corner = width // 4, height // 4
- down_right_corner = (width - up_left_corner[0] - 1, height - up_left_corner[1] - 1)
- cv2.rectangle(mask, up_left_corner, down_right_corner, (0, 0, 0), thickness=cv2.FILLED)
- return mask
-
-
-class Model():
- def __call__(self, img_batch, mask_batch):
- mean = (img_batch * mask_batch[:, None, :, :]).sum(dim=(2, 3)) / mask_batch.sum(dim=(1, 2))[:, None]
- inpainted = mean[:, :, None, None] * (1 - mask_batch[:, None, :, :]) + img_batch * mask_batch[:, None, :, :]
- return inpainted
-
-
-class SimpleImageSquareMaskDataset(Dataset):
- def __init__(self, dataset):
- self.dataset = dataset
- self.mask = torch.FloatTensor(create_rectangle_mask(*self.dataset.image_size))
- self.model = Model()
-
- def __getitem__(self, index):
- img = self.dataset[index]
- mask = self.mask.clone()
- inpainted = self.model(img[None, ...], mask[None, ...])
- return dict(image=img, mask=mask, inpainted=inpainted)
-
- def __len__(self):
- return len(self.dataset)
-
-
-dataset = SimpleImageDataset('imgs')
-mask_dataset = SimpleImageSquareMaskDataset(dataset)
-model = Model()
-metrics = {
- 'ssim': SSIMScore(),
- 'lpips': LPIPSScore(),
- 'fid': FIDScore()
-}
-
-evaluator = InpaintingEvaluator(
- mask_dataset, scores=metrics, batch_size=3, area_grouping=True
-)
-
-results = evaluator.evaluate(model)
-print(results)
diff --git a/spaces/CVPR/lama-example/bin/paper_runfiles/generate_test_paris.sh b/spaces/CVPR/lama-example/bin/paper_runfiles/generate_test_paris.sh
deleted file mode 100644
index 66056017c3aa376ef0767a59583ab25a321b559b..0000000000000000000000000000000000000000
--- a/spaces/CVPR/lama-example/bin/paper_runfiles/generate_test_paris.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/usr/bin/env bash
-
-# paths to data are valid for mml-ws01
-OUT_DIR="/media/inpainting/paper_data/Paris_StreetView_Dataset_val"
-
-source "$(dirname $0)/env.sh"
-
-for datadir in paris_eval_gt
-do
- for conf in random_thin_256 random_medium_256 random_thick_256 segm_256
- do
- "$BINDIR/gen_mask_dataset_hydra.py" -cn $conf datadir=$datadir location=mml-ws01-paris \
-        location.out_dir=$OUT_DIR cropping.out_square_crop=False cropping.out_min_size=227
-
- "$BINDIR/calc_dataset_stats.py" --samples-n 20 "$OUT_DIR/$datadir/$conf" "$OUT_DIR/$datadir/${conf}_stats"
- done
-done
diff --git a/spaces/CVPR/lama-example/bin/side_by_side.py b/spaces/CVPR/lama-example/bin/side_by_side.py
deleted file mode 100644
index 8ba7a42a3b8597552b8002d1eb245d5776aff7f7..0000000000000000000000000000000000000000
--- a/spaces/CVPR/lama-example/bin/side_by_side.py
+++ /dev/null
@@ -1,76 +0,0 @@
-#!/usr/bin/env python3
-import os
-import random
-
-import cv2
-import numpy as np
-
-from saicinpainting.evaluation.data import PrecomputedInpaintingResultsDataset
-from saicinpainting.evaluation.utils import load_yaml
-from saicinpainting.training.visualizers.base import visualize_mask_and_images
-
-
-def main(args):
- config = load_yaml(args.config)
-
- datasets = [PrecomputedInpaintingResultsDataset(args.datadir, cur_predictdir, **config.dataset_kwargs)
- for cur_predictdir in args.predictdirs]
- assert len({len(ds) for ds in datasets}) == 1
- len_first = len(datasets[0])
-
- indices = list(range(len_first))
- if len_first > args.max_n:
- indices = sorted(random.sample(indices, args.max_n))
-
- os.makedirs(args.outpath, exist_ok=True)
-
- filename2i = {}
-
- keys = ['image'] + [i for i in range(len(datasets))]
- for img_i in indices:
- try:
- mask_fname = os.path.basename(datasets[0].mask_filenames[img_i])
- if mask_fname in filename2i:
- filename2i[mask_fname] += 1
- idx = filename2i[mask_fname]
-                mask_fname_only, ext = os.path.splitext(mask_fname)  # split extension, not directory
- mask_fname = f'{mask_fname_only}_{idx}{ext}'
- else:
- filename2i[mask_fname] = 1
-
- cur_vis_dict = datasets[0][img_i]
- for ds_i, ds in enumerate(datasets):
- cur_vis_dict[ds_i] = ds[img_i]['inpainted']
-
- vis_img = visualize_mask_and_images(cur_vis_dict, keys,
- last_without_mask=False,
- mask_only_first=True,
- black_mask=args.black)
- vis_img = np.clip(vis_img * 255, 0, 255).astype('uint8')
-
- out_fname = os.path.join(args.outpath, mask_fname)
-
-
-
- vis_img = cv2.cvtColor(vis_img, cv2.COLOR_RGB2BGR)
- cv2.imwrite(out_fname, vis_img)
- except Exception as ex:
- print(f'Could not process {img_i} due to {ex}')
-
-
-if __name__ == '__main__':
- import argparse
-
- aparser = argparse.ArgumentParser()
- aparser.add_argument('--max-n', type=int, default=100, help='Maximum number of images to print')
- aparser.add_argument('--black', action='store_true', help='Whether to fill mask on GT with black')
- aparser.add_argument('config', type=str, help='Path to evaluation config (e.g. configs/eval1.yaml)')
- aparser.add_argument('outpath', type=str, help='Where to put results')
- aparser.add_argument('datadir', type=str,
- help='Path to folder with images and masks')
- aparser.add_argument('predictdirs', type=str,
- nargs='+',
- help='Path to folders with predicts')
-
-
- main(aparser.parse_args())
diff --git a/spaces/CVPR/transfiner/configs/common/models/retinanet.py b/spaces/CVPR/transfiner/configs/common/models/retinanet.py
deleted file mode 100644
index 01d168fe6f054b88933488bdc65516424ce917cd..0000000000000000000000000000000000000000
--- a/spaces/CVPR/transfiner/configs/common/models/retinanet.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from detectron2.config import LazyCall as L
-from detectron2.layers import ShapeSpec
-from detectron2.modeling.meta_arch import RetinaNet
-from detectron2.modeling.anchor_generator import DefaultAnchorGenerator
-from detectron2.modeling.backbone.fpn import LastLevelP6P7
-from detectron2.modeling.backbone import BasicStem, FPN, ResNet
-from detectron2.modeling.box_regression import Box2BoxTransform
-from detectron2.modeling.matcher import Matcher
-from detectron2.modeling.meta_arch.retinanet import RetinaNetHead
-
-model = L(RetinaNet)(
- backbone=L(FPN)(
- bottom_up=L(ResNet)(
- stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"),
- stages=L(ResNet.make_default_stages)(
- depth=50,
- stride_in_1x1=True,
- norm="FrozenBN",
- ),
- out_features=["res3", "res4", "res5"],
- ),
- in_features=["res3", "res4", "res5"],
- out_channels=256,
- top_block=L(LastLevelP6P7)(in_channels=2048, out_channels="${..out_channels}"),
- ),
- head=L(RetinaNetHead)(
- input_shape=[ShapeSpec(channels=256)],
- num_classes="${..num_classes}",
- conv_dims=[256, 256, 256, 256],
- prior_prob=0.01,
- num_anchors=9,
- ),
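-    # 3 scales per octave x 3 aspect ratios below = 9 anchors per location, matching num_anchors=9 in the head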
- anchor_generator=L(DefaultAnchorGenerator)(
- sizes=[[x, x * 2 ** (1.0 / 3), x * 2 ** (2.0 / 3)] for x in [32, 64, 128, 256, 512]],
- aspect_ratios=[0.5, 1.0, 2.0],
- strides=[8, 16, 32, 64, 128],
- offset=0.0,
- ),
- box2box_transform=L(Box2BoxTransform)(weights=[1.0, 1.0, 1.0, 1.0]),
- anchor_matcher=L(Matcher)(
- thresholds=[0.4, 0.5], labels=[0, -1, 1], allow_low_quality_matches=True
- ),
- num_classes=80,
- head_in_features=["p3", "p4", "p5", "p6", "p7"],
- focal_loss_alpha=0.25,
- focal_loss_gamma=2.0,
- pixel_mean=[103.530, 116.280, 123.675],
- pixel_std=[1.0, 1.0, 1.0],
- input_format="BGR",
-)
diff --git a/spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/models/GroundingDINO/bertwarper.py b/spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/models/GroundingDINO/bertwarper.py
deleted file mode 100644
index f0cf9779b270e1aead32845006f8b881fcba37ad..0000000000000000000000000000000000000000
--- a/spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/models/GroundingDINO/bertwarper.py
+++ /dev/null
@@ -1,273 +0,0 @@
-# ------------------------------------------------------------------------
-# Grounding DINO
-# url: https://github.com/IDEA-Research/GroundingDINO
-# Copyright (c) 2023 IDEA. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
-# ------------------------------------------------------------------------
-
-import torch
-import torch.nn.functional as F
-import torch.utils.checkpoint as checkpoint
-from torch import Tensor, nn
-from torchvision.ops.boxes import nms
-from transformers import BertConfig, BertModel, BertPreTrainedModel
-from transformers.modeling_outputs import BaseModelOutputWithPoolingAndCrossAttentions
-
-
-class BertModelWarper(nn.Module):
- def __init__(self, bert_model):
- super().__init__()
-        # self.bert = bert_model
-
- self.config = bert_model.config
- self.embeddings = bert_model.embeddings
- self.encoder = bert_model.encoder
- self.pooler = bert_model.pooler
-
- self.get_extended_attention_mask = bert_model.get_extended_attention_mask
- self.invert_attention_mask = bert_model.invert_attention_mask
- self.get_head_mask = bert_model.get_head_mask
-
- def forward(
- self,
- input_ids=None,
- attention_mask=None,
- token_type_ids=None,
- position_ids=None,
- head_mask=None,
- inputs_embeds=None,
- encoder_hidden_states=None,
- encoder_attention_mask=None,
- past_key_values=None,
- use_cache=None,
- output_attentions=None,
- output_hidden_states=None,
- return_dict=None,
- ):
- r"""
- encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
- Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
- the model is configured as a decoder.
- encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
- Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
- the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
-
- - 1 for tokens that are **not masked**,
- - 0 for tokens that are **masked**.
- past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
- Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
-
- If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
- (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
- instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
- use_cache (:obj:`bool`, `optional`):
- If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
- decoding (see :obj:`past_key_values`).
- """
- output_attentions = (
- output_attentions if output_attentions is not None else self.config.output_attentions
- )
- output_hidden_states = (
- output_hidden_states
- if output_hidden_states is not None
- else self.config.output_hidden_states
- )
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- if self.config.is_decoder:
- use_cache = use_cache if use_cache is not None else self.config.use_cache
- else:
- use_cache = False
-
- if input_ids is not None and inputs_embeds is not None:
- raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
- elif input_ids is not None:
- input_shape = input_ids.size()
- batch_size, seq_length = input_shape
- elif inputs_embeds is not None:
- input_shape = inputs_embeds.size()[:-1]
- batch_size, seq_length = input_shape
- else:
- raise ValueError("You have to specify either input_ids or inputs_embeds")
-
- device = input_ids.device if input_ids is not None else inputs_embeds.device
-
- # past_key_values_length
- past_key_values_length = (
- past_key_values[0][0].shape[2] if past_key_values is not None else 0
- )
-
- if attention_mask is None:
- attention_mask = torch.ones(
- ((batch_size, seq_length + past_key_values_length)), device=device
- )
- if token_type_ids is None:
- token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
-
- # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
- # ourselves in which case we just need to make it broadcastable to all heads.
- extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
- attention_mask, input_shape, device
- )
-
- # If a 2D or 3D attention mask is provided for the cross-attention
- # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
- if self.config.is_decoder and encoder_hidden_states is not None:
- encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
- encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
- if encoder_attention_mask is None:
- encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
- encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
- else:
- encoder_extended_attention_mask = None
- # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':
- # import ipdb; ipdb.set_trace()
-
- # Prepare head mask if needed
- # 1.0 in head_mask indicate we keep the head
- # attention_probs has shape bsz x n_heads x N x N
- # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
- # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
- head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
-
- embedding_output = self.embeddings(
- input_ids=input_ids,
- position_ids=position_ids,
- token_type_ids=token_type_ids,
- inputs_embeds=inputs_embeds,
- past_key_values_length=past_key_values_length,
- )
-
- encoder_outputs = self.encoder(
- embedding_output,
- attention_mask=extended_attention_mask,
- head_mask=head_mask,
- encoder_hidden_states=encoder_hidden_states,
- encoder_attention_mask=encoder_extended_attention_mask,
- past_key_values=past_key_values,
- use_cache=use_cache,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
- sequence_output = encoder_outputs[0]
- pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
-
- if not return_dict:
- return (sequence_output, pooled_output) + encoder_outputs[1:]
-
- return BaseModelOutputWithPoolingAndCrossAttentions(
- last_hidden_state=sequence_output,
- pooler_output=pooled_output,
- past_key_values=encoder_outputs.past_key_values,
- hidden_states=encoder_outputs.hidden_states,
- attentions=encoder_outputs.attentions,
- cross_attentions=encoder_outputs.cross_attentions,
- )
-
-
-class TextEncoderShell(nn.Module):
- def __init__(self, text_encoder):
- super().__init__()
- self.text_encoder = text_encoder
- self.config = self.text_encoder.config
-
- def forward(self, **kw):
- # feed into text encoder
- return self.text_encoder(**kw)
-
-
-def generate_masks_with_special_tokens(tokenized, special_tokens_list, tokenizer):
- """Generate attention mask between each pair of special tokens
- Args:
- input_ids (torch.Tensor): input ids. Shape: [bs, num_token]
- special_tokens_mask (list): special tokens mask.
- Returns:
- torch.Tensor: attention mask between each special tokens.
- """
- input_ids = tokenized["input_ids"]
- bs, num_token = input_ids.shape
- # special_tokens_mask: bs, num_token. 1 for special tokens. 0 for normal tokens
- special_tokens_mask = torch.zeros((bs, num_token), device=input_ids.device).bool()
- for special_token in special_tokens_list:
- special_tokens_mask |= input_ids == special_token
-
- # idxs: each row is a list of indices of special tokens
- idxs = torch.nonzero(special_tokens_mask)
-
- # generate attention mask and positional ids
- attention_mask = (
- torch.eye(num_token, device=input_ids.device).bool().unsqueeze(0).repeat(bs, 1, 1)
- )
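-    # start from the identity so every token attends to itself; blocks between consecutive special tokens are opened below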
- position_ids = torch.zeros((bs, num_token), device=input_ids.device)
- previous_col = 0
- for i in range(idxs.shape[0]):
- row, col = idxs[i]
- if (col == 0) or (col == num_token - 1):
- attention_mask[row, col, col] = True
- position_ids[row, col] = 0
- else:
- attention_mask[row, previous_col + 1 : col + 1, previous_col + 1 : col + 1] = True
- position_ids[row, previous_col + 1 : col + 1] = torch.arange(
- 0, col - previous_col, device=input_ids.device
- )
-
- previous_col = col
-
- # # padding mask
- # padding_mask = tokenized['attention_mask']
- # attention_mask = attention_mask & padding_mask.unsqueeze(1).bool() & padding_mask.unsqueeze(2).bool()
-
- return attention_mask, position_ids.to(torch.long)
-
-
-def generate_masks_with_special_tokens_and_transfer_map(tokenized, special_tokens_list, tokenizer):
- """Generate attention mask between each pair of special tokens
- Args:
- input_ids (torch.Tensor): input ids. Shape: [bs, num_token]
- special_tokens_mask (list): special tokens mask.
- Returns:
- torch.Tensor: attention mask between each special tokens.
- """
- input_ids = tokenized["input_ids"]
- bs, num_token = input_ids.shape
- # special_tokens_mask: bs, num_token. 1 for special tokens. 0 for normal tokens
- special_tokens_mask = torch.zeros((bs, num_token), device=input_ids.device).bool()
- for special_token in special_tokens_list:
- special_tokens_mask |= input_ids == special_token
-
- # idxs: each row is a list of indices of special tokens
- idxs = torch.nonzero(special_tokens_mask)
-
- # generate attention mask and positional ids
- attention_mask = (
- torch.eye(num_token, device=input_ids.device).bool().unsqueeze(0).repeat(bs, 1, 1)
- )
- position_ids = torch.zeros((bs, num_token), device=input_ids.device)
- cate_to_token_mask_list = [[] for _ in range(bs)]
- previous_col = 0
- for i in range(idxs.shape[0]):
- row, col = idxs[i]
- if (col == 0) or (col == num_token - 1):
- attention_mask[row, col, col] = True
- position_ids[row, col] = 0
- else:
- attention_mask[row, previous_col + 1 : col + 1, previous_col + 1 : col + 1] = True
- position_ids[row, previous_col + 1 : col + 1] = torch.arange(
- 0, col - previous_col, device=input_ids.device
- )
- c2t_maski = torch.zeros((num_token), device=input_ids.device).bool()
- c2t_maski[previous_col + 1 : col] = True
- cate_to_token_mask_list[row].append(c2t_maski)
- previous_col = col
-
- cate_to_token_mask_list = [
- torch.stack(cate_to_token_mask_listi, dim=0)
- for cate_to_token_mask_listi in cate_to_token_mask_list
- ]
-
- # # padding mask
- # padding_mask = tokenized['attention_mask']
- # attention_mask = attention_mask & padding_mask.unsqueeze(1).bool() & padding_mask.unsqueeze(2).bool()
-
- return attention_mask, position_ids.to(torch.long), cate_to_token_mask_list
diff --git a/spaces/CofAI/chat/g4f/Provider/Providers/H2o.py b/spaces/CofAI/chat/g4f/Provider/Providers/H2o.py
deleted file mode 100644
index eabf94e2dc1e6167f746a820e34c335f2aa8578e..0000000000000000000000000000000000000000
--- a/spaces/CofAI/chat/g4f/Provider/Providers/H2o.py
+++ /dev/null
@@ -1,106 +0,0 @@
-from requests import Session
-from uuid import uuid4
-from json import loads
-import os
-import json
-import requests
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://gpt-gm.h2o.ai'
-model = ['falcon-40b', 'falcon-7b', 'llama-13b']
-supports_stream = True
-needs_auth = False
-
-models = {
- 'falcon-7b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3',
- 'falcon-40b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
- 'llama-13b': 'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b'
-}
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-    conversation = 'instruction: this is a conversation between a user and an AI assistant, respond to the latest message, referring to the conversation if needed\n'
- for message in messages:
- conversation += '%s: %s\n' % (message['role'], message['content'])
- conversation += 'assistant:'
-
- client = Session()
- client.headers = {
- 'authority': 'gpt-gm.h2o.ai',
- 'origin': 'https://gpt-gm.h2o.ai',
- 'referer': 'https://gpt-gm.h2o.ai/',
- 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"Windows"',
- 'sec-fetch-dest': 'document',
- 'sec-fetch-mode': 'navigate',
- 'sec-fetch-site': 'same-origin',
- 'sec-fetch-user': '?1',
- 'upgrade-insecure-requests': '1',
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
- }
-
- client.get('https://gpt-gm.h2o.ai/')
- response = client.post('https://gpt-gm.h2o.ai/settings', data={
- 'ethicsModalAccepted': 'true',
- 'shareConversationsWithModelAuthors': 'true',
- 'ethicsModalAcceptedAt': '',
- 'activeModel': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
- 'searchEnabled': 'true',
- })
-
- headers = {
- 'authority': 'gpt-gm.h2o.ai',
- 'accept': '*/*',
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'origin': 'https://gpt-gm.h2o.ai',
- 'referer': 'https://gpt-gm.h2o.ai/',
- 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"Windows"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'same-origin',
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
- }
-
- json_data = {
- 'model': models[model]
- }
-
- response = client.post('https://gpt-gm.h2o.ai/conversation',
- headers=headers, json=json_data)
- conversationId = response.json()['conversationId']
-
-
- completion = client.post(f'https://gpt-gm.h2o.ai/conversation/{conversationId}', stream=True, json = {
- 'inputs': conversation,
- 'parameters': {
- 'temperature': kwargs.get('temperature', 0.4),
- 'truncate': kwargs.get('truncate', 2048),
- 'max_new_tokens': kwargs.get('max_new_tokens', 1024),
- 'do_sample': kwargs.get('do_sample', True),
- 'repetition_penalty': kwargs.get('repetition_penalty', 1.2),
- 'return_full_text': kwargs.get('return_full_text', False)
- },
- 'stream': True,
- 'options': {
- 'id': kwargs.get('id', str(uuid4())),
- 'response_id': kwargs.get('response_id', str(uuid4())),
- 'is_retry': False,
- 'use_cache': False,
- 'web_search_id': ''
- }
- })
-
- for line in completion.iter_lines():
- if b'data' in line:
- line = loads(line.decode('utf-8').replace('data:', ''))
- token = line['token']['text']
-
- if token == '<|endoftext|>':
- break
- else:
- yield (token)
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/spaces/Cpp4App/Cpp4App/SEM/types_pp_processing.py b/spaces/Cpp4App/Cpp4App/SEM/types_pp_processing.py
deleted file mode 100644
index e4b3a815588bfc1ea239c37b2a5a27e8eddd7acf..0000000000000000000000000000000000000000
--- a/spaces/Cpp4App/Cpp4App/SEM/types_pp_processing.py
+++ /dev/null
@@ -1,418 +0,0 @@
-import csv
-import re
-import spacy
-from bs4 import BeautifulSoup
-# import stanza
-
-from nltk.corpus import stopwords, wordnet
-from SEM.text_preprocessing import pre_process,pre_process_type
-from SEM.sentence_bayesian import clf_type,tf
-from SEM.phrase_similarity import wordnetSim3, wordnetSim_modified
-
-def check_ngram(string):
- words = string.split()
- num_words = len(words)
- return num_words
-
-
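-# regex rules that expand common English contractions before further text processing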
-replacement_patterns = [
-(r'won\'t', 'will not'),
-(r'can\'t', 'cannot'),
-(r'i\'m', 'i am'),
-(r'ain\'t', 'is not'),
-(r'(\w+)\'ll', '\g<1> will'),
-(r'(\w+)n\'t', '\g<1> not'),
-(r'(\w+)\'ve', '\g<1> have'),
-(r'(\w+)\'s', '\g<1> is'),
-(r'(\w+)\'re', '\g<1> are'),
-(r'(\w+)\'d', '\g<1> would')]
-
-class RegexpReplacer(object):
- def __init__(self, patterns=replacement_patterns):
- self.patterns = [(re.compile(regex), repl) for (regex, repl) in patterns]
- def replace(self, text):
- s = text
- for (pattern, repl) in self.patterns:
- (s, count) = re.subn(pattern, repl, s)
- return s
-# get the WordNet part-of-speech for a (Penn Treebank) POS tag
-def get_wordnet_pos(tag):
- if tag.startswith('J'):
- return wordnet.ADJ
- elif tag.startswith('V'):
- return wordnet.VERB
- elif tag.startswith('N'):
- return wordnet.NOUN
- elif tag.startswith('R'):
- return wordnet.ADV
- else:
- return None
-
-
-def cleanHtml(txt):
-
- # only split with line
- personal_information = []
- with open(txt, encoding='utf-8') as file_obj:
- for line in file_obj:
- # if len(line.split(' ')) >= 5:
- personal_information.append(line)
-
- text = ''.join(personal_information)
- soup = BeautifulSoup(text, 'html.parser')
- lower = soup.get_text().lower()
-
- # use re
- # pattern = r'(? 0.8:
- simList[information_type.index(type)] = wordnetSim3(chunk,type)
- except Exception:
- pass
- print("error")
- nowMax = 0
- for max in simList:
- if max > nowMax:
- nowMax = max
- if nowMax != 0:
- word[simList.index(nowMax)] = 1
- return word
-
-def getSentences(txt):
-
- information_type = {'Name':['name', 'first name', 'last name', 'full name', 'real name', 'surname', 'family name', 'given name'],
- 'Birthday':['birthday', 'date of birth', 'birth date', 'DOB', 'dob full birthday'],
- 'Address':['address', 'mailing address', 'physical address', 'postal address', 'billing address', 'shipping address'],
- 'Phone':['phone', 'phone number', 'mobile', 'mobile phone', 'mobile number', 'telephone', 'telephone number', 'call'],
- 'Email':['email', 'e-mail', 'email address', 'e-mail address'],
- 'Contacts':['contacts', 'phone-book', 'phone book'],
- 'Location':['location', 'locate', 'place', 'geography', 'geo', 'geo-location', 'precision location'],
- 'Camera':['camera', 'photo', 'scan', 'album', 'picture', 'gallery', 'photo library', 'storage', 'image', 'video'],
-                        'Microphone':['microphone', 'voice', 'mic', 'speech', 'talk'],
- 'Financial':['credit card', 'company', 'companies', 'organization', 'organizations', 'pay', 'payment'],
- 'IP':['IP', 'Internet Protocol', 'IP address', 'internet protocol address'],
- 'Cookies':['cookies', 'cookie']}
-
- sentence_list = cleanHtml(txt)
- for sen in sentence_list:
- sentence_list[sentence_list.index(sen)] = pre_process_type(sen)
-
- # print("all sentences:\n")
- # for sen in sentence_list:
- # print(sen)
- # print("\n")
-
- classified_sen = {'Name':[],
- 'Birthday':[],
- 'Address':[],
- 'Phone':[],
- 'Email':[],
- 'Contacts':[],
- 'Location':[],
- 'Camera':[],
- 'Microphone':[],
- 'Financial':[],
- 'IP':[],
- 'Cookies':[]}
- # simList = []
- # for a in information_type:
- # word.append(0)
- # for b in information_type:
- # simList.append(0)
- for sentence in sentence_list:
- if clf_type.predict(tf.transform([sentence])) == "1":
- # print("yes sentence: "+sentence+"\n")
- for type in information_type:
- for w in information_type[type]:
- if w in sentence:
- if w == "geo" or w == "IP" or w == "DOB":
- # check whether w is a part of an unrelated word
- if sentence[sentence.index(w) - 1] == " " and sentence not in classified_sen[type]:
- classified_sen[type].append(sentence)
- else:
- # check duplication
- if sentence not in classified_sen[type]:
- classified_sen[type].append(sentence)
-
- return classified_sen
-
-def getSentences_no_classifier(txt):
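-    # same keyword matching as getSentences, but without the Bayesian sentence-relevance filter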
-
- information_type = {'Name':['name', 'first name', 'last name', 'full name', 'real name', 'surname', 'family name', 'given name'],
- 'Birthday':['birthday', 'date of birth', 'birth date', 'DOB', 'dob full birthday'],
- 'Address':['address', 'mailing address', 'physical address', 'postal address', 'billing address', 'shipping address'],
- 'Phone':['phone', 'phone number', 'mobile', 'mobile phone', 'mobile number', 'telephone', 'telephone number', 'call'],
- 'Email':['email', 'e-mail', 'email address', 'e-mail address'],
- 'Contacts':['contacts', 'phone-book', 'phone book'],
- 'Location':['location', 'locate', 'place', 'geography', 'geo', 'geo-location', 'precision location'],
- 'Camera':['camera', 'photo', 'scan', 'album', 'picture', 'gallery', 'photo library', 'storage', 'image', 'video'],
-                        'Microphone':['microphone', 'voice', 'mic', 'speech', 'talk'],
- 'Financial':['credit card', 'company', 'companies', 'organization', 'organizations', 'pay', 'payment'],
- 'IP':['IP', 'Internet Protocol', 'IP address', 'internet protocol address'],
- 'Cookies':['cookies', 'cookie']}
-
- sentence_list = cleanHtml(txt)
- for sen in sentence_list:
- sentence_list[sentence_list.index(sen)] = pre_process_type(sen)
-
- # print("all sentences:\n")
- # for sen in sentence_list:
- # print(sen)
- # print("\n")
-
- classified_sen = {'Name':[],
- 'Birthday':[],
- 'Address':[],
- 'Phone':[],
- 'Email':[],
- 'Contacts':[],
- 'Location':[],
- 'Camera':[],
- 'Microphone':[],
- 'Financial':[],
- 'IP':[],
- 'Cookies':[]}
- # simList = []
- # for a in information_type:
- # word.append(0)
- # for b in information_type:
- # simList.append(0)
- for sentence in sentence_list:
- # print("yes sentence: "+sentence+"\n")
- for type in information_type:
- for w in information_type[type]:
- if w in sentence:
- if w == "geo" or w == "IP" or w == "DOB":
- # check whether w is a part of an unrelated word
- if sentence[sentence.index(w) - 1] == " " and sentence not in classified_sen[type]:
- classified_sen[type].append(sentence)
- else:
- # check duplication
- if sentence not in classified_sen[type]:
- classified_sen[type].append(sentence)
-
- return classified_sen
-
-def getSentences_with_classifier(txt):
-
- information_type = {'Name':['name', 'first name', 'last name', 'full name', 'real name', 'surname', 'family name', 'given name'],
- 'Birthday':['birthday', 'date of birth', 'birth date', 'DOB', 'dob full birthday', 'birth year'],
- 'Address':['mailing address', 'physical address', 'postal address', 'billing address', 'shipping address', 'delivery address', 'residence', 'collect address', 'personal address', 'residential address'],
- 'Phone':['phone', 'phone number', 'mobile', 'mobile phone', 'mobile number', 'telephone', 'telephone number', 'call'],
- 'Email':['email', 'e-mail', 'email address', 'e-mail address'],
- 'Contacts':['contacts', 'phone-book', 'phone book', 'phonebook', 'contact list', 'phone contacts', 'address book'],
- 'Location':['location', 'locate', 'geography', 'geo', 'geo-location', 'precision location', 'nearby'],
- 'Photos':['camera', 'photo', 'scan', 'album', 'picture', 'gallery', 'photo library', 'storage', 'image', 'video', 'scanner', 'photograph'],
- 'Voices':['microphone', 'voice', 'mic', 'speech', 'talk'],
- 'Financial info':['credit card', 'pay', 'payment', 'debit card', 'mastercard', 'wallet'],
- 'IP':['IP', 'Internet Protocol', 'IP address', 'internet protocol address'],
- 'Cookies':['cookies', 'cookie'],
- 'Social media':['facebook', 'twitter', 'socialmedia', 'social media'],
- 'Profile':['profile', 'account'],
- 'Gender':['gender']}
-
- sentence_list = cleanHtml(txt)
-
- classified_sen = {'Name': "",
- 'Birthday': "",
- 'Address': "",
- 'Phone': "",
- 'Email': "",
- 'Contacts': "",
- 'Location': "",
- 'Photos': "",
- 'Voices': "",
- 'Financial info': "",
- 'IP': "",
- 'Cookies': "",
- 'Social media': "",
- 'Profile': "",
- 'Gender': ""
- }
-
- keyword_index = {'Name':[],
- 'Birthday':[],
- 'Address':[],
- 'Phone':[],
- 'Email':[],
- 'Contacts':[],
- 'Location':[],
- 'Photos':[],
- 'Voices':[],
- 'Financial info':[],
- 'IP':[],
- 'Cookies':[],
- 'Social media': [],
- 'Profile': [],
- 'Gender': []
- }
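-    # keyword_index maps each data type to [start, end] character spans of the matched keywords
-    # inside the concatenated sentences stored in classified_sen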
-
- # simList = []
- # for a in information_type:
- # word.append(0)
- # for b in information_type:
- # simList.append(0)
-    nlp = spacy.load('en_core_web_sm')  # load the spaCy model once, rather than once per sentence
-    for sentence in sentence_list:
- # print("yes sentence: "+sentence+"\n")
-
- sentence = sentence.lower()
-
- info_found = False
-
- for type in information_type:
- for w in information_type[type]:
-
- if w.lower() in sentence:
- # if (check_ngram(w) == 1 and w.lower() in sentence.split()) or (check_ngram(w) > 1 and w.lower() in sentence):
-                    if w == "geo" or w == "IP" or w == "DOB" or w == "mic":
-                        w_start = sentence.index(w.lower())
-                        # only skip when the short keyword is embedded inside a longer word
-                        if w_start > 0 and sentence[w_start - 1] != " ":
-                            continue
- if sentence not in classified_sen[type]:
-
- if re.match(r'[a-zA-Z0-9]', sentence[-1]):
- sentence = sentence + '.'
-
- # start_index = len(classified_sen[type]) + sentence.index(w.lower())
- # end_index = start_index + len(w.lower()) - 1
- # keyword_index[type].append([start_index, end_index])
- # classified_sen[type] = classified_sen[type] + sentence
-
- pattern = re.compile(re.escape(w.lower()))
- for match in pattern.finditer(sentence):
- start_index = len(classified_sen[type]) + match.start()
- end_index = start_index + len(w) - 1
- keyword_index[type].append([start_index, end_index])
- # if sentence[0].isalpha():
- # sentence = sentence[0].upper() + sentence[1:]
- classified_sen[type] = classified_sen[type] + sentence + '\n'
- # sen_dict[type].append(sentence)
-
- info_found = True
-
-        if not info_found and clf_type.predict(tf.transform([sentence])) == "1":
-            doc = nlp(sentence)
- chunk_list = []
- for chunk in doc.noun_chunks:
- chunk_str = str(chunk)
- if chunk_str[0] == " ":
- chunk_str = chunk_str[1:]
- chunk_list.append(chunk_str)
-
- for type in information_type:
- found_this_type = False
-
- for w in information_type[type]:
- for chunk in chunk_list:
- if w == chunk or wordnetSim_modified(chunk, w) > 0.8:
-
- if sentence not in classified_sen[type]:
- # classified_sen[type].append(sentence)
-
- if re.match(r'[a-zA-Z0-9]', sentence[-1]):
- sentence = sentence + '.'
-
- # start_index = len(classified_sen[type]) + sentence.index(chunk)
- # end_index = start_index + len(chunk) - 1
- # keyword_index[type].append([start_index, end_index])
- # classified_sen[type] = classified_sen[type] + sentence
-
- pattern = re.compile(re.escape(chunk))
- for match in pattern.finditer(sentence):
- start_index = len(classified_sen[type]) + match.start()
- end_index = start_index + len(chunk) - 1
- keyword_index[type].append([start_index, end_index])
- # if sentence[0].isalpha():
- # sentence = sentence[0].upper() + sentence[1:]
- classified_sen[type] = classified_sen[type] + sentence + '\n'
- # sen_dict[type].append(sentence)
-
- found_this_type = True
-
- if found_this_type:
- break
-
- return classified_sen, keyword_index
-
diff --git a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/structures/bounding_box.py b/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/structures/bounding_box.py
deleted file mode 100644
index 5a1ecf746c1c6183d83d0613f0a13686ecb2a04b..0000000000000000000000000000000000000000
--- a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/structures/bounding_box.py
+++ /dev/null
@@ -1,271 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
-import torch
-
-# transpose
-FLIP_LEFT_RIGHT = 0
-FLIP_TOP_BOTTOM = 1
-
-
-class BoxList(object):
- """
- This class represents a set of bounding boxes.
- The bounding boxes are represented as a Nx4 Tensor.
- In order to uniquely determine the bounding boxes with respect
- to an image, we also store the corresponding image dimensions.
- They can contain extra information that is specific to each bounding box, such as
- labels.
- """
-
- def __init__(self, bbox, image_size, mode="xyxy"):
- device = bbox.device if isinstance(bbox, torch.Tensor) else torch.device("cpu")
- bbox = torch.as_tensor(bbox, dtype=torch.float32, device=device)
- if bbox.ndimension() != 2:
- raise ValueError(
- "bbox should have 2 dimensions, got {}".format(bbox.ndimension())
- )
- if bbox.size(-1) != 4:
- raise ValueError(
- "last dimension of bbox should have a "
- "size of 4, got {}".format(bbox.size(-1))
- )
- if mode not in ("xyxy", "xywh"):
- raise ValueError("mode should be 'xyxy' or 'xywh'")
-
- self.bbox = bbox
- self.size = image_size # (image_width, image_height)
- self.mode = mode
- self.extra_fields = {}
-
- def add_field(self, field, field_data):
- self.extra_fields[field] = field_data
-
- def get_field(self, field):
- return self.extra_fields[field]
-
- def has_field(self, field):
- return field in self.extra_fields
-
- def fields(self):
- return list(self.extra_fields.keys())
-
- def _copy_extra_fields(self, bbox):
- for k, v in bbox.extra_fields.items():
- self.extra_fields[k] = v
-
- def convert(self, mode):
- if mode not in ("xyxy", "xywh"):
- raise ValueError("mode should be 'xyxy' or 'xywh'")
- if mode == self.mode:
- return self
- # we only have two modes, so don't need to check
- # self.mode
- xmin, ymin, xmax, ymax = self._split_into_xyxy()
- if mode == "xyxy":
- bbox = torch.cat((xmin, ymin, xmax, ymax), dim=-1)
- bbox = BoxList(bbox, self.size, mode=mode)
- else:
- TO_REMOVE = 1
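-            # boxes are treated as closed pixel intervals, hence the +/- 1 when converting between xyxy and xywh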
- bbox = torch.cat(
- (xmin, ymin, xmax - xmin + TO_REMOVE, ymax - ymin + TO_REMOVE), dim=-1
- )
- bbox = BoxList(bbox, self.size, mode=mode)
- bbox._copy_extra_fields(self)
- return bbox
-
- def _split_into_xyxy(self):
- if self.mode == "xyxy":
- xmin, ymin, xmax, ymax = self.bbox.split(1, dim=-1)
- return xmin, ymin, xmax, ymax
- elif self.mode == "xywh":
- TO_REMOVE = 1
- xmin, ymin, w, h = self.bbox.split(1, dim=-1)
- return (
- xmin,
- ymin,
- xmin + (w - TO_REMOVE).clamp(min=0),
- ymin + (h - TO_REMOVE).clamp(min=0),
- )
- else:
- raise RuntimeError("Should not be here")
-
- def resize(self, size, *args, **kwargs):
- """
- Returns a resized copy of this bounding box
-
- :param size: The requested size in pixels, as a 2-tuple:
- (width, height).
- """
-
- ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size))
- if ratios[0] == ratios[1]:
- ratio = ratios[0]
- scaled_box = self.bbox * ratio
- bbox = BoxList(scaled_box, size, mode=self.mode)
- # bbox._copy_extra_fields(self)
- for k, v in self.extra_fields.items():
- if not isinstance(v, torch.Tensor):
- v = v.resize(size, *args, **kwargs)
- bbox.add_field(k, v)
- return bbox
-
- ratio_width, ratio_height = ratios
- xmin, ymin, xmax, ymax = self._split_into_xyxy()
- scaled_xmin = xmin * ratio_width
- scaled_xmax = xmax * ratio_width
- scaled_ymin = ymin * ratio_height
- scaled_ymax = ymax * ratio_height
- scaled_box = torch.cat(
- (scaled_xmin, scaled_ymin, scaled_xmax, scaled_ymax), dim=-1
- )
- bbox = BoxList(scaled_box, size, mode="xyxy")
- # bbox._copy_extra_fields(self)
- for k, v in self.extra_fields.items():
- if not isinstance(v, torch.Tensor):
- v = v.resize(size, *args, **kwargs)
- bbox.add_field(k, v)
-
- return bbox.convert(self.mode)
-
- def transpose(self, method):
- """
- Transpose bounding box (flip or rotate in 90 degree steps)
- :param method: One of :py:attr:`PIL.Image.FLIP_LEFT_RIGHT`,
- :py:attr:`PIL.Image.FLIP_TOP_BOTTOM`, :py:attr:`PIL.Image.ROTATE_90`,
- :py:attr:`PIL.Image.ROTATE_180`, :py:attr:`PIL.Image.ROTATE_270`,
- :py:attr:`PIL.Image.TRANSPOSE` or :py:attr:`PIL.Image.TRANSVERSE`.
- """
- if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
- raise NotImplementedError(
- "Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
- )
-
- image_width, image_height = self.size
- xmin, ymin, xmax, ymax = self._split_into_xyxy()
- if method == FLIP_LEFT_RIGHT:
- TO_REMOVE = 1
- transposed_xmin = image_width - xmax - TO_REMOVE
- transposed_xmax = image_width - xmin - TO_REMOVE
- transposed_ymin = ymin
- transposed_ymax = ymax
- elif method == FLIP_TOP_BOTTOM:
- transposed_xmin = xmin
- transposed_xmax = xmax
- transposed_ymin = image_height - ymax
- transposed_ymax = image_height - ymin
-
- transposed_boxes = torch.cat(
- (transposed_xmin, transposed_ymin, transposed_xmax, transposed_ymax), dim=-1
- )
- bbox = BoxList(transposed_boxes, self.size, mode="xyxy")
- # bbox._copy_extra_fields(self)
- for k, v in self.extra_fields.items():
- if not isinstance(v, torch.Tensor):
- v = v.transpose(method)
- bbox.add_field(k, v)
- return bbox.convert(self.mode)
-
- def crop(self, box, remove_empty=False):
- """
-        Crops a rectangular region from this bounding box. The box is a
- 4-tuple defining the left, upper, right, and lower pixel
- coordinate.
- """
- xmin, ymin, xmax, ymax = self._split_into_xyxy()
- w, h = box[2] - box[0], box[3] - box[1]
- cropped_xmin = (xmin - box[0]).clamp(min=0, max=w)
- cropped_ymin = (ymin - box[1]).clamp(min=0, max=h)
- cropped_xmax = (xmax - box[0]).clamp(min=0, max=w)
- cropped_ymax = (ymax - box[1]).clamp(min=0, max=h)
-
- # TODO should I filter empty boxes here?
- if False:
- is_empty = (cropped_xmin == cropped_xmax) | (cropped_ymin == cropped_ymax)
-
- cropped_box = torch.cat(
- (cropped_xmin, cropped_ymin, cropped_xmax, cropped_ymax), dim=-1
- )
- bbox = BoxList(cropped_box, (w, h), mode="xyxy")
- # bbox._copy_extra_fields(self)
- for k, v in self.extra_fields.items():
- if not isinstance(v, torch.Tensor):
- v = v.crop(box)
- bbox.add_field(k, v)
-
- if remove_empty:
- box = bbox.bbox
- keep = (box[:, 3] > box[:, 1]) & (box[:, 2] > box[:, 0])
- bbox = bbox[keep]
- return bbox.convert(self.mode)
-
- # Tensor-like methods
-
- def to(self, device):
- bbox = BoxList(self.bbox.to(device), self.size, self.mode)
- for k, v in self.extra_fields.items():
- if hasattr(v, "to"):
- v = v.to(device)
- bbox.add_field(k, v)
- return bbox
-
- def __getitem__(self, item):
- bbox = BoxList(self.bbox[item], self.size, self.mode)
- for k, v in self.extra_fields.items():
- bbox.add_field(k, v[item])
- return bbox
-
- def __len__(self):
- return self.bbox.shape[0]
-
- def clip_to_image(self, remove_empty=True):
- TO_REMOVE = 1
- self.bbox[:, 0].clamp_(min=0, max=self.size[0] - TO_REMOVE)
- self.bbox[:, 1].clamp_(min=0, max=self.size[1] - TO_REMOVE)
- self.bbox[:, 2].clamp_(min=0, max=self.size[0] - TO_REMOVE)
- self.bbox[:, 3].clamp_(min=0, max=self.size[1] - TO_REMOVE)
- if remove_empty:
- box = self.bbox
- keep = (box[:, 3] > box[:, 1]) & (box[:, 2] > box[:, 0])
- return self[keep]
- return self
-
- def area(self):
- box = self.bbox
- if self.mode == "xyxy":
- TO_REMOVE = 1
- area = (box[:, 2] - box[:, 0] + TO_REMOVE) * (box[:, 3] - box[:, 1] + TO_REMOVE)
- elif self.mode == "xywh":
- area = box[:, 2] * box[:, 3]
- else:
- raise RuntimeError("Should not be here")
-
- return area
-
- def copy_with_fields(self, fields, skip_missing=False):
- bbox = BoxList(self.bbox, self.size, self.mode)
- if not isinstance(fields, (list, tuple)):
- fields = [fields]
- for field in fields:
- if self.has_field(field):
- bbox.add_field(field, self.get_field(field))
- elif not skip_missing:
- raise KeyError("Field '{}' not found in {}".format(field, self))
- return bbox
-
- def __repr__(self):
- s = self.__class__.__name__ + "("
- s += "num_boxes={}, ".format(len(self))
- s += "image_width={}, ".format(self.size[0])
- s += "image_height={}, ".format(self.size[1])
- s += "mode={})".format(self.mode)
- return s
-
-
-if __name__ == "__main__":
- bbox = BoxList([[0, 0, 10, 10], [0, 0, 5, 5]], (10, 10))
- s_bbox = bbox.resize((5, 5))
- print(s_bbox)
- print(s_bbox.bbox)
-
- t_bbox = bbox.transpose(0)
- print(t_bbox)
- print(t_bbox.bbox)
diff --git a/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/models/Qformer.py b/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/models/Qformer.py
deleted file mode 100644
index 4902165ec6574d89f04cbeb2141b018278324ca6..0000000000000000000000000000000000000000
--- a/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/models/Qformer.py
+++ /dev/null
@@ -1,1217 +0,0 @@
-"""
-Adapted from salesforce@LAVIS. Below is the original copyright:
- * Copyright (c) 2023, salesforce.com, inc.
- * All rights reserved.
- * SPDX-License-Identifier: BSD-3-Clause
- * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
- * By Junnan Li
- * Based on huggingface code base
- * https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert
-"""
-
-import math
-import os
-import warnings
-from dataclasses import dataclass
-from typing import Optional, Tuple, Dict, Any
-
-import torch
-from torch import Tensor, device, dtype, nn
-import torch.utils.checkpoint
-from torch import nn
-from torch.nn import CrossEntropyLoss
-import torch.nn.functional as F
-
-from transformers.activations import ACT2FN
-from transformers.file_utils import (
- ModelOutput,
-)
-from transformers.modeling_outputs import (
- BaseModelOutputWithPastAndCrossAttentions,
- BaseModelOutputWithPoolingAndCrossAttentions,
- CausalLMOutputWithCrossAttentions,
- MaskedLMOutput,
- MultipleChoiceModelOutput,
- NextSentencePredictorOutput,
- QuestionAnsweringModelOutput,
- SequenceClassifierOutput,
- TokenClassifierOutput,
-)
-from transformers.modeling_utils import (
- PreTrainedModel,
- apply_chunking_to_forward,
- find_pruneable_heads_and_indices,
- prune_linear_layer,
-)
-from transformers.utils import logging
-from transformers.models.bert.configuration_bert import BertConfig
-
-logger = logging.get_logger(__name__)
-
-
-class BertEmbeddings(nn.Module):
- """Construct the embeddings from word and position embeddings."""
-
- def __init__(self, config):
- super().__init__()
- self.word_embeddings = nn.Embedding(
- config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id
- )
- self.position_embeddings = nn.Embedding(
- config.max_position_embeddings, config.hidden_size
- )
-
- # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
- # any TensorFlow checkpoint file
- self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
- self.dropout = nn.Dropout(config.hidden_dropout_prob)
-
- # position_ids (1, len position emb) is contiguous in memory and exported when serialized
- self.register_buffer(
- "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))
- )
- self.position_embedding_type = getattr(
- config, "position_embedding_type", "absolute"
- )
-
- self.config = config
-
- def forward(
- self,
- input_ids=None,
- position_ids=None,
- query_embeds=None,
- past_key_values_length=0,
- ):
- if input_ids is not None:
- seq_length = input_ids.size()[1]
- else:
- seq_length = 0
-
- if position_ids is None:
- position_ids = self.position_ids[
- :, past_key_values_length : seq_length + past_key_values_length
- ].clone()
-
- if input_ids is not None:
- embeddings = self.word_embeddings(input_ids)
- if self.position_embedding_type == "absolute":
- position_embeddings = self.position_embeddings(position_ids)
- embeddings = embeddings + position_embeddings
-
- if query_embeds is not None:
- embeddings = torch.cat((query_embeds, embeddings), dim=1)
- else:
- embeddings = query_embeds
-
- embeddings = self.LayerNorm(embeddings)
- embeddings = self.dropout(embeddings)
- return embeddings
-
-
-class BertSelfAttention(nn.Module):
- def __init__(self, config, is_cross_attention):
- super().__init__()
- self.config = config
- if config.hidden_size % config.num_attention_heads != 0 and not hasattr(
- config, "embedding_size"
- ):
- raise ValueError(
- "The hidden size (%d) is not a multiple of the number of attention "
- "heads (%d)" % (config.hidden_size, config.num_attention_heads)
- )
-
- self.num_attention_heads = config.num_attention_heads
- self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
- self.all_head_size = self.num_attention_heads * self.attention_head_size
-
- self.query = nn.Linear(config.hidden_size, self.all_head_size)
- if is_cross_attention:
- self.key = nn.Linear(config.encoder_width, self.all_head_size)
- self.value = nn.Linear(config.encoder_width, self.all_head_size)
- else:
- self.key = nn.Linear(config.hidden_size, self.all_head_size)
- self.value = nn.Linear(config.hidden_size, self.all_head_size)
-
- self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
- self.position_embedding_type = getattr(
- config, "position_embedding_type", "absolute"
- )
- if (
- self.position_embedding_type == "relative_key"
- or self.position_embedding_type == "relative_key_query"
- ):
- self.max_position_embeddings = config.max_position_embeddings
- self.distance_embedding = nn.Embedding(
- 2 * config.max_position_embeddings - 1, self.attention_head_size
- )
- self.save_attention = False
-
- def save_attn_gradients(self, attn_gradients):
- self.attn_gradients = attn_gradients
-
- def get_attn_gradients(self):
- return self.attn_gradients
-
- def save_attention_map(self, attention_map):
- self.attention_map = attention_map
-
- def get_attention_map(self):
- return self.attention_map
-
- def transpose_for_scores(self, x):
- new_x_shape = x.size()[:-1] + (
- self.num_attention_heads,
- self.attention_head_size,
- )
- x = x.view(*new_x_shape)
- return x.permute(0, 2, 1, 3)
-
- def forward(
- self,
- hidden_states,
- attention_mask=None,
- head_mask=None,
- encoder_hidden_states=None,
- encoder_attention_mask=None,
- past_key_value=None,
- output_attentions=False,
- ):
-
- # If this is instantiated as a cross-attention module, the keys
- # and values come from an encoder; the attention mask needs to be
- # such that the encoder's padding tokens are not attended to.
- is_cross_attention = encoder_hidden_states is not None
-
- if is_cross_attention:
- key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
- value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
- attention_mask = encoder_attention_mask
- elif past_key_value is not None:
- key_layer = self.transpose_for_scores(self.key(hidden_states))
- value_layer = self.transpose_for_scores(self.value(hidden_states))
- key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
- value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
- else:
- key_layer = self.transpose_for_scores(self.key(hidden_states))
- value_layer = self.transpose_for_scores(self.value(hidden_states))
-
- mixed_query_layer = self.query(hidden_states)
-
- query_layer = self.transpose_for_scores(mixed_query_layer)
-
- past_key_value = (key_layer, value_layer)
-
- # Take the dot product between "query" and "key" to get the raw attention scores.
- attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
-
- if (
- self.position_embedding_type == "relative_key"
- or self.position_embedding_type == "relative_key_query"
- ):
- seq_length = hidden_states.size()[1]
- position_ids_l = torch.arange(
- seq_length, dtype=torch.long, device=hidden_states.device
- ).view(-1, 1)
- position_ids_r = torch.arange(
- seq_length, dtype=torch.long, device=hidden_states.device
- ).view(1, -1)
- distance = position_ids_l - position_ids_r
- positional_embedding = self.distance_embedding(
- distance + self.max_position_embeddings - 1
- )
- positional_embedding = positional_embedding.to(
- dtype=query_layer.dtype
- ) # fp16 compatibility
-
- if self.position_embedding_type == "relative_key":
- relative_position_scores = torch.einsum(
- "bhld,lrd->bhlr", query_layer, positional_embedding
- )
- attention_scores = attention_scores + relative_position_scores
- elif self.position_embedding_type == "relative_key_query":
- relative_position_scores_query = torch.einsum(
- "bhld,lrd->bhlr", query_layer, positional_embedding
- )
- relative_position_scores_key = torch.einsum(
- "bhrd,lrd->bhlr", key_layer, positional_embedding
- )
- attention_scores = (
- attention_scores
- + relative_position_scores_query
- + relative_position_scores_key
- )
-
- attention_scores = attention_scores / math.sqrt(self.attention_head_size)
- if attention_mask is not None:
- # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
- attention_scores = attention_scores + attention_mask
-
- # Normalize the attention scores to probabilities.
- attention_probs = nn.Softmax(dim=-1)(attention_scores)
-
- if is_cross_attention and self.save_attention:
- self.save_attention_map(attention_probs)
- attention_probs.register_hook(self.save_attn_gradients)
-
- # This is actually dropping out entire tokens to attend to, which might
- # seem a bit unusual, but is taken from the original Transformer paper.
- attention_probs_dropped = self.dropout(attention_probs)
-
- # Mask heads if we want to
- if head_mask is not None:
- attention_probs_dropped = attention_probs_dropped * head_mask
-
- context_layer = torch.matmul(attention_probs_dropped, value_layer)
-
- context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
- new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
- context_layer = context_layer.view(*new_context_layer_shape)
-
- outputs = (
- (context_layer, attention_probs) if output_attentions else (context_layer,)
- )
-
- outputs = outputs + (past_key_value,)
- return outputs
-
-
-class BertSelfOutput(nn.Module):
- def __init__(self, config):
- super().__init__()
- self.dense = nn.Linear(config.hidden_size, config.hidden_size)
- self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
- self.dropout = nn.Dropout(config.hidden_dropout_prob)
-
- def forward(self, hidden_states, input_tensor):
- hidden_states = self.dense(hidden_states)
- hidden_states = self.dropout(hidden_states)
- hidden_states = self.LayerNorm(hidden_states + input_tensor)
- return hidden_states
-
-
-class BertAttention(nn.Module):
- def __init__(self, config, is_cross_attention=False):
- super().__init__()
- self.self = BertSelfAttention(config, is_cross_attention)
- self.output = BertSelfOutput(config)
- self.pruned_heads = set()
-
- def prune_heads(self, heads):
- if len(heads) == 0:
- return
- heads, index = find_pruneable_heads_and_indices(
- heads,
- self.self.num_attention_heads,
- self.self.attention_head_size,
- self.pruned_heads,
- )
-
- # Prune linear layers
- self.self.query = prune_linear_layer(self.self.query, index)
- self.self.key = prune_linear_layer(self.self.key, index)
- self.self.value = prune_linear_layer(self.self.value, index)
- self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
-
- # Update hyper params and store pruned heads
- self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
- self.self.all_head_size = (
- self.self.attention_head_size * self.self.num_attention_heads
- )
- self.pruned_heads = self.pruned_heads.union(heads)
-
- def forward(
- self,
- hidden_states,
- attention_mask=None,
- head_mask=None,
- encoder_hidden_states=None,
- encoder_attention_mask=None,
- past_key_value=None,
- output_attentions=False,
- ):
- self_outputs = self.self(
- hidden_states,
- attention_mask,
- head_mask,
- encoder_hidden_states,
- encoder_attention_mask,
- past_key_value,
- output_attentions,
- )
- attention_output = self.output(self_outputs[0], hidden_states)
-
- outputs = (attention_output,) + self_outputs[
- 1:
- ] # add attentions if we output them
- return outputs
-
-
-class BertIntermediate(nn.Module):
- def __init__(self, config):
- super().__init__()
- self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
- if isinstance(config.hidden_act, str):
- self.intermediate_act_fn = ACT2FN[config.hidden_act]
- else:
- self.intermediate_act_fn = config.hidden_act
-
- def forward(self, hidden_states):
- hidden_states = self.dense(hidden_states)
- hidden_states = self.intermediate_act_fn(hidden_states)
- return hidden_states
-
-
-class BertOutput(nn.Module):
- def __init__(self, config):
- super().__init__()
- self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
- self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
- self.dropout = nn.Dropout(config.hidden_dropout_prob)
-
- def forward(self, hidden_states, input_tensor):
- hidden_states = self.dense(hidden_states)
- hidden_states = self.dropout(hidden_states)
- hidden_states = self.LayerNorm(hidden_states + input_tensor)
- return hidden_states
-
-
-class BertLayer(nn.Module):
- def __init__(self, config, layer_num):
- super().__init__()
- self.config = config
- self.chunk_size_feed_forward = config.chunk_size_feed_forward
- self.seq_len_dim = 1
- self.attention = BertAttention(config)
- self.layer_num = layer_num
- if (
- self.config.add_cross_attention
- and layer_num % self.config.cross_attention_freq == 0
- ):
- self.crossattention = BertAttention(
- config, is_cross_attention=self.config.add_cross_attention
- )
- self.has_cross_attention = True
- else:
- self.has_cross_attention = False
- self.intermediate = BertIntermediate(config)
- self.output = BertOutput(config)
-
- self.intermediate_query = BertIntermediate(config)
- self.output_query = BertOutput(config)
-
- def forward(
- self,
- hidden_states,
- attention_mask=None,
- head_mask=None,
- encoder_hidden_states=None,
- encoder_attention_mask=None,
- past_key_value=None,
- output_attentions=False,
- query_length=0,
- ):
- # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
- self_attn_past_key_value = (
- past_key_value[:2] if past_key_value is not None else None
- )
- self_attention_outputs = self.attention(
- hidden_states,
- attention_mask,
- head_mask,
- output_attentions=output_attentions,
- past_key_value=self_attn_past_key_value,
- )
- attention_output = self_attention_outputs[0]
- outputs = self_attention_outputs[1:-1]
-
- present_key_value = self_attention_outputs[-1]
-
- if query_length > 0:
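-            # the first query_length positions correspond to the query tokens; only they are routed
-            # through cross-attention to the encoder (e.g. visual) features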
- query_attention_output = attention_output[:, :query_length, :]
-
- if self.has_cross_attention:
- assert (
- encoder_hidden_states is not None
- ), "encoder_hidden_states must be given for cross-attention layers"
- cross_attention_outputs = self.crossattention(
- query_attention_output,
- attention_mask,
- head_mask,
- encoder_hidden_states,
- encoder_attention_mask,
- output_attentions=output_attentions,
- )
- query_attention_output = cross_attention_outputs[0]
- outputs = (
- outputs + cross_attention_outputs[1:-1]
- ) # add cross attentions if we output attention weights
-
- layer_output = apply_chunking_to_forward(
- self.feed_forward_chunk_query,
- self.chunk_size_feed_forward,
- self.seq_len_dim,
- query_attention_output,
- )
- if attention_output.shape[1] > query_length:
- layer_output_text = apply_chunking_to_forward(
- self.feed_forward_chunk,
- self.chunk_size_feed_forward,
- self.seq_len_dim,
- attention_output[:, query_length:, :],
- )
- layer_output = torch.cat([layer_output, layer_output_text], dim=1)
- else:
- layer_output = apply_chunking_to_forward(
- self.feed_forward_chunk,
- self.chunk_size_feed_forward,
- self.seq_len_dim,
- attention_output,
- )
- outputs = (layer_output,) + outputs
-
- outputs = outputs + (present_key_value,)
-
- return outputs
-
- def feed_forward_chunk(self, attention_output):
- intermediate_output = self.intermediate(attention_output)
- layer_output = self.output(intermediate_output, attention_output)
- return layer_output
-
- def feed_forward_chunk_query(self, attention_output):
- intermediate_output = self.intermediate_query(attention_output)
- layer_output = self.output_query(intermediate_output, attention_output)
- return layer_output
-
-
-class BertEncoder(nn.Module):
- def __init__(self, config):
- super().__init__()
- self.config = config
- self.layer = nn.ModuleList(
- [BertLayer(config, i) for i in range(config.num_hidden_layers)]
- )
-
- def forward(
- self,
- hidden_states,
- attention_mask=None,
- head_mask=None,
- encoder_hidden_states=None,
- encoder_attention_mask=None,
- past_key_values=None,
- use_cache=None,
- output_attentions=False,
- output_hidden_states=False,
- return_dict=True,
- query_length=0,
- ):
- all_hidden_states = () if output_hidden_states else None
- all_self_attentions = () if output_attentions else None
- all_cross_attentions = (
- () if output_attentions and self.config.add_cross_attention else None
- )
-
- next_decoder_cache = () if use_cache else None
-
- for i in range(self.config.num_hidden_layers):
- layer_module = self.layer[i]
- if output_hidden_states:
- all_hidden_states = all_hidden_states + (hidden_states,)
-
- layer_head_mask = head_mask[i] if head_mask is not None else None
- past_key_value = past_key_values[i] if past_key_values is not None else None
-
- if getattr(self.config, "gradient_checkpointing", False) and self.training:
-
- if use_cache:
- logger.warn(
- "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
- )
- use_cache = False
-
- def create_custom_forward(module):
- def custom_forward(*inputs):
- return module(
- *inputs, past_key_value, output_attentions, query_length
- )
-
- return custom_forward
-
- layer_outputs = torch.utils.checkpoint.checkpoint(
- create_custom_forward(layer_module),
- hidden_states,
- attention_mask,
- layer_head_mask,
- encoder_hidden_states,
- encoder_attention_mask,
- )
- else:
- layer_outputs = layer_module(
- hidden_states,
- attention_mask,
- layer_head_mask,
- encoder_hidden_states,
- encoder_attention_mask,
- past_key_value,
- output_attentions,
- query_length,
- )
-
- hidden_states = layer_outputs[0]
- if use_cache:
- next_decoder_cache += (layer_outputs[-1],)
- if output_attentions:
- all_self_attentions = all_self_attentions + (layer_outputs[1],)
- all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
-
- if output_hidden_states:
- all_hidden_states = all_hidden_states + (hidden_states,)
-
- if not return_dict:
- return tuple(
- v
- for v in [
- hidden_states,
- next_decoder_cache,
- all_hidden_states,
- all_self_attentions,
- all_cross_attentions,
- ]
- if v is not None
- )
- return BaseModelOutputWithPastAndCrossAttentions(
- last_hidden_state=hidden_states,
- past_key_values=next_decoder_cache,
- hidden_states=all_hidden_states,
- attentions=all_self_attentions,
- cross_attentions=all_cross_attentions,
- )
-
-
-class BertPooler(nn.Module):
- def __init__(self, config):
- super().__init__()
- self.dense = nn.Linear(config.hidden_size, config.hidden_size)
- self.activation = nn.Tanh()
-
- def forward(self, hidden_states):
- # We "pool" the model by simply taking the hidden state corresponding
- # to the first token.
- first_token_tensor = hidden_states[:, 0]
- pooled_output = self.dense(first_token_tensor)
- pooled_output = self.activation(pooled_output)
- return pooled_output
-
-
-class BertPredictionHeadTransform(nn.Module):
- def __init__(self, config):
- super().__init__()
- self.dense = nn.Linear(config.hidden_size, config.hidden_size)
- if isinstance(config.hidden_act, str):
- self.transform_act_fn = ACT2FN[config.hidden_act]
- else:
- self.transform_act_fn = config.hidden_act
- self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
-
- def forward(self, hidden_states):
- hidden_states = self.dense(hidden_states)
- hidden_states = self.transform_act_fn(hidden_states)
- hidden_states = self.LayerNorm(hidden_states)
- return hidden_states
-
-
-class BertLMPredictionHead(nn.Module):
- def __init__(self, config):
- super().__init__()
- self.transform = BertPredictionHeadTransform(config)
-
- # The output weights are the same as the input embeddings, but there is
- # an output-only bias for each token.
- self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
-
- self.bias = nn.Parameter(torch.zeros(config.vocab_size))
-
- # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
- self.decoder.bias = self.bias
-
- def forward(self, hidden_states):
- hidden_states = self.transform(hidden_states)
- hidden_states = self.decoder(hidden_states)
- return hidden_states
-
-
-class BertOnlyMLMHead(nn.Module):
- def __init__(self, config):
- super().__init__()
- self.predictions = BertLMPredictionHead(config)
-
- def forward(self, sequence_output):
- prediction_scores = self.predictions(sequence_output)
- return prediction_scores
-
-
-class BertPreTrainedModel(PreTrainedModel):
- """
- An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
- models.
- """
-
- config_class = BertConfig
- base_model_prefix = "bert"
- _keys_to_ignore_on_load_missing = [r"position_ids"]
-
- def _init_weights(self, module):
- """Initialize the weights"""
- if isinstance(module, (nn.Linear, nn.Embedding)):
- # Slightly different from the TF version which uses truncated_normal for initialization
- # cf https://github.com/pytorch/pytorch/pull/5617
- module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
- elif isinstance(module, nn.LayerNorm):
- module.bias.data.zero_()
- module.weight.data.fill_(1.0)
- if isinstance(module, nn.Linear) and module.bias is not None:
- module.bias.data.zero_()
-
-
-class BertModel(BertPreTrainedModel):
- """
- The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
- cross-attention is added between the self-attention layers, following the architecture described in `Attention is
- all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
- Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To be used as a decoder, the model needs to be
- initialized with the :obj:`is_decoder` argument and :obj:`add_cross_attention` set to :obj:`True`; an
- :obj:`encoder_hidden_states` is then expected as an input to the forward pass.
- """
-
- def __init__(self, config, add_pooling_layer=False):
- super().__init__(config)
- self.config = config
-
- self.embeddings = BertEmbeddings(config)
-
- self.encoder = BertEncoder(config)
-
- self.pooler = BertPooler(config) if add_pooling_layer else None
-
- self.init_weights()
-
- def get_input_embeddings(self):
- return self.embeddings.word_embeddings
-
- def set_input_embeddings(self, value):
- self.embeddings.word_embeddings = value
-
- def _prune_heads(self, heads_to_prune):
- """
- Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
- class PreTrainedModel
- """
- for layer, heads in heads_to_prune.items():
- self.encoder.layer[layer].attention.prune_heads(heads)
-
- def get_extended_attention_mask(
- self,
- attention_mask: Tensor,
- input_shape: Tuple[int],
- device: device,
- is_decoder: bool,
- has_query: bool = False,
- ) -> Tensor:
- """
- Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
-
- Arguments:
- attention_mask (:obj:`torch.Tensor`):
- Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
- input_shape (:obj:`Tuple[int]`):
- The shape of the input to the model.
- device: (:obj:`torch.device`):
- The device of the input to the model.
-
- Returns:
- :obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
- """
- # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
- # ourselves in which case we just need to make it broadcastable to all heads.
- if attention_mask.dim() == 3:
- extended_attention_mask = attention_mask[:, None, :, :]
- elif attention_mask.dim() == 2:
- # Provided a padding mask of dimensions [batch_size, seq_length]
- # - if the model is a decoder, apply a causal mask in addition to the padding mask
- # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
- if is_decoder:
- batch_size, seq_length = input_shape
-
- seq_ids = torch.arange(seq_length, device=device)
- causal_mask = (
- seq_ids[None, None, :].repeat(batch_size, seq_length, 1)
- <= seq_ids[None, :, None]
- )
-
- # add a prefix ones mask to the causal mask
- # causal and attention masks must have same type with pytorch version < 1.3
- causal_mask = causal_mask.to(attention_mask.dtype)
-
- if causal_mask.shape[1] < attention_mask.shape[1]:
- prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
- # UniLM-style mask: query tokens attend only among themselves, while text
- # tokens attend to every query plus earlier text (see the sketch after this file's diff)
- if has_query:
- causal_mask = torch.cat(
- [
- torch.zeros(
- (batch_size, prefix_seq_len, seq_length),
- device=device,
- dtype=causal_mask.dtype,
- ),
- causal_mask,
- ],
- axis=1,
- )
- causal_mask = torch.cat(
- [
- torch.ones(
- (batch_size, causal_mask.shape[1], prefix_seq_len),
- device=device,
- dtype=causal_mask.dtype,
- ),
- causal_mask,
- ],
- axis=-1,
- )
- extended_attention_mask = (
- causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
- )
- else:
- extended_attention_mask = attention_mask[:, None, None, :]
- else:
- raise ValueError(
- "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
- input_shape, attention_mask.shape
- )
- )
-
- # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
- # masked positions, this operation will create a tensor which is 0.0 for
- # positions we want to attend and -10000.0 for masked positions.
- # Since we are adding it to the raw scores before the softmax, this is
- # effectively the same as removing these entirely.
- extended_attention_mask = extended_attention_mask.to(
- dtype=self.dtype
- ) # fp16 compatibility
- extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
- return extended_attention_mask
-
- def forward(
- self,
- input_ids=None,
- attention_mask=None,
- position_ids=None,
- head_mask=None,
- query_embeds=None,
- encoder_hidden_states=None,
- encoder_attention_mask=None,
- past_key_values=None,
- use_cache=None,
- output_attentions=None,
- output_hidden_states=None,
- return_dict=None,
- is_decoder=False,
- ):
- r"""
- encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
- Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
- the model is configured as a decoder.
- encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
- Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
- the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- - 1 for tokens that are **not masked**,
- - 0 for tokens that are **masked**.
- past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
- Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
- If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
- (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
- instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
- use_cache (:obj:`bool`, `optional`):
- If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
- decoding (see :obj:`past_key_values`).
- """
- output_attentions = (
- output_attentions
- if output_attentions is not None
- else self.config.output_attentions
- )
- output_hidden_states = (
- output_hidden_states
- if output_hidden_states is not None
- else self.config.output_hidden_states
- )
- return_dict = (
- return_dict if return_dict is not None else self.config.use_return_dict
- )
-
- # use_cache = use_cache if use_cache is not None else self.config.use_cache
-
- if input_ids is None:
- assert (
- query_embeds is not None
- ), "You have to specify query_embeds when input_ids is None"
-
- # past_key_values_length
- past_key_values_length = (
- past_key_values[0][0].shape[2] - self.config.query_length
- if past_key_values is not None
- else 0
- )
-
- query_length = query_embeds.shape[1] if query_embeds is not None else 0
-
- embedding_output = self.embeddings(
- input_ids=input_ids,
- position_ids=position_ids,
- query_embeds=query_embeds,
- past_key_values_length=past_key_values_length,
- )
-
- input_shape = embedding_output.size()[:-1]
- batch_size, seq_length = input_shape
- device = embedding_output.device
-
- if attention_mask is None:
- attention_mask = torch.ones(
- ((batch_size, seq_length + past_key_values_length)), device=device
- )
-
- # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
- # ourselves in which case we just need to make it broadcastable to all heads.
- if is_decoder:
- extended_attention_mask = self.get_extended_attention_mask(
- attention_mask,
- input_ids.shape,
- device,
- is_decoder,
- has_query=(query_embeds is not None),
- )
- else:
- extended_attention_mask = self.get_extended_attention_mask(
- attention_mask, input_shape, device, is_decoder
- )
-
- # If a 2D or 3D attention mask is provided for the cross-attention
- # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
- if encoder_hidden_states is not None:
- if type(encoder_hidden_states) == list:
- encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[
- 0
- ].size()
- else:
- (
- encoder_batch_size,
- encoder_sequence_length,
- _,
- ) = encoder_hidden_states.size()
- encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
-
- if type(encoder_attention_mask) == list:
- encoder_extended_attention_mask = [
- self.invert_attention_mask(mask) for mask in encoder_attention_mask
- ]
- elif encoder_attention_mask is None:
- encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
- encoder_extended_attention_mask = self.invert_attention_mask(
- encoder_attention_mask
- )
- else:
- encoder_extended_attention_mask = self.invert_attention_mask(
- encoder_attention_mask
- )
- else:
- encoder_extended_attention_mask = None
-
- # Prepare head mask if needed
- # 1.0 in head_mask indicate we keep the head
- # attention_probs has shape bsz x n_heads x N x N
- # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
- # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
- head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
-
- encoder_outputs = self.encoder(
- embedding_output,
- attention_mask=extended_attention_mask,
- head_mask=head_mask,
- encoder_hidden_states=encoder_hidden_states,
- encoder_attention_mask=encoder_extended_attention_mask,
- past_key_values=past_key_values,
- use_cache=use_cache,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- query_length=query_length,
- )
- sequence_output = encoder_outputs[0]
- pooled_output = (
- self.pooler(sequence_output) if self.pooler is not None else None
- )
-
- if not return_dict:
- return (sequence_output, pooled_output) + encoder_outputs[1:]
-
- return BaseModelOutputWithPoolingAndCrossAttentions(
- last_hidden_state=sequence_output,
- pooler_output=pooled_output,
- past_key_values=encoder_outputs.past_key_values,
- hidden_states=encoder_outputs.hidden_states,
- attentions=encoder_outputs.attentions,
- cross_attentions=encoder_outputs.cross_attentions,
- )
-
-
-class BertLMHeadModel(BertPreTrainedModel):
-
- _keys_to_ignore_on_load_unexpected = [r"pooler"]
- _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
-
- def __init__(self, config):
- super().__init__(config)
-
- self.bert = BertModel(config, add_pooling_layer=False)
- self.cls = BertOnlyMLMHead(config)
-
- self.init_weights()
-
- def get_output_embeddings(self):
- return self.cls.predictions.decoder
-
- def set_output_embeddings(self, new_embeddings):
- self.cls.predictions.decoder = new_embeddings
-
- def forward(
- self,
- input_ids=None,
- attention_mask=None,
- position_ids=None,
- head_mask=None,
- query_embeds=None,
- encoder_hidden_states=None,
- encoder_attention_mask=None,
- labels=None,
- past_key_values=None,
- use_cache=True,
- output_attentions=None,
- output_hidden_states=None,
- return_dict=None,
- return_logits=False,
- is_decoder=True,
- reduction="mean",
- ):
- r"""
- encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
- Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
- the model is configured as a decoder.
- encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
- Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
- the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- - 1 for tokens that are **not masked**,
- - 0 for tokens that are **masked**.
- labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
- Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
- ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
- ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
- past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
- Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
- If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
- (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
- instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
- use_cache (:obj:`bool`, `optional`):
- If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
- decoding (see :obj:`past_key_values`).
- Returns:
- Example::
- >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
- >>> import torch
- >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
- >>> config = BertConfig.from_pretrained("bert-base-cased")
- >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)
- >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
- >>> outputs = model(**inputs)
- >>> prediction_logits = outputs.logits
- """
- return_dict = (
- return_dict if return_dict is not None else self.config.use_return_dict
- )
- if labels is not None:
- use_cache = False
- if past_key_values is not None:
- query_embeds = None
-
- outputs = self.bert(
- input_ids,
- attention_mask=attention_mask,
- position_ids=position_ids,
- head_mask=head_mask,
- query_embeds=query_embeds,
- encoder_hidden_states=encoder_hidden_states,
- encoder_attention_mask=encoder_attention_mask,
- past_key_values=past_key_values,
- use_cache=use_cache,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- is_decoder=is_decoder,
- )
-
- sequence_output = outputs[0]
- if query_embeds is not None:
- sequence_output = outputs[0][:, query_embeds.shape[1] :, :]
-
- prediction_scores = self.cls(sequence_output)
-
- if return_logits:
- return prediction_scores[:, :-1, :].contiguous()
-
- lm_loss = None
- if labels is not None:
- # we are doing next-token prediction; shift prediction scores and input ids by one
- shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
- labels = labels[:, 1:].contiguous()
- loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=0.1)
- lm_loss = loss_fct(
- shifted_prediction_scores.view(-1, self.config.vocab_size),
- labels.view(-1),
- )
- if reduction == "none":
- lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1)
-
- if not return_dict:
- output = (prediction_scores,) + outputs[2:]
- return ((lm_loss,) + output) if lm_loss is not None else output
-
- return CausalLMOutputWithCrossAttentions(
- loss=lm_loss,
- logits=prediction_scores,
- past_key_values=outputs.past_key_values,
- hidden_states=outputs.hidden_states,
- attentions=outputs.attentions,
- cross_attentions=outputs.cross_attentions,
- )
-
- def prepare_inputs_for_generation(
- self, input_ids, query_embeds, past=None, attention_mask=None, **model_kwargs
- ):
- # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
- if attention_mask is None:
- attention_mask = input_ids.new_ones(input_ids.shape)
- query_mask = input_ids.new_ones(query_embeds.shape[:-1])
- attention_mask = torch.cat([query_mask, attention_mask], dim=-1)
-
- # cut decoder_input_ids if past is used
- if past is not None:
- input_ids = input_ids[:, -1:]
-
- return {
- "input_ids": input_ids,
- "query_embeds": query_embeds,
- "attention_mask": attention_mask,
- "past_key_values": past,
- "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None),
- "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None),
- "is_decoder": True,
- }
-
- def _reorder_cache(self, past, beam_idx):
- reordered_past = ()
- for layer_past in past:
- reordered_past += (
- tuple(
- past_state.index_select(0, beam_idx) for past_state in layer_past
- ),
- )
- return reordered_past
-
-
-class BertForMaskedLM(BertPreTrainedModel):
-
- _keys_to_ignore_on_load_unexpected = [r"pooler"]
- _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
-
- def __init__(self, config):
- super().__init__(config)
-
- self.bert = BertModel(config, add_pooling_layer=False)
- self.cls = BertOnlyMLMHead(config)
-
- self.init_weights()
-
- def get_output_embeddings(self):
- return self.cls.predictions.decoder
-
- def set_output_embeddings(self, new_embeddings):
- self.cls.predictions.decoder = new_embeddings
-
- def forward(
- self,
- input_ids=None,
- attention_mask=None,
- position_ids=None,
- head_mask=None,
- query_embeds=None,
- encoder_hidden_states=None,
- encoder_attention_mask=None,
- labels=None,
- output_attentions=None,
- output_hidden_states=None,
- return_dict=None,
- return_logits=False,
- is_decoder=False,
- ):
- r"""
- labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
- Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
- config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
- (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
- """
-
- return_dict = (
- return_dict if return_dict is not None else self.config.use_return_dict
- )
-
- outputs = self.bert(
- input_ids,
- attention_mask=attention_mask,
- position_ids=position_ids,
- head_mask=head_mask,
- query_embeds=query_embeds,
- encoder_hidden_states=encoder_hidden_states,
- encoder_attention_mask=encoder_attention_mask,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- is_decoder=is_decoder,
- )
-
- # drop the query positions so the scores align with the text tokens;
- # fall back to the full output when no query embeddings are given
- sequence_output = outputs[0]
- if query_embeds is not None:
- sequence_output = sequence_output[:, query_embeds.shape[1] :, :]
- prediction_scores = self.cls(sequence_output)
-
- if return_logits:
- return prediction_scores
-
- masked_lm_loss = None
- if labels is not None:
- loss_fct = CrossEntropyLoss() # -100 index = padding token
- masked_lm_loss = loss_fct(
- prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)
- )
-
- if not return_dict:
- output = (prediction_scores,) + outputs[2:]
- return (
- ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
- )
-
- return MaskedLMOutput(
- loss=masked_lm_loss,
- logits=prediction_scores,
- hidden_states=outputs.hidden_states,
- attentions=outputs.attentions,
- )
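
The trickiest piece of the Q-Former-style BERT deleted above is `get_extended_attention_mask` with `has_query=True`: learned query tokens are prepended to the text, the queries attend bidirectionally among themselves but not to the text, and each text token attends to every query plus the earlier text tokens. Below is a minimal standalone sketch of just that mask construction; it is not part of the deleted module, the helper name `unilm_style_mask` is invented here, and the padding-mask multiplication and additive `-10000.0` conversion performed by the real method are omitted.

```python
import torch


def unilm_style_mask(batch_size: int, query_len: int, text_len: int) -> torch.Tensor:
    """0/1 attention mask for a bidirectional query prefix followed by causal text."""
    seq_ids = torch.arange(text_len)
    # lower-triangular causal mask over the text tokens: (B, text, text)
    causal = (seq_ids[None, None, :].repeat(batch_size, text_len, 1)
              <= seq_ids[None, :, None]).float()
    # query rows see no text columns ...
    causal = torch.cat([torch.zeros(batch_size, query_len, text_len), causal], dim=1)
    # ... but every row sees all query columns
    ones = torch.ones(batch_size, query_len + text_len, query_len)
    return torch.cat([ones, causal], dim=-1)  # (B, query+text, query+text)


print(unilm_style_mask(batch_size=1, query_len=2, text_len=3)[0])
# tensor([[1., 1., 0., 0., 0.],
#         [1., 1., 0., 0., 0.],
#         [1., 1., 1., 0., 0.],
#         [1., 1., 1., 1., 0.],
#         [1., 1., 1., 1., 1.]])
```

Multiplying this 0/1 mask by the 2-D padding mask and mapping `1 -> 0.0`, `0 -> -10000.0` yields the additive mask that the deleted encoder adds to its attention scores.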
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-ebfc06be.js b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-ebfc06be.js
deleted file mode 100644
index 0c116207d9592b3bf43567c65f44f9f9641b6100..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-ebfc06be.js
+++ /dev/null
@@ -1,4 +0,0 @@
-const VERSION_RE = new RegExp("3.37.0/", "g");function import_fix(mod, base) {const url = new URL(mod, base); return import(`https://gradio.s3-us-west-2.amazonaws.com/3.37.0/${url.pathname?.startsWith('/') ? url.pathname.substring(1).replace(VERSION_RE, "") : url.pathname.replace(VERSION_RE, "")}`);}import{S as Ae,e as ye,s as Se,J as Pe,K as h,p as E,M as L,n as $,A as V,N as H,O as J,U as S,Q as B,X as re,af as Ve,a1 as ge,P as Y,R as Z,G as Re,m as pe,V as dl,z as P,u as ue,v as R,y as oe,B as Be,ag as Nl,k as j,o as K,x as Q,ah as Ol,h as He,ai as zl,_ as Ie,F as C,T as Te,aj as ml,j as cl,t as hl,a9 as Il,ab as Dl,ac as Cl,ad as jl,ak as z,E as Kl,ae as Ql,q as Yl,r as ql}from"./index-1d65707a.js";import"./Blocks-c9e1499d.js";import{U as Xl}from"./UploadText-f599be03.js";import{a as bl,B as Gl}from"./Button-f155035a.js";import{U as Jl}from"./Upload-9bb55fba.js";import{M as Zl}from"./ModifyUpload-c89cfce3.js";import{B as gl}from"./BlockLabel-66866176.js";import{E as Wl}from"./Empty-eec13822.js";import{S as xl,u as $l}from"./ShareButton-8cd3d8f6.js";import{n as en}from"./ModifyUpload.svelte_svelte_type_style_lang-d2acacf0.js";import"./IconButton-d42f3661.js";function ln(l){let e,i,n,a;return{c(){e=Pe("svg"),i=Pe("path"),n=Pe("circle"),a=Pe("circle"),h(i,"d","M9 18V5l12-2v13"),h(n,"cx","6"),h(n,"cy","18"),h(n,"r","3"),h(a,"cx","18"),h(a,"cy","16"),h(a,"r","3"),h(e,"xmlns","http://www.w3.org/2000/svg"),h(e,"width","100%"),h(e,"height","100%"),h(e,"viewBox","0 0 24 24"),h(e,"fill","none"),h(e,"stroke","currentColor"),h(e,"stroke-width","1.5"),h(e,"stroke-linecap","round"),h(e,"stroke-linejoin","round"),h(e,"class","feather feather-music")},m(t,s){E(t,e,s),L(e,i),L(e,n),L(e,a)},p:$,i:$,o:$,d(t){t&&V(e)}}}class Ne extends Ae{constructor(e){super(),ye(this,e,null,ln,Se,{})}}function De(l,e,i){const n=l.slice();return n[27]=e[i],n[29]=i,n}function Ce(l){let e,i,n,a,t=(l[6]==="label"||l[7]==="label")&&je(l);return{c(){e=H("span"),t&&t.c(),h(e,"class","pip first"),h(e,"style",i=l[14]+": 0%;"),S(e,"selected",l[17](l[0])),S(e,"in-range",l[16](l[0]))},m(s,r){E(s,e,r),t&&t.m(e,null),n||(a=[B(e,"click",function(){re(l[20](l[0]))&&l[20](l[0]).apply(this,arguments)}),B(e,"touchend",Ve(function(){re(l[20](l[0]))&&l[20](l[0]).apply(this,arguments)}))],n=!0)},p(s,r){l=s,l[6]==="label"||l[7]==="label"?t?t.p(l,r):(t=je(l),t.c(),t.m(e,null)):t&&(t.d(1),t=null),r&16384&&i!==(i=l[14]+": 0%;")&&h(e,"style",i),r&131073&&S(e,"selected",l[17](l[0])),r&65537&&S(e,"in-range",l[16](l[0]))},d(s){s&&V(e),t&&t.d(),n=!1,ge(a)}}}function je(l){let e,i=l[12](l[0],0,0)+"",n,a=l[10]&&Ke(l),t=l[11]&&Qe(l);return{c(){e=H("span"),a&&a.c(),n=Y(i),t&&t.c(),h(e,"class","pipVal")},m(s,r){E(s,e,r),a&&a.m(e,null),L(e,n),t&&t.m(e,null)},p(s,r){s[10]?a?a.p(s,r):(a=Ke(s),a.c(),a.m(e,n)):a&&(a.d(1),a=null),r&4097&&i!==(i=s[12](s[0],0,0)+"")&&Z(n,i),s[11]?t?t.p(s,r):(t=Qe(s),t.c(),t.m(e,null)):t&&(t.d(1),t=null)},d(s){s&&V(e),a&&a.d(),t&&t.d()}}}function Ke(l){let e,i;return{c(){e=H("span"),i=Y(l[10]),h(e,"class","pipVal-prefix")},m(n,a){E(n,e,a),L(e,i)},p(n,a){a&1024&&Z(i,n[10])},d(n){n&&V(e)}}}function Qe(l){let e,i;return{c(){e=H("span"),i=Y(l[11]),h(e,"class","pipVal-suffix")},m(n,a){E(n,e,a),L(e,i)},p(n,a){a&2048&&Z(i,n[11])},d(n){n&&V(e)}}}function Ye(l){let e,i=Re(Array(l[19]+1)),n=[];for(let a=0;ap}=e,{focus:U=void 0}=e,{orientationStart:X=void 0}=e,{percentOf:ie=void 0}=e,{moveHandle:le=void 0}=e;function fe(p){le(void 0,p)}return l.$$set=p=>{"range"in p&&i(21,f=p.range),"min"in p&&i(0,g=p.min),"max"in p&&i(1,d=p.max),"step"in 
p&&i(22,c=p.step),"values"in p&&i(23,o=p.values),"vertical"in p&&i(2,_=p.vertical),"reversed"in p&&i(3,m=p.reversed),"hoverable"in p&&i(4,A=p.hoverable),"disabled"in p&&i(5,y=p.disabled),"pipstep"in p&&i(24,w=p.pipstep),"all"in p&&i(6,I=p.all),"first"in p&&i(7,q=p.first),"last"in p&&i(8,O=p.last),"rest"in p&&i(9,D=p.rest),"prefix"in p&&i(10,F=p.prefix),"suffix"in p&&i(11,W=p.suffix),"formatter"in p&&i(12,ee=p.formatter),"focus"in p&&i(13,U=p.focus),"orientationStart"in p&&i(14,X=p.orientationStart),"percentOf"in p&&i(15,ie=p.percentOf),"moveHandle"in p&&i(25,le=p.moveHandle)},l.$$.update=()=>{l.$$.dirty&20971527&&i(26,n=w||((d-g)/c>=(_?50:100)?(d-g)/(_?10:20):1)),l.$$.dirty&71303171&&i(19,a=parseInt((d-g)/(c*n),10)),l.$$.dirty&71303169&&i(18,t=function(p){return g+p*c*n}),l.$$.dirty&8388608&&i(17,s=function(p){return o.some(ae=>ae===p)}),l.$$.dirty&10485760&&i(16,r=function(p){if(f==="min")return o[0]>p;if(f==="max")return o[0]
p})},[g,d,_,m,A,y,I,q,O,D,F,W,ee,U,X,ie,r,s,t,a,fe,f,c,o,w,le,n]}class tn extends Ae{constructor(e){super(),ye(this,e,an,nn,Se,{range:21,min:0,max:1,step:22,values:23,vertical:2,reversed:3,hoverable:4,disabled:5,pipstep:24,all:6,first:7,last:8,rest:9,prefix:10,suffix:11,formatter:12,focus:13,orientationStart:14,percentOf:15,moveHandle:25})}}function ll(l,e,i){const n=l.slice();return n[63]=e[i],n[65]=i,n}function nl(l){let e,i=l[21](l[63],l[65],l[23](l[63]))+"",n,a=l[18]&&il(l),t=l[19]&&al(l);return{c(){e=H("span"),a&&a.c(),n=Y(i),t&&t.c(),h(e,"class","rangeFloat")},m(s,r){E(s,e,r),a&&a.m(e,null),L(e,n),t&&t.m(e,null)},p(s,r){s[18]?a?a.p(s,r):(a=il(s),a.c(),a.m(e,n)):a&&(a.d(1),a=null),r[0]&10485761&&i!==(i=s[21](s[63],s[65],s[23](s[63]))+"")&&Z(n,i),s[19]?t?t.p(s,r):(t=al(s),t.c(),t.m(e,null)):t&&(t.d(1),t=null)},d(s){s&&V(e),a&&a.d(),t&&t.d()}}}function il(l){let e,i;return{c(){e=H("span"),i=Y(l[18]),h(e,"class","rangeFloat-prefix")},m(n,a){E(n,e,a),L(e,i)},p(n,a){a[0]&262144&&Z(i,n[18])},d(n){n&&V(e)}}}function al(l){let e,i;return{c(){e=H("span"),i=Y(l[19]),h(e,"class","rangeFloat-suffix")},m(n,a){E(n,e,a),L(e,i)},p(n,a){a[0]&524288&&Z(i,n[19])},d(n){n&&V(e)}}}function tl(l){let e,i,n,a,t,s,r,f,g,d,c,o,_=l[7]&&nl(l);return{c(){e=H("span"),i=H("span"),n=J(),_&&_.c(),h(i,"class","rangeNub"),h(e,"role","slider"),h(e,"class","rangeHandle"),h(e,"data-handle",l[65]),h(e,"style",a=l[28]+": "+l[29][l[65]]+"%; z-index: "+(l[26]===l[65]?3:2)+";"),h(e,"aria-valuemin",t=l[2]===!0&&l[65]===1?l[0][0]:l[3]),h(e,"aria-valuemax",s=l[2]===!0&&l[65]===0?l[0][1]:l[4]),h(e,"aria-valuenow",r=l[63]),h(e,"aria-valuetext",f=""+(l[18]+l[21](l[63],l[65],l[23](l[63]))+l[19])),h(e,"aria-orientation",g=l[6]?"vertical":"horizontal"),h(e,"aria-disabled",l[10]),h(e,"disabled",l[10]),h(e,"tabindex",d=l[10]?-1:0),S(e,"active",l[24]&&l[26]===l[65]),S(e,"press",l[25]&&l[26]===l[65])},m(m,A){E(m,e,A),L(e,i),L(e,n),_&&_.m(e,null),c||(o=[B(e,"blur",l[33]),B(e,"focus",l[34]),B(e,"keydown",l[35])],c=!0)},p(m,A){m[7]?_?_.p(m,A):(_=nl(m),_.c(),_.m(e,null)):_&&(_.d(1),_=null),A[0]&872415232&&a!==(a=m[28]+": "+m[29][m[65]]+"%; z-index: "+(m[26]===m[65]?3:2)+";")&&h(e,"style",a),A[0]&13&&t!==(t=m[2]===!0&&m[65]===1?m[0][0]:m[3])&&h(e,"aria-valuemin",t),A[0]&21&&s!==(s=m[2]===!0&&m[65]===0?m[0][1]:m[4])&&h(e,"aria-valuemax",s),A[0]&1&&r!==(r=m[63])&&h(e,"aria-valuenow",r),A[0]&11272193&&f!==(f=""+(m[18]+m[21](m[63],m[65],m[23](m[63]))+m[19]))&&h(e,"aria-valuetext",f),A[0]&64&&g!==(g=m[6]?"vertical":"horizontal")&&h(e,"aria-orientation",g),A[0]&1024&&h(e,"aria-disabled",m[10]),A[0]&1024&&h(e,"disabled",m[10]),A[0]&1024&&d!==(d=m[10]?-1:0)&&h(e,"tabindex",d),A[0]&83886080&&S(e,"active",m[24]&&m[26]===m[65]),A[0]&100663296&&S(e,"press",m[25]&&m[26]===m[65])},d(m){m&&V(e),_&&_.d(),c=!1,ge(o)}}}function sl(l){let e,i;return{c(){e=H("span"),h(e,"class","rangeBar"),h(e,"style",i=l[28]+": "+l[31](l[29])+"%; "+l[27]+": "+l[32](l[29])+"%;")},m(n,a){E(n,e,a)},p(n,a){a[0]&939524096&&i!==(i=n[28]+": "+n[31](n[29])+"%; "+n[27]+": "+n[32](n[29])+"%;")&&h(e,"style",i)},d(n){n&&V(e)}}}function fl(l){let e,i;return e=new tn({props:{values:l[0],min:l[3],max:l[4],step:l[5],range:l[2],vertical:l[6],reversed:l[8],orientationStart:l[28],hoverable:l[9],disabled:l[10],all:l[13],first:l[14],last:l[15],rest:l[16],pipstep:l[12],prefix:l[18],suffix:l[19],formatter:l[20],focus:l[24],percentOf:l[23],moveHandle:l[30]}}),{c(){j(e.$$.fragment)},m(n,a){K(e,n,a),i=!0},p(n,a){const 
t={};a[0]&1&&(t.values=n[0]),a[0]&8&&(t.min=n[3]),a[0]&16&&(t.max=n[4]),a[0]&32&&(t.step=n[5]),a[0]&4&&(t.range=n[2]),a[0]&64&&(t.vertical=n[6]),a[0]&256&&(t.reversed=n[8]),a[0]&268435456&&(t.orientationStart=n[28]),a[0]&512&&(t.hoverable=n[9]),a[0]&1024&&(t.disabled=n[10]),a[0]&8192&&(t.all=n[13]),a[0]&16384&&(t.first=n[14]),a[0]&32768&&(t.last=n[15]),a[0]&65536&&(t.rest=n[16]),a[0]&4096&&(t.pipstep=n[12]),a[0]&262144&&(t.prefix=n[18]),a[0]&524288&&(t.suffix=n[19]),a[0]&1048576&&(t.formatter=n[20]),a[0]&16777216&&(t.focus=n[24]),a[0]&8388608&&(t.percentOf=n[23]),e.$set(t)},i(n){i||(P(e.$$.fragment,n),i=!0)},o(n){R(e.$$.fragment,n),i=!1},d(n){Q(e,n)}}}function sn(l){let e,i,n,a,t,s,r=Re(l[0]),f=[];for(let c=0;c{d=null}),oe()),(!a||o[0]&131072)&&h(e,"id",c[17]),(!a||o[0]&4)&&S(e,"range",c[2]),(!a||o[0]&1024)&&S(e,"disabled",c[10]),(!a||o[0]&512)&&S(e,"hoverable",c[9]),(!a||o[0]&64)&&S(e,"vertical",c[6]),(!a||o[0]&256)&&S(e,"reversed",c[8]),(!a||o[0]&16777216)&&S(e,"focus",c[24]),(!a||o[0]&4)&&S(e,"min",c[2]==="min"),(!a||o[0]&4)&&S(e,"max",c[2]==="max"),(!a||o[0]&2048)&&S(e,"pips",c[11]),(!a||o[0]&122880)&&S(e,"pip-labels",c[13]==="label"||c[14]==="label"||c[15]==="label"||c[16]==="label")},i(c){a||(P(d),a=!0)},o(c){R(d),a=!1},d(c){c&&V(e),dl(f,c),g&&g.d(),d&&d.d(),l[49](null),t=!1,ge(s)}}}function rl(l){if(!l)return-1;for(var e=0;l=l.previousElementSibling;)e++;return e}function Ue(l){return l.type.includes("touch")?l.touches[0]:l}function fn(l,e,i){let n,a,t,s,r,f,g=$,d=()=>(g(),g=Ol(be,u=>i(29,f=u)),be);l.$$.on_destroy.push(()=>g());let{slider:c}=e,{range:o=!1}=e,{pushy:_=!1}=e,{min:m=0}=e,{max:A=100}=e,{step:y=1}=e,{values:w=[(A+m)/2]}=e,{vertical:I=!1}=e,{float:q=!1}=e,{reversed:O=!1}=e,{hoverable:D=!0}=e,{disabled:F=!1}=e,{pips:W=!1}=e,{pipstep:ee=void 0}=e,{all:U=void 0}=e,{first:X=void 0}=e,{last:ie=void 0}=e,{rest:le=void 0}=e,{id:fe=void 0}=e,{prefix:p=""}=e,{suffix:ae=""}=e,{formatter:_e=(u,v,M)=>u}=e,{handleFormatter:we=_e}=e,{precision:G=2}=e,{springValues:de={stiffness:.15,damping:.4}}=e;const me=Be();let ce=0,x=!1,ne=!1,b=!1,k=!1,N=w.length-1,te,he,be;function Me(u){const v=c.querySelectorAll(".handle"),M=Array.prototype.includes.call(v,u),T=Array.prototype.some.call(v,se=>se.contains(u));return M||T}function Ee(u){return o==="min"||o==="max"?u.slice(0,1):o?u.slice(0,2):u}function ke(){return c.getBoundingClientRect()}function Fe(u){const v=ke();let M=0,T=0,se=0;I?(M=u.clientY-v.top,T=M/v.height*100,T=O?T:100-T):(M=u.clientX-v.left,T=M/v.width*100,T=O?100-T:T),se=(A-m)/100*T+m;let ze;return o===!0&&w[0]===w[1]?se>w[1]?1:0:(ze=w.indexOf([...w].sort((Ll,Ul)=>Math.abs(se-Ll)-Math.abs(se-Ul))[0]),ze)}function Le(u){const v=ke();let M=0,T=0,se=0;I?(M=u.clientY-v.top,T=M/v.height*100,T=O?T:100-T):(M=u.clientX-v.left,T=M/v.width*100,T=O?100-T:T),se=(A-m)/100*T+m,ve(N,se)}function ve(u,v){return v=t(v),typeof u>"u"&&(u=N),o&&(u===0&&v>w[1]?_?i(0,w[1]=v,w):v=w[1]:u===1&&vt(u))})}function Oe(){!F&&me("stop",{activeHandle:N,startValue:te,value:w[N],values:w.map(u=>t(u))})}function Ml(){!F&&me("change",{activeHandle:N,startValue:te,previousValue:typeof he>"u"?te:he,value:w[N],values:w.map(u=>t(u))})}function Fl(u){He[u?"unshift":"push"](()=>{c=u,i(1,c)})}return l.$$set=u=>{"slider"in u&&i(1,c=u.slider),"range"in u&&i(2,o=u.range),"pushy"in u&&i(43,_=u.pushy),"min"in u&&i(3,m=u.min),"max"in u&&i(4,A=u.max),"step"in u&&i(5,y=u.step),"values"in u&&i(0,w=u.values),"vertical"in u&&i(6,I=u.vertical),"float"in u&&i(7,q=u.float),"reversed"in u&&i(8,O=u.reversed),"hoverable"in 
u&&i(9,D=u.hoverable),"disabled"in u&&i(10,F=u.disabled),"pips"in u&&i(11,W=u.pips),"pipstep"in u&&i(12,ee=u.pipstep),"all"in u&&i(13,U=u.all),"first"in u&&i(14,X=u.first),"last"in u&&i(15,ie=u.last),"rest"in u&&i(16,le=u.rest),"id"in u&&i(17,fe=u.id),"prefix"in u&&i(18,p=u.prefix),"suffix"in u&&i(19,ae=u.suffix),"formatter"in u&&i(20,_e=u.formatter),"handleFormatter"in u&&i(21,we=u.handleFormatter),"precision"in u&&i(44,G=u.precision),"springValues"in u&&i(45,de=u.springValues)},l.$$.update=()=>{l.$$.dirty[0]&24&&i(48,a=function(u){return u<=m?m:u>=A?A:u}),l.$$.dirty[0]&56|l.$$.dirty[1]&139264&&i(47,t=function(u){if(u<=m)return m;if(u>=A)return A;let v=(u-m)%y,M=u-v;return Math.abs(v)*2>=y&&(M+=v>0?y:-y),M=a(M),parseFloat(M.toFixed(G))}),l.$$.dirty[0]&24|l.$$.dirty[1]&8192&&i(23,n=function(u){let v=(u-m)/(A-m)*100;return isNaN(v)||v<=0?0:v>=100?100:parseFloat(v.toFixed(G))}),l.$$.dirty[0]&12582937|l.$$.dirty[1]&114688&&(Array.isArray(w)||(i(0,w=[(A+m)/2]),console.error("'values' prop should be an Array (https://github.com/simeydotme/svelte-range-slider-pips#slider-props)")),i(0,w=Ee(w.map(u=>t(u)))),ce!==w.length?d(i(22,be=Nl(w.map(u=>n(u)),de))):be.set(w.map(u=>n(u))),i(46,ce=w.length)),l.$$.dirty[0]&320&&i(28,s=I?O?"top":"bottom":O?"right":"left"),l.$$.dirty[0]&320&&i(27,r=I?O?"bottom":"top":O?"left":"right")},[w,c,o,m,A,y,I,q,O,D,F,W,ee,U,X,ie,le,fe,p,ae,_e,we,be,n,x,b,N,r,s,f,ve,wl,kl,vl,Al,yl,Sl,El,Vl,Pl,Rl,Tl,Bl,_,G,de,ce,t,a,Fl]}class rn extends Ae{constructor(e){super(),ye(this,e,fn,sn,Se,{slider:1,range:2,pushy:43,min:3,max:4,step:5,values:0,vertical:6,float:7,reversed:8,hoverable:9,disabled:10,pips:11,pipstep:12,all:13,first:14,last:15,rest:16,id:17,prefix:18,suffix:19,formatter:20,handleFormatter:21,precision:44,springValues:45},null,[-1,-1,-1])}}function pl(l,{crop_values:e,autoplay:i}={}){function n(){if(e===void 0)return;const t=e[0]/100*l.duration,s=e[1]/100*l.duration;l.currentTimes&&(l.currentTime=t,l.pause())}async function a(){i&&(l.pause(),await l.play())}return l.addEventListener("loadeddata",a),l.addEventListener("timeupdate",n),{destroy(){l.removeEventListener("loadeddata",a),l.removeEventListener("timeupdate",n)}}}function un(l){let e,i,n,a,t,s,r,f,g,d,c;e=new Zl({props:{editable:!0,absolute:!0}}),e.$on("clear",l[13]),e.$on("edit",l[26]);let o=l[8]==="edit"&&l[9]?.duration&&ul(l);return{c(){j(e.$$.fragment),i=J(),n=H("audio"),r=J(),o&&o.c(),f=pe(),n.controls=!0,h(n,"preload","metadata"),Te(n.src,a=l[1]?.data)||h(n,"src",a),h(n,"data-testid",t=`${l[2]}-audio`),h(n,"class","svelte-1thnwz")},m(_,m){K(e,_,m),E(_,i,m),E(_,n,m),l[27](n),E(_,r,m),o&&o.m(_,m),E(_,f,m),g=!0,d||(c=[ml(s=pl.call(null,n,{autoplay:l[6],crop_values:l[10]})),B(n,"play",l[23]),B(n,"pause",l[24]),B(n,"ended",l[16])],d=!0)},p(_,m){(!g||m[0]&2&&!Te(n.src,a=_[1]?.data))&&h(n,"src",a),(!g||m[0]&4&&t!==(t=`${_[2]}-audio`))&&h(n,"data-testid",t),s&&re(s.update)&&m[0]&1088&&s.update.call(null,{autoplay:_[6],crop_values:_[10]}),_[8]==="edit"&&_[9]?.duration?o?(o.p(_,m),m[0]&768&&P(o,1)):(o=ul(_),o.c(),P(o,1),o.m(f.parentNode,f)):o&&(ue(),R(o,1,1,()=>{o=null}),oe())},i(_){g||(P(e.$$.fragment,_),P(o),g=!0)},o(_){R(e.$$.fragment,_),R(o),g=!1},d(_){_&&(V(i),V(n),V(r),V(f)),Q(e,_),l[27](null),o&&o.d(_),d=!1,ge(c)}}}function on(l){let e,i,n,a;const t=[dn,_n],s=[];function r(f,g){return f[4]==="microphone"?0:f[4]==="upload"?1:-1}return~(e=r(l))&&(i=s[e]=t[e](l)),{c(){i&&i.c(),n=pe()},m(f,g){~e&&s[e].m(f,g),E(f,n,g),a=!0},p(f,g){let 
d=e;e=r(f),e===d?~e&&s[e].p(f,g):(i&&(ue(),R(s[d],1,1,()=>{s[d]=null}),oe()),~e?(i=s[e],i?i.p(f,g):(i=s[e]=t[e](f),i.c()),P(i,1),i.m(n.parentNode,n)):i=null)},i(f){a||(P(i),a=!0)},o(f){R(i),a=!1},d(f){f&&V(n),~e&&s[e].d(f)}}}function ul(l){let e,i,n;function a(s){l[28](s)}let t={range:!0,min:0,max:100,step:1};return l[10]!==void 0&&(t.values=l[10]),e=new rn({props:t}),He.push(()=>cl(e,"values",a)),e.$on("change",l[14]),{c(){j(e.$$.fragment)},m(s,r){K(e,s,r),n=!0},p(s,r){const f={};!i&&r[0]&1024&&(i=!0,f.values=s[10],hl(()=>i=!1)),e.$set(f)},i(s){n||(P(e.$$.fragment,s),n=!0)},o(s){R(e.$$.fragment,s),n=!1},d(s){Q(e,s)}}}function _n(l){let e,i,n;function a(s){l[25](s)}let t={filetype:"audio/aac,audio/midi,audio/mpeg,audio/ogg,audio/wav,audio/x-wav,audio/opus,audio/webm,audio/flac,audio/vnd.rn-realaudio,audio/x-ms-wma,audio/x-aiff,audio/amr,audio/*",$$slots:{default:[mn]},$$scope:{ctx:l}};return l[0]!==void 0&&(t.dragging=l[0]),e=new Jl({props:t}),He.push(()=>cl(e,"dragging",a)),e.$on("load",l[15]),{c(){j(e.$$.fragment)},m(s,r){K(e,s,r),n=!0},p(s,r){const f={};r[0]&536870912&&(f.$$scope={dirty:r,ctx:s}),!i&&r[0]&1&&(i=!0,f.dragging=s[0],hl(()=>i=!1)),e.$set(f)},i(s){n||(P(e.$$.fragment,s),n=!0)},o(s){R(e.$$.fragment,s),n=!1},d(s){Q(e,s)}}}function dn(l){let e,i,n,a;const t=[hn,cn],s=[];function r(f,g){return f[7]?0:1}return i=r(l),n=s[i]=t[i](l),{c(){e=H("div"),n.c(),h(e,"class","mic-wrap svelte-1thnwz")},m(f,g){E(f,e,g),s[i].m(e,null),a=!0},p(f,g){let d=i;i=r(f),i===d?s[i].p(f,g):(ue(),R(s[d],1,1,()=>{s[d]=null}),oe(),n=s[i],n?n.p(f,g):(n=s[i]=t[i](f),n.c()),P(n,1),n.m(e,null))},i(f){a||(P(n),a=!0)},o(f){R(n),a=!1},d(f){f&&V(e),s[i].d()}}}function mn(l){let e;const i=l[22].default,n=Il(i,l,l[29],null);return{c(){n&&n.c()},m(a,t){n&&n.m(a,t),e=!0},p(a,t){n&&n.p&&(!e||t[0]&536870912)&&Dl(n,i,a,a[29],e?jl(i,a[29],t,null):Cl(a[29]),null)},i(a){e||(P(n,a),e=!0)},o(a){R(n,a),e=!1},d(a){n&&n.d(a)}}}function cn(l){let e,i;return e=new bl({props:{size:"sm",$$slots:{default:[bn]},$$scope:{ctx:l}}}),e.$on("click",l[11]),{c(){j(e.$$.fragment)},m(n,a){K(e,n,a),i=!0},p(n,a){const t={};a[0]&536870912&&(t.$$scope={dirty:a,ctx:n}),e.$set(t)},i(n){i||(P(e.$$.fragment,n),i=!0)},o(n){R(e.$$.fragment,n),i=!1},d(n){Q(e,n)}}}function hn(l){let e,i;return e=new bl({props:{size:"sm",$$slots:{default:[gn]},$$scope:{ctx:l}}}),e.$on("click",l[12]),{c(){j(e.$$.fragment)},m(n,a){K(e,n,a),i=!0},p(n,a){const t={};a[0]&536870912&&(t.$$scope={dirty:a,ctx:n}),e.$set(t)},i(n){i||(P(e.$$.fragment,n),i=!0)},o(n){R(e.$$.fragment,n),i=!1},d(n){Q(e,n)}}}function bn(l){let e,i;return{c(){e=H("span"),e.innerHTML='',i=Y(`
- Record from microphone`),h(e,"class","record-icon svelte-1thnwz")},m(n,a){E(n,e,a),E(n,i,a)},p:$,d(n){n&&(V(e),V(i))}}}function gn(l){let e,i;return{c(){e=H("span"),e.innerHTML=' ',i=Y(`
- Stop recording`),h(e,"class","record-icon svelte-1thnwz")},m(n,a){E(n,e,a),E(n,i,a)},p:$,d(n){n&&(V(e),V(i))}}}function pn(l){let e,i,n,a,t,s;e=new gl({props:{show_label:l[3],Icon:Ne,float:l[4]==="upload"&&l[1]===null,label:l[2]||"Audio"}});const r=[on,un],f=[];function g(d,c){return d[1]===null||d[5]?0:1}return n=g(l),a=f[n]=r[n](l),{c(){j(e.$$.fragment),i=J(),a.c(),t=pe()},m(d,c){K(e,d,c),E(d,i,c),f[n].m(d,c),E(d,t,c),s=!0},p(d,c){const o={};c[0]&8&&(o.show_label=d[3]),c[0]&18&&(o.float=d[4]==="upload"&&d[1]===null),c[0]&4&&(o.label=d[2]||"Audio"),e.$set(o);let _=n;n=g(d),n===_?f[n].p(d,c):(ue(),R(f[_],1,1,()=>{f[_]=null}),oe(),a=f[n],a?a.p(d,c):(a=f[n]=r[n](d),a.c()),P(a,1),a.m(t.parentNode,t))},i(d){s||(P(e.$$.fragment,d),P(a),s=!0)},o(d){R(e.$$.fragment,d),R(a),s=!1},d(d){d&&(V(i),V(t)),Q(e,d),f[n].d(d)}}}const wn=500,ol=44;function kn(l){return new Promise((e,i)=>{let n=new FileReader;n.onerror=i,n.onload=()=>e(n.result),n.readAsDataURL(l)})}function vn(l,e,i){let{$$slots:n={},$$scope:a}=e,{value:t=null}=e,{label:s}=e,{show_label:r=!0}=e,{name:f=""}=e,{source:g}=e,{pending:d=!1}=e,{streaming:c=!1}=e,{autoplay:o=!1}=e,_=!1,m,A="",y,w=[],I=!1,q,O=!1,D=[0,100],F=[],W;function ee(){W=[Ie(()=>import("./module-88999aef.js"),["assets/module-88999aef.js","assets/module-a3cf0cc4.js","assets/index-1d65707a.js","assets/index-f2292b12.css"]),Ie(()=>import("./module-a5a0afa0.js"),["assets/module-a5a0afa0.js","assets/module-a3cf0cc4.js"])]}c&&ee();const U=Be(),X=async(k,N)=>{let te=new Blob(k,{type:"audio/wav"});i(1,t={data:await kn(te),name:"audio.wav"}),U(N,t)};async function ie(){let k;try{k=await navigator.mediaDevices.getUserMedia({audio:!0})}catch(N){if(N instanceof DOMException&&N.name=="NotAllowedError"){U("error","Please allow access to the microphone for recording.");return}throw N}if(k!=null){if(c){const[{MediaRecorder:N,register:te},{connect:he}]=await Promise.all(W);await te(await he()),m=new N(k,{mimeType:"audio/wav"});async function be(Me){let Ee=await Me.data.arrayBuffer(),ke=new Uint8Array(Ee);if(y||(i(19,y=new Uint8Array(Ee.slice(0,ol))),ke=new Uint8Array(Ee.slice(ol))),d)w.push(ke);else{let Fe=[y].concat(w,[ke]);X(Fe,"stream"),i(20,w=[])}}m.addEventListener("dataavailable",be)}else m=new MediaRecorder(k),m.addEventListener("dataavailable",N=>{F.push(N.data)}),m.addEventListener("stop",async()=>{i(7,_=!1),await X(F,"change"),await X(F,"stop_recording"),F=[]});O=!0}}async function le(){i(7,_=!0),U("start_recording"),O||await ie(),i(19,y=void 0),c?m.start(wn):m.start()}zl(()=>{m&&m.state!=="inactive"&&m.stop()});function fe(){m.stop(),c&&(i(7,_=!1),d&&i(21,I=!0))}function p(){U("change",null),U("clear"),i(8,A=""),i(1,t=null)}function ae({detail:{values:k}}){t&&(U("change",{data:t.data,name:f,crop_min:k[0],crop_max:k[1]}),U("edit"))}function _e({detail:k}){i(1,t=k),U("change",{data:k.data,name:k.name}),U("upload",k)}function we(){U("stop"),U("end")}let{dragging:G=!1}=e;function de(k){C.call(this,l,k)}function me(k){C.call(this,l,k)}function ce(k){G=k,i(0,G)}const x=()=>i(8,A="edit");function ne(k){He[k?"unshift":"push"](()=>{q=k,i(9,q)})}function b(k){D=k,i(10,D)}return l.$$set=k=>{"value"in k&&i(1,t=k.value),"label"in k&&i(2,s=k.label),"show_label"in k&&i(3,r=k.show_label),"name"in k&&i(17,f=k.name),"source"in k&&i(4,g=k.source),"pending"in k&&i(18,d=k.pending),"streaming"in k&&i(5,c=k.streaming),"autoplay"in k&&i(6,o=k.autoplay),"dragging"in k&&i(0,G=k.dragging),"$$scope"in k&&i(29,a=k.$$scope)},l.$$.update=()=>{if(l.$$.dirty[0]&3932160&&I&&d===!1&&(i(21,I=!1),y&&w)){let 
k=[y].concat(w);i(20,w=[]),X(k,"stream")}l.$$.dirty[0]&1&&U("drag",G)},[G,t,s,r,g,c,o,_,A,q,D,le,fe,p,ae,_e,we,f,d,y,w,I,n,de,me,ce,x,ne,b,a]}class An extends Ae{constructor(e){super(),ye(this,e,vn,pn,Se,{value:1,label:2,show_label:3,name:17,source:4,pending:18,streaming:5,autoplay:6,dragging:0},null,[-1,-1])}}function _l(l){let e,i,n;return i=new xl({props:{formatter:l[9],value:l[0]}}),i.$on("error",l[10]),i.$on("share",l[11]),{c(){e=H("div"),j(i.$$.fragment),h(e,"class","icon-button svelte-1yfus5a")},m(a,t){E(a,e,t),K(i,e,null),n=!0},p(a,t){const s={};t&1&&(s.value=a[0]),i.$set(s)},i(a){n||(P(i.$$.fragment,a),n=!0)},o(a){R(i.$$.fragment,a),n=!1},d(a){a&&V(e),Q(i)}}}function yn(l){let e,i,n,a,t,s;return{c(){e=H("audio"),e.controls=!0,h(e,"preload","metadata"),Te(e.src,i=l[0]?.data)||h(e,"src",i),h(e,"data-testid",n=`${l[1]}-audio`),h(e,"class","svelte-1yfus5a")},m(r,f){E(r,e,f),t||(s=[ml(a=pl.call(null,e,{autoplay:l[3]})),B(e,"play",l[7]),B(e,"pause",l[8]),B(e,"ended",l[5])],t=!0)},p(r,f){f&1&&!Te(e.src,i=r[0]?.data)&&h(e,"src",i),f&2&&n!==(n=`${r[1]}-audio`)&&h(e,"data-testid",n),a&&re(a.update)&&f&8&&a.update.call(null,{autoplay:r[3]})},i:$,o:$,d(r){r&&V(e),t=!1,ge(s)}}}function Sn(l){let e,i;return e=new Wl({props:{size:"small",$$slots:{default:[En]},$$scope:{ctx:l}}}),{c(){j(e.$$.fragment)},m(n,a){K(e,n,a),i=!0},p(n,a){const t={};a&8192&&(t.$$scope={dirty:a,ctx:n}),e.$set(t)},i(n){i||(P(e.$$.fragment,n),i=!0)},o(n){R(e.$$.fragment,n),i=!1},d(n){Q(e,n)}}}function En(l){let e,i;return e=new Ne({}),{c(){j(e.$$.fragment)},m(n,a){K(e,n,a),i=!0},i(n){i||(P(e.$$.fragment,n),i=!0)},o(n){R(e.$$.fragment,n),i=!1},d(n){Q(e,n)}}}function Vn(l){let e,i,n,a,t,s,r;e=new gl({props:{show_label:l[2],Icon:Ne,float:!1,label:l[1]||"Audio"}});let f=l[4]&&l[0]!==null&&_l(l);const g=[Sn,yn],d=[];function c(o,_){return o[0]===null?0:1}return a=c(l),t=d[a]=g[a](l),{c(){j(e.$$.fragment),i=J(),f&&f.c(),n=J(),t.c(),s=pe()},m(o,_){K(e,o,_),E(o,i,_),f&&f.m(o,_),E(o,n,_),d[a].m(o,_),E(o,s,_),r=!0},p(o,[_]){const m={};_&4&&(m.show_label=o[2]),_&2&&(m.label=o[1]||"Audio"),e.$set(m),o[4]&&o[0]!==null?f?(f.p(o,_),_&17&&P(f,1)):(f=_l(o),f.c(),P(f,1),f.m(n.parentNode,n)):f&&(ue(),R(f,1,1,()=>{f=null}),oe());let A=a;a=c(o),a===A?d[a].p(o,_):(ue(),R(d[A],1,1,()=>{d[A]=null}),oe(),t=d[a],t?t.p(o,_):(t=d[a]=g[a](o),t.c()),P(t,1),t.m(s.parentNode,s))},i(o){r||(P(e.$$.fragment,o),P(f),P(t),r=!0)},o(o){R(e.$$.fragment,o),R(f),R(t),r=!1},d(o){o&&(V(i),V(n),V(s)),Q(e,o),f&&f.d(o),d[a].d(o)}}}function Pn(l,e,i){let{value:n=null}=e,{label:a}=e,{name:t}=e,{show_label:s=!0}=e,{autoplay:r}=e,{show_share_button:f=!1}=e;const g=Be();function d(){g("stop"),g("end")}function c(y){C.call(this,l,y)}function o(y){C.call(this,l,y)}const _=async y=>y?``:"";function m(y){C.call(this,l,y)}function A(y){C.call(this,l,y)}return l.$$set=y=>{"value"in y&&i(0,n=y.value),"label"in y&&i(1,a=y.label),"name"in y&&i(6,t=y.name),"show_label"in y&&i(2,s=y.show_label),"autoplay"in y&&i(3,r=y.autoplay),"show_share_button"in y&&i(4,f=y.show_share_button)},l.$$.update=()=>{l.$$.dirty&65&&n&&g("change",{name:t,data:n?.data})},[n,a,s,r,f,d,t,c,o,_,m,A]}class Rn extends Ae{constructor(e){super(),ye(this,e,Pn,Vn,Se,{value:0,label:1,name:6,show_label:2,autoplay:3,show_share_button:4})}}function Tn(l){let e,i;return e=new Rn({props:{autoplay:l[15],show_label:l[9],show_share_button:l[16],value:l[17],name:l[17]?.name||"audio_file",label:l[8]}}),e.$on("share",l[35]),e.$on("error",l[36]),{c(){j(e.$$.fragment)},m(n,a){K(e,n,a),i=!0},p(n,a){const 
t={};a[0]&32768&&(t.autoplay=n[15]),a[0]&512&&(t.show_label=n[9]),a[0]&65536&&(t.show_share_button=n[16]),a[0]&131072&&(t.value=n[17]),a[0]&131072&&(t.name=n[17]?.name||"audio_file"),a[0]&256&&(t.label=n[8]),e.$set(t)},i(n){i||(P(e.$$.fragment,n),i=!0)},o(n){R(e.$$.fragment,n),i=!1},d(n){Q(e,n)}}}function Bn(l){let e,i;return e=new An({props:{label:l[8],show_label:l[9],value:l[17],name:l[6],source:l[7],pending:l[10],streaming:l[11],autoplay:l[15],$$slots:{default:[Hn]},$$scope:{ctx:l}}}),e.$on("change",l[23]),e.$on("stream",l[24]),e.$on("drag",l[25]),e.$on("edit",l[26]),e.$on("play",l[27]),e.$on("pause",l[28]),e.$on("stop",l[29]),e.$on("end",l[30]),e.$on("start_recording",l[31]),e.$on("stop_recording",l[32]),e.$on("upload",l[33]),e.$on("error",l[34]),{c(){j(e.$$.fragment)},m(n,a){K(e,n,a),i=!0},p(n,a){const t={};a[0]&256&&(t.label=n[8]),a[0]&512&&(t.show_label=n[9]),a[0]&131072&&(t.value=n[17]),a[0]&64&&(t.name=n[6]),a[0]&128&&(t.source=n[7]),a[0]&1024&&(t.pending=n[10]),a[0]&2048&&(t.streaming=n[11]),a[0]&32768&&(t.autoplay=n[15]),a[1]&64&&(t.$$scope={dirty:a,ctx:n}),e.$set(t)},i(n){i||(P(e.$$.fragment,n),i=!0)},o(n){R(e.$$.fragment,n),i=!1},d(n){Q(e,n)}}}function Hn(l){let e,i;return e=new Xl({props:{type:"audio"}}),{c(){j(e.$$.fragment)},m(n,a){K(e,n,a),i=!0},p:$,i(n){i||(P(e.$$.fragment,n),i=!0)},o(n){R(e.$$.fragment,n),i=!1},d(n){Q(e,n)}}}function Mn(l){let e,i,n,a,t,s;const r=[l[1]];let f={};for(let o=0;o{d[A]=null}),oe(),a=d[n],a?a.p(o,_):(a=d[n]=g[n](o),a.c()),P(a,1),a.m(t.parentNode,t))},i(o){s||(P(e.$$.fragment,o),P(a),s=!0)},o(o){R(e.$$.fragment,o),R(a),s=!1},d(o){o&&(V(i),V(t)),Q(e,o),d[n].d(o)}}}function Fn(l){let e,i;return e=new Gl({props:{variant:l[5]==="dynamic"&&l[0]===null&&l[7]==="upload"?"dashed":"solid",border_mode:l[18]?"focus":"base",padding:!1,elem_id:l[2],elem_classes:l[3],visible:l[4],container:l[12],scale:l[13],min_width:l[14],$$slots:{default:[Mn]},$$scope:{ctx:l}}}),{c(){j(e.$$.fragment)},m(n,a){K(e,n,a),i=!0},p(n,a){const t={};a[0]&161&&(t.variant=n[5]==="dynamic"&&n[0]===null&&n[7]==="upload"?"dashed":"solid"),a[0]&262144&&(t.border_mode=n[18]?"focus":"base"),a[0]&4&&(t.elem_id=n[2]),a[0]&8&&(t.elem_classes=n[3]),a[0]&16&&(t.visible=n[4]),a[0]&4096&&(t.container=n[12]),a[0]&8192&&(t.scale=n[13]),a[0]&16384&&(t.min_width=n[14]),a[0]&495587|a[1]&64&&(t.$$scope={dirty:a,ctx:n}),e.$set(t)},i(n){i||(P(e.$$.fragment,n),i=!0)},o(n){R(e.$$.fragment,n),i=!1},d(n){Q(e,n)}}}function Ln(l,e,i){const n=Be();let{elem_id:a=""}=e,{elem_classes:t=[]}=e,{visible:s=!0}=e,{mode:r}=e,{value:f=null}=e,g=null,{name:d}=e,{source:c}=e,{label:o}=e,{root:_}=e,{show_label:m}=e,{pending:A}=e,{streaming:y}=e,{root_url:w}=e,{container:I=!0}=e,{scale:q=null}=e,{min_width:O=void 0}=e,{loading_status:D}=e,{autoplay:F=!1}=e,{show_share_button:W=!1}=e,ee,U;const X=({detail:b})=>i(0,f=b),ie=({detail:b})=>{i(0,f=b),n("stream",f)},le=({detail:b})=>i(18,U=b);function fe(b){C.call(this,l,b)}function p(b){C.call(this,l,b)}function ae(b){C.call(this,l,b)}function _e(b){C.call(this,l,b)}function we(b){C.call(this,l,b)}function G(b){C.call(this,l,b)}function de(b){C.call(this,l,b)}function me(b){C.call(this,l,b)}const ce=({detail:b})=>{i(1,D=D||{}),i(1,D.status="error",D),n("error",b)};function x(b){C.call(this,l,b)}function ne(b){C.call(this,l,b)}return l.$$set=b=>{"elem_id"in b&&i(2,a=b.elem_id),"elem_classes"in b&&i(3,t=b.elem_classes),"visible"in b&&i(4,s=b.visible),"mode"in b&&i(5,r=b.mode),"value"in b&&i(0,f=b.value),"name"in b&&i(6,d=b.name),"source"in b&&i(7,c=b.source),"label"in 
b&&i(8,o=b.label),"root"in b&&i(20,_=b.root),"show_label"in b&&i(9,m=b.show_label),"pending"in b&&i(10,A=b.pending),"streaming"in b&&i(11,y=b.streaming),"root_url"in b&&i(21,w=b.root_url),"container"in b&&i(12,I=b.container),"scale"in b&&i(13,q=b.scale),"min_width"in b&&i(14,O=b.min_width),"loading_status"in b&&i(1,D=b.loading_status),"autoplay"in b&&i(15,F=b.autoplay),"show_share_button"in b&&i(16,W=b.show_share_button)},l.$$.update=()=>{l.$$.dirty[0]&3145729&&i(17,ee=en(f,_,w)),l.$$.dirty[0]&4194305&&JSON.stringify(f)!==JSON.stringify(g)&&(i(22,g=f),n("change"))},[f,D,a,t,s,r,d,c,o,m,A,y,I,q,O,F,W,ee,U,n,_,w,g,X,ie,le,fe,p,ae,_e,we,G,de,me,ce,x,ne]}class Un extends Ae{constructor(e){super(),ye(this,e,Ln,Fn,Se,{elem_id:2,elem_classes:3,visible:4,mode:5,value:0,name:6,source:7,label:8,root:20,show_label:9,pending:10,streaming:11,root_url:21,container:12,scale:13,min_width:14,loading_status:1,autoplay:15,show_share_button:16},null,[-1,-1])}get elem_id(){return this.$$.ctx[2]}set elem_id(e){this.$$set({elem_id:e}),z()}get elem_classes(){return this.$$.ctx[3]}set elem_classes(e){this.$$set({elem_classes:e}),z()}get visible(){return this.$$.ctx[4]}set visible(e){this.$$set({visible:e}),z()}get mode(){return this.$$.ctx[5]}set mode(e){this.$$set({mode:e}),z()}get value(){return this.$$.ctx[0]}set value(e){this.$$set({value:e}),z()}get name(){return this.$$.ctx[6]}set name(e){this.$$set({name:e}),z()}get source(){return this.$$.ctx[7]}set source(e){this.$$set({source:e}),z()}get label(){return this.$$.ctx[8]}set label(e){this.$$set({label:e}),z()}get root(){return this.$$.ctx[20]}set root(e){this.$$set({root:e}),z()}get show_label(){return this.$$.ctx[9]}set show_label(e){this.$$set({show_label:e}),z()}get pending(){return this.$$.ctx[10]}set pending(e){this.$$set({pending:e}),z()}get streaming(){return this.$$.ctx[11]}set streaming(e){this.$$set({streaming:e}),z()}get root_url(){return this.$$.ctx[21]}set root_url(e){this.$$set({root_url:e}),z()}get container(){return this.$$.ctx[12]}set container(e){this.$$set({container:e}),z()}get scale(){return this.$$.ctx[13]}set scale(e){this.$$set({scale:e}),z()}get min_width(){return this.$$.ctx[14]}set min_width(e){this.$$set({min_width:e}),z()}get loading_status(){return this.$$.ctx[1]}set loading_status(e){this.$$set({loading_status:e}),z()}get autoplay(){return this.$$.ctx[15]}set autoplay(e){this.$$set({autoplay:e}),z()}get show_share_button(){return this.$$.ctx[16]}set show_share_button(e){this.$$set({show_share_button:e}),z()}}const Xn=Un,Gn=["static","dynamic"],Jn=()=>({type:{input_payload:"{ name: string; data: string }",response_object:"{ name: string; data: string, is_file: boolean }"},description:{input_payload:"audio data as object with filename and base64 string",response_object:"object that includes path to audio file. The URL: {ROOT}file={name} contains the data"},example_data:{name:"audio.wav",data:"data:audio/wav;base64,UklGRiQAAABXQVZFZm10IBAAAAABAAEARKwAAIhYAQACABAAZGF0YQAAAAA="}});export{Xn as Component,Jn as document,Gn as modes};
-//# sourceMappingURL=index-ebfc06be.js.map
diff --git a/spaces/Dao3/MagicPrompt-Stable-Diffusion/README.md b/spaces/Dao3/MagicPrompt-Stable-Diffusion/README.md
deleted file mode 100644
index 0f1fb02ec78569e70c36b61f4503376f19d02cf8..0000000000000000000000000000000000000000
--- a/spaces/Dao3/MagicPrompt-Stable-Diffusion/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: MagicPrompt Stable Diffusion
-emoji: 🍄
-colorFrom: red
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.3.1
-app_file: app.py
-pinned: false
-license: mit
-duplicated_from: phenomenon1981/MagicPrompt-Stable-Diffusion
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Demosthene-OR/avr23-cds-translation/tabs/template_save/second_tab.py b/spaces/Demosthene-OR/avr23-cds-translation/tabs/template_save/second_tab.py
deleted file mode 100644
index b57012eee4b900ef46666223b807259b8a25c465..0000000000000000000000000000000000000000
--- a/spaces/Demosthene-OR/avr23-cds-translation/tabs/template_save/second_tab.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import streamlit as st
-import pandas as pd
-import numpy as np
-from PIL import Image
-
-
-title = "Exploration"
-sidebar_name = "Exploration"
-
-
-def run():
-
- st.title(title)
-
- st.markdown(
- """
- This is your app's second tab. Fill it in `tabs/second_tab.py`.
- You can and probably should rename the file (a renamed-tab sketch follows this file's diff).
-
- ## Test
-
- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Suspendisse gravida urna vel tincidunt vestibulum. Nunc malesuada molestie odio, vel tincidunt arcu fringilla hendrerit. Sed leo velit, elementum nec ipsum id, sagittis tempus leo. Quisque viverra ipsum arcu, et ullamcorper arcu volutpat maximus. Donec volutpat porttitor mi in tincidunt. Ut sodales commodo magna, eu volutpat lacus sodales in. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Aliquam interdum libero non leo iaculis bibendum. Suspendisse in leo posuere risus viverra suscipit.
-
- Nunc eu tortor dolor. Etiam molestie id enim ut convallis. Pellentesque aliquet malesuada ipsum eget commodo. Ut at eros elit. Quisque non blandit magna. Aliquam porta, turpis ac maximus varius, risus elit sagittis leo, eu interdum lorem leo sit amet sapien. Nam vestibulum cursus magna, a dapibus augue pellentesque sed. Integer tincidunt scelerisque urna non viverra. Sed faucibus leo augue, ac suscipit orci cursus sed. Mauris sit amet consectetur nisi.
- """
- )
-
- chart_data = pd.DataFrame(np.random.randn(20, 3), columns=list("abc"))
-
- st.line_chart(chart_data)
-
- st.markdown(
- """
- ## Test 2
-
- Proin malesuada diam blandit orci auctor, ac auctor lacus porttitor. Aenean id faucibus tortor. Morbi ac odio leo. Proin consequat facilisis magna eu elementum. Proin arcu sapien, venenatis placerat blandit vitae, pharetra ac ipsum. Proin interdum purus non eros condimentum, sit amet luctus quam iaculis. Quisque vitae sapien felis. Vivamus ut tortor accumsan, dictum mi a, semper libero. Morbi sed fermentum ligula, quis varius quam. Suspendisse rutrum, sapien at scelerisque vestibulum, ipsum nibh fermentum odio, vel pellentesque arcu erat at sapien. Maecenas aliquam eget metus ut interdum.
-
- ```python
-
- def my_awesome_function(a, b):
- return a + b
- ```
-
- Sed lacinia suscipit turpis sit amet gravida. Etiam quis purus in magna elementum malesuada. Nullam fermentum, sapien a maximus pharetra, mauris tortor maximus velit, a tempus dolor elit ut lectus. Cras ut nulla eget dolor malesuada congue. Quisque placerat, nulla in pharetra dapibus, nunc ligula semper massa, eu euismod dui risus non metus. Curabitur pretium lorem vel luctus dictum. Maecenas a dui in odio congue interdum. Sed massa est, rutrum eu risus et, pharetra pulvinar lorem.
- """
- )
-
- st.area_chart(chart_data)
-
- st.markdown(
- """
- ## Test 3
-
- You can also display images using [Pillow](https://pillow.readthedocs.io/en/stable/index.html).
-
- ```python
- import streamlit as st
- from PIL import Image
-
- st.image(Image.open("assets/sample-image.jpg"))
-
- ```
-
- """
- )
-
- st.image(Image.open("assets/sample-image.jpg"))
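
The deleted template above implies the tab contract this Streamlit app expects: each module in `tabs/` exposes a `title`, a `sidebar_name`, and a `run()` function. A minimal sketch of a renamed tab following that shape is shown below; the file name `tabs/modeling_tab.py` and its contents are placeholders invented for illustration, not files from the original Space.

```python
# tabs/modeling_tab.py  (hypothetical renamed tab following the template's shape)
import numpy as np
import pandas as pd
import streamlit as st

title = "Modeling"
sidebar_name = "Modeling"


def run():
    st.title(title)
    st.markdown("Replace this placeholder with the tab's real content.")

    # same idea as the template: a small random DataFrame rendered as a chart
    chart_data = pd.DataFrame(np.random.randn(20, 3), columns=list("abc"))
    st.line_chart(chart_data)
```

Keeping every tab to this shape lets the app's entry point import a module and simply call its `run()` when the corresponding sidebar entry is selected.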
diff --git a/spaces/DragGan/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/PlayInteractively.py b/spaces/DragGan/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/PlayInteractively.py
deleted file mode 100644
index 547b08ab2c4373e23711636488145df148d7eb4e..0000000000000000000000000000000000000000
--- a/spaces/DragGan/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/PlayInteractively.py
+++ /dev/null
@@ -1,197 +0,0 @@
-
-
-
-from tkinter import Tk
-from PIL import Image, ImageTk
-from tkinter.filedialog import askopenfilename
-from GUI import View
-from Inference import StyleCLIP
-import argparse
-#%%
-
-
-class PlayInteractively(): #Controller
- '''
- Follows the Model-View-Controller design pattern:
-
- this class is the controller, StyleCLIP is the model, View is the view
- '''
- def __init__(self,dataset_name='ffhq'):
-
- self.root = Tk()
- self.view=View(self.root)
- self.img_ratio=2
- self.style_clip=StyleCLIP(dataset_name)
-
- self.view.neutral.bind("", self.text_n)
- self.view.target.bind("", self.text_t)
- self.view.alpha.bind('', self.ChangeAlpha)
- self.view.beta.bind('', self.ChangeBeta)
- self.view.set_init.bind('', self.SetInit)
- self.view.reset.bind('', self.Reset)
- self.view.bg.bind('', self.open_img)
-
-
- self.drawn = None
-
- self.view.target.delete(1.0, "end")
- self.view.target.insert("end", self.style_clip.target)
-#
- self.view.neutral.delete(1.0, "end")
- self.view.neutral.insert("end", self.style_clip.neutral)
-
-
- def Reset(self,event):
- self.style_clip.GetDt2()
- self.style_clip.M.alpha=[0]
-
- self.view.beta.set(self.style_clip.beta)
- self.view.alpha.set(0)
-
- img=self.style_clip.GetImg()
- img=Image.fromarray(img)
- img = ImageTk.PhotoImage(img)
- self.addImage_m(img)
-
-
- def SetInit(self,event):
- codes=self.style_clip.GetCode()
- self.style_clip.M.dlatent_tmp=[tmp[:,0] for tmp in codes]
- print('set init')
-
- def ChangeAlpha(self,event):
- tmp=self.view.alpha.get()
- self.style_clip.M.alpha=[float(tmp)]
-
- img=self.style_clip.GetImg()
- print('manipulate one')
- img=Image.fromarray(img)
- img = ImageTk.PhotoImage(img)
- self.addImage_m(img)
-
- def ChangeBeta(self,event):
- tmp=self.view.beta.get()
- self.style_clip.beta=float(tmp)
-
- img=self.style_clip.GetImg()
- print('manipulate one')
- img=Image.fromarray(img)
- img = ImageTk.PhotoImage(img)
- self.addImage_m(img)
-
- def ChangeDataset(self,event):
-
- dataset_name=self.view.set_category.get()
-
- self.style_clip.LoadData(dataset_name)
-
- self.view.target.delete(1.0, "end")
- self.view.target.insert("end", self.style_clip.target)
-
- self.view.neutral.delete(1.0, "end")
- self.view.neutral.insert("end", self.style_clip.neutral)
-
- def text_t(self,event):
- tmp=self.view.target.get("1.0",'end')
- tmp=tmp.replace('\n','')
-
- self.view.target.delete(1.0, "end")
- self.view.target.insert("end", tmp)
-
- print('target',tmp,'###')
- self.style_clip.target=tmp
- self.style_clip.GetDt2()
- self.view.beta.set(self.style_clip.beta)
- self.view.alpha.set(3)
- self.style_clip.M.alpha=[3]
-
- img=self.style_clip.GetImg()
- print('manipulate one')
- img=Image.fromarray(img)
- img = ImageTk.PhotoImage(img)
- self.addImage_m(img)
-
-
- def text_n(self,event):
- tmp=self.view.neutral.get("1.0",'end')
- tmp=tmp.replace('\n','')
-
- self.view.neutral.delete(1.0, "end")
- self.view.neutral.insert("end", tmp)
-
- print('neutral',tmp,'###')
- self.style_clip.neutral=tmp
- self.view.target.delete(1.0, "end")
- self.view.target.insert("end", tmp)
-
-
- def run(self):
- self.root.mainloop()
-
- def addImage(self,img):
- self.view.bg.create_image(self.view.width/2, self.view.height/2, image=img, anchor='center')
- self.image=img  # keep a reference; otherwise Tk garbage-collects the PhotoImage and it disappears
-
- def addImage_m(self,img):
- self.view.mani.create_image(512, 512, image=img, anchor='center')
- self.image2=img
-
-
- def openfn(self):
- filename = askopenfilename(title='open',initialdir='./data/'+self.style_clip.M.dataset_name+'/',filetypes=[("all image format", ".jpg"),("all image format", ".png")])
- return filename
-
- def open_img(self,event):
- x = self.openfn()
- print(x)
-
-
- img = Image.open(x)
- img2 = img.resize(( 512,512), Image.ANTIALIAS)
- img2 = ImageTk.PhotoImage(img2)
- self.addImage(img2)
-
- img = ImageTk.PhotoImage(img)
- self.addImage_m(img)
-
- img_index=x.split('/')[-1].split('.')[0]
- img_index=int(img_index)
- print(img_index)
- self.style_clip.M.img_index=img_index
- self.style_clip.M.dlatent_tmp=[tmp[img_index:(img_index+1)] for tmp in self.style_clip.M.dlatents]
-
-
- self.style_clip.GetDt2()
- self.view.beta.set(self.style_clip.beta)
- self.view.alpha.set(3)
-
- #%%
-if __name__ == "__main__":
- parser = argparse.ArgumentParser(description='Process some integers.')
-
- parser.add_argument('--dataset_name',type=str,default='ffhq',
- help='name of dataset, for example, ffhq')
-
- args = parser.parse_args()
- dataset_name=args.dataset_name
-
- self=PlayInteractively(dataset_name)
- self.run()
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/spaces/DragGan/DragGan/stylegan_human/pti/training/coaches/base_coach.py b/spaces/DragGan/DragGan/stylegan_human/pti/training/coaches/base_coach.py
deleted file mode 100644
index b429bd3cf186fc32bbf9491a39a889eebfe2110d..0000000000000000000000000000000000000000
--- a/spaces/DragGan/DragGan/stylegan_human/pti/training/coaches/base_coach.py
+++ /dev/null
@@ -1,150 +0,0 @@
-import abc
-import os
-import pickle
-from argparse import Namespace
-import wandb
-import os.path
-from .localitly_regulizer import Space_Regulizer, l2_loss
-import torch
-from torchvision import transforms
-from lpips import LPIPS
-from pti.training.projectors import w_projector
-from pti.pti_configs import global_config, paths_config, hyperparameters
-from pti.pti_models.e4e.psp import pSp
-from utils.log_utils import log_image_from_w
-from utils.models_utils import toogle_grad, load_old_G
-
-
-class BaseCoach:
- def __init__(self, data_loader, use_wandb):
-
- self.use_wandb = use_wandb
- self.data_loader = data_loader
- self.w_pivots = {}
- self.image_counter = 0
-
- if hyperparameters.first_inv_type == 'w+':
- self.initilize_e4e()
-
- self.e4e_image_transform = transforms.Compose([
- transforms.ToPILImage(),
- transforms.Resize((256, 128)),
- transforms.ToTensor(),
- transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
-
- # Initialize loss
- self.lpips_loss = LPIPS(net=hyperparameters.lpips_type).to(global_config.device).eval()
-
- self.restart_training()
-
- # Initialize checkpoint dir
- self.checkpoint_dir = paths_config.checkpoints_dir
- os.makedirs(self.checkpoint_dir, exist_ok=True)
-
- def restart_training(self):
-
- # Initialize networks
- self.G = load_old_G()
- toogle_grad(self.G, True)
-
- self.original_G = load_old_G()
-
- self.space_regulizer = Space_Regulizer(self.original_G, self.lpips_loss)
- self.optimizer = self.configure_optimizers()
-
- def get_inversion(self, w_path_dir, image_name, image):
- embedding_dir = f'{w_path_dir}/{paths_config.pti_results_keyword}/{image_name}'
- os.makedirs(embedding_dir, exist_ok=True)
-
- w_pivot = None
-
- if hyperparameters.use_last_w_pivots:
- w_pivot = self.load_inversions(w_path_dir, image_name)
-
- if not hyperparameters.use_last_w_pivots or w_pivot is None:
- w_pivot = self.calc_inversions(image, image_name)
- torch.save(w_pivot, f'{embedding_dir}/0.pt')
-
- w_pivot = w_pivot.to(global_config.device)
- return w_pivot
-
- def load_inversions(self, w_path_dir, image_name):
- if image_name in self.w_pivots:
- return self.w_pivots[image_name]
-
- if hyperparameters.first_inv_type == 'w+':
- w_potential_path = f'{w_path_dir}/{paths_config.e4e_results_keyword}/{image_name}/0.pt'
- else:
- w_potential_path = f'{w_path_dir}/{paths_config.pti_results_keyword}/{image_name}/0.pt'
- if not os.path.isfile(w_potential_path):
- return None
- w = torch.load(w_potential_path).to(global_config.device)
- self.w_pivots[image_name] = w
- return w
-
- def calc_inversions(self, image, image_name):
- if hyperparameters.first_inv_type == 'w+':
- w = self.get_e4e_inversion(image)
-
- else:
- id_image = torch.squeeze((image.to(global_config.device) + 1) / 2) * 255
- w = w_projector.project(self.G, id_image, device=torch.device(global_config.device), w_avg_samples=600,
- num_steps=hyperparameters.first_inv_steps, w_name=image_name,
- use_wandb=self.use_wandb)
-
- return w
-
- @abc.abstractmethod
- def train(self):
- pass
-
- def configure_optimizers(self):
- optimizer = torch.optim.Adam(self.G.parameters(), lr=hyperparameters.pti_learning_rate)
-
- return optimizer
-
- def calc_loss(self, generated_images, real_images, log_name, new_G, use_ball_holder, w_batch):
- loss = 0.0
-
- if hyperparameters.pt_l2_lambda > 0:
- l2_loss_val = l2_loss(generated_images, real_images)
- if self.use_wandb:
- wandb.log({f'MSE_loss_val_{log_name}': l2_loss_val.detach().cpu()}, step=global_config.training_step)
- loss += l2_loss_val * hyperparameters.pt_l2_lambda
- if hyperparameters.pt_lpips_lambda > 0:
- loss_lpips = self.lpips_loss(generated_images, real_images)
- loss_lpips = torch.squeeze(loss_lpips)
- if self.use_wandb:
- wandb.log({f'LPIPS_loss_val_{log_name}': loss_lpips.detach().cpu()}, step=global_config.training_step)
- loss += loss_lpips * hyperparameters.pt_lpips_lambda
-
- if use_ball_holder and hyperparameters.use_locality_regularization:
- ball_holder_loss_val = self.space_regulizer.space_regulizer_loss(new_G, w_batch, use_wandb=self.use_wandb)
- loss += ball_holder_loss_val
-
- return loss, l2_loss_val, loss_lpips
-
- def forward(self, w):
- generated_images = self.G.synthesis(w, noise_mode='const', force_fp32=True)
-
- return generated_images
-
- def initilize_e4e(self):
- ckpt = torch.load(paths_config.e4e, map_location='cpu')
- opts = ckpt['opts']
- opts['batch_size'] = hyperparameters.train_batch_size
- opts['checkpoint_path'] = paths_config.e4e
- opts = Namespace(**opts)
- self.e4e_inversion_net = pSp(opts)
- self.e4e_inversion_net.eval()
- self.e4e_inversion_net = self.e4e_inversion_net.to(global_config.device)
- toogle_grad(self.e4e_inversion_net, False)
-
- def get_e4e_inversion(self, image):
- image = (image + 1) / 2
- new_image = self.e4e_image_transform(image[0]).to(global_config.device)
- _, w = self.e4e_inversion_net(new_image.unsqueeze(0), randomize_noise=False, return_latents=True, resize=False,
- input_code=False)
- if self.use_wandb:
- log_image_from_w(w, self.G, 'First e4e inversion')
- return w
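As a rough illustration of how the pieces of `BaseCoach` fit together, here is a hypothetical minimal subclass. It is only a sketch, not code from the original PTI repository: the `"./embeddings"` output directory, the assumption that `data_loader` yields `(names, images)` batches of size 1, and the `hyperparameters.max_pti_steps` setting are all assumptions.

```python
# Hypothetical sketch only: a minimal coach that tunes G around each image's pivot latent.
# Assumes batch_size == 1 and that hyperparameters.max_pti_steps exists in the config.
class SingleImageCoach(BaseCoach):
    def train(self):
        for image_name, image in self.data_loader:
            name = image_name[0]                       # dataloader yields (names, images) batches
            image = image.to(global_config.device)
            w_pivot = self.get_inversion("./embeddings", name, image)  # assumed output dir
            for _ in range(hyperparameters.max_pti_steps):
                generated = self.forward(w_pivot)
                loss, _, _ = self.calc_loss(generated, image, name, self.G, True, w_pivot)
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
            self.image_counter += 1
```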
diff --git a/spaces/ECCV2022/bytetrack/deploy/TensorRT/cpp/include/kalmanFilter.h b/spaces/ECCV2022/bytetrack/deploy/TensorRT/cpp/include/kalmanFilter.h
deleted file mode 100644
index 6596b54e33de75d1b49a8af9bfbb1f26d00ea786..0000000000000000000000000000000000000000
--- a/spaces/ECCV2022/bytetrack/deploy/TensorRT/cpp/include/kalmanFilter.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#pragma once
-
-#include "dataType.h"
-
-namespace byte_kalman
-{
- class KalmanFilter
- {
- public:
- static const double chi2inv95[10];
- KalmanFilter();
- KAL_DATA initiate(const DETECTBOX& measurement);
- void predict(KAL_MEAN& mean, KAL_COVA& covariance);
- KAL_HDATA project(const KAL_MEAN& mean, const KAL_COVA& covariance);
- KAL_DATA update(const KAL_MEAN& mean,
- const KAL_COVA& covariance,
- const DETECTBOX& measurement);
-
- Eigen::Matrix<float, 1, -1> gating_distance(
- const KAL_MEAN& mean,
- const KAL_COVA& covariance,
- const std::vector<DETECTBOX>& measurements,
- bool only_position = false);
-
- private:
- Eigen::Matrix<float, 8, 8, Eigen::RowMajor> _motion_mat;
- Eigen::Matrix<float, 4, 8, Eigen::RowMajor> _update_mat;
- float _std_weight_position;
- float _std_weight_velocity;
- };
-}
\ No newline at end of file
diff --git a/spaces/ECCV2022/bytetrack/yolox/utils/visualize.py b/spaces/ECCV2022/bytetrack/yolox/utils/visualize.py
deleted file mode 100644
index 1d02d474d289df7bf3a9c43a707f403c1858f950..0000000000000000000000000000000000000000
--- a/spaces/ECCV2022/bytetrack/yolox/utils/visualize.py
+++ /dev/null
@@ -1,166 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding:utf-8 -*-
-# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
-
-import cv2
-import numpy as np
-
-__all__ = ["vis"]
-
-
-def vis(img, boxes, scores, cls_ids, conf=0.5, class_names=None):
-
- for i in range(len(boxes)):
- box = boxes[i]
- cls_id = int(cls_ids[i])
- score = scores[i]
- if score < conf:
- continue
- x0 = int(box[0])
- y0 = int(box[1])
- x1 = int(box[2])
- y1 = int(box[3])
-
- color = (_COLORS[cls_id] * 255).astype(np.uint8).tolist()
- text = '{}:{:.1f}%'.format(class_names[cls_id], score * 100)
- txt_color = (0, 0, 0) if np.mean(_COLORS[cls_id]) > 0.5 else (255, 255, 255)
- font = cv2.FONT_HERSHEY_SIMPLEX
-
- txt_size = cv2.getTextSize(text, font, 0.4, 1)[0]
- cv2.rectangle(img, (x0, y0), (x1, y1), color, 2)
-
- txt_bk_color = (_COLORS[cls_id] * 255 * 0.7).astype(np.uint8).tolist()
- cv2.rectangle(
- img,
- (x0, y0 + 1),
- (x0 + txt_size[0] + 1, y0 + int(1.5*txt_size[1])),
- txt_bk_color,
- -1
- )
- cv2.putText(img, text, (x0, y0 + txt_size[1]), font, 0.4, txt_color, thickness=1)
-
- return img
-
-
-def get_color(idx):
- idx = idx * 3
- color = ((37 * idx) % 255, (17 * idx) % 255, (29 * idx) % 255)
-
- return color
-
-
-def plot_tracking(image, tlwhs, obj_ids, scores=None, frame_id=0, fps=0., ids2=None):
- im = np.ascontiguousarray(np.copy(image))
- im_h, im_w = im.shape[:2]
-
- top_view = np.zeros([im_w, im_w, 3], dtype=np.uint8) + 255
-
- #text_scale = max(1, image.shape[1] / 1600.)
- #text_thickness = 2
- #line_thickness = max(1, int(image.shape[1] / 500.))
- text_scale = 2
- text_thickness = 2
- line_thickness = 3
-
- radius = max(5, int(im_w/140.))
- cv2.putText(im, 'frame: %d fps: %.2f num: %d' % (frame_id, fps, len(tlwhs)),
- (0, int(15 * text_scale)), cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), thickness=2)
-
- for i, tlwh in enumerate(tlwhs):
- x1, y1, w, h = tlwh
- intbox = tuple(map(int, (x1, y1, x1 + w, y1 + h)))
- obj_id = int(obj_ids[i])
- id_text = '{}'.format(int(obj_id))
- if ids2 is not None:
- id_text = id_text + ', {}'.format(int(ids2[i]))
- color = get_color(abs(obj_id))
- cv2.rectangle(im, intbox[0:2], intbox[2:4], color=color, thickness=line_thickness)
- cv2.putText(im, id_text, (intbox[0], intbox[1]), cv2.FONT_HERSHEY_PLAIN, text_scale, (0, 0, 255),
- thickness=text_thickness)
- return im
-
-
-_COLORS = np.array(
- [
- 0.000, 0.447, 0.741,
- 0.850, 0.325, 0.098,
- 0.929, 0.694, 0.125,
- 0.494, 0.184, 0.556,
- 0.466, 0.674, 0.188,
- 0.301, 0.745, 0.933,
- 0.635, 0.078, 0.184,
- 0.300, 0.300, 0.300,
- 0.600, 0.600, 0.600,
- 1.000, 0.000, 0.000,
- 1.000, 0.500, 0.000,
- 0.749, 0.749, 0.000,
- 0.000, 1.000, 0.000,
- 0.000, 0.000, 1.000,
- 0.667, 0.000, 1.000,
- 0.333, 0.333, 0.000,
- 0.333, 0.667, 0.000,
- 0.333, 1.000, 0.000,
- 0.667, 0.333, 0.000,
- 0.667, 0.667, 0.000,
- 0.667, 1.000, 0.000,
- 1.000, 0.333, 0.000,
- 1.000, 0.667, 0.000,
- 1.000, 1.000, 0.000,
- 0.000, 0.333, 0.500,
- 0.000, 0.667, 0.500,
- 0.000, 1.000, 0.500,
- 0.333, 0.000, 0.500,
- 0.333, 0.333, 0.500,
- 0.333, 0.667, 0.500,
- 0.333, 1.000, 0.500,
- 0.667, 0.000, 0.500,
- 0.667, 0.333, 0.500,
- 0.667, 0.667, 0.500,
- 0.667, 1.000, 0.500,
- 1.000, 0.000, 0.500,
- 1.000, 0.333, 0.500,
- 1.000, 0.667, 0.500,
- 1.000, 1.000, 0.500,
- 0.000, 0.333, 1.000,
- 0.000, 0.667, 1.000,
- 0.000, 1.000, 1.000,
- 0.333, 0.000, 1.000,
- 0.333, 0.333, 1.000,
- 0.333, 0.667, 1.000,
- 0.333, 1.000, 1.000,
- 0.667, 0.000, 1.000,
- 0.667, 0.333, 1.000,
- 0.667, 0.667, 1.000,
- 0.667, 1.000, 1.000,
- 1.000, 0.000, 1.000,
- 1.000, 0.333, 1.000,
- 1.000, 0.667, 1.000,
- 0.333, 0.000, 0.000,
- 0.500, 0.000, 0.000,
- 0.667, 0.000, 0.000,
- 0.833, 0.000, 0.000,
- 1.000, 0.000, 0.000,
- 0.000, 0.167, 0.000,
- 0.000, 0.333, 0.000,
- 0.000, 0.500, 0.000,
- 0.000, 0.667, 0.000,
- 0.000, 0.833, 0.000,
- 0.000, 1.000, 0.000,
- 0.000, 0.000, 0.167,
- 0.000, 0.000, 0.333,
- 0.000, 0.000, 0.500,
- 0.000, 0.000, 0.667,
- 0.000, 0.000, 0.833,
- 0.000, 0.000, 1.000,
- 0.000, 0.000, 0.000,
- 0.143, 0.143, 0.143,
- 0.286, 0.286, 0.286,
- 0.429, 0.429, 0.429,
- 0.571, 0.571, 0.571,
- 0.714, 0.714, 0.714,
- 0.857, 0.857, 0.857,
- 0.000, 0.447, 0.741,
- 0.314, 0.717, 0.741,
- 0.50, 0.5, 0
- ]
-).astype(np.float32).reshape(-1, 3)
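To make the expected inputs of `vis` and `plot_tracking` concrete, here is a hedged usage sketch; the frame, boxes, scores, class names and track ids below are random placeholders rather than real detector or tracker output, and the snippet assumes the functions above are in scope.

```python
# Illustrative sketch only: exercise vis() and plot_tracking() with dummy data.
import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)

# vis() takes per-detection corner boxes (x0, y0, x1, y1), scores and class ids.
boxes = np.array([[50, 60, 200, 220], [300, 100, 420, 260]], dtype=np.float32)
scores = np.array([0.9, 0.75], dtype=np.float32)
cls_ids = np.array([0, 2], dtype=np.int64)
annotated = vis(frame.copy(), boxes, scores, cls_ids, conf=0.5,
                class_names=["person", "bicycle", "car"])

# plot_tracking() takes top-left/width/height boxes plus integer track ids.
tlwhs = [(50, 60, 150, 160), (300, 100, 120, 160)]
tracked = plot_tracking(frame, tlwhs, obj_ids=[1, 2], frame_id=0, fps=30.0)
```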
diff --git a/spaces/ElainaFanBoy/MusicGen/audiocraft/modules/lstm.py b/spaces/ElainaFanBoy/MusicGen/audiocraft/modules/lstm.py
deleted file mode 100644
index c0866175950c1ca4f6cca98649525e6481853bba..0000000000000000000000000000000000000000
--- a/spaces/ElainaFanBoy/MusicGen/audiocraft/modules/lstm.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-from torch import nn
-
-
-class StreamableLSTM(nn.Module):
- """LSTM without worrying about the hidden state, nor the layout of the data.
- Expects input as convolutional layout.
- """
- def __init__(self, dimension: int, num_layers: int = 2, skip: bool = True):
- super().__init__()
- self.skip = skip
- self.lstm = nn.LSTM(dimension, dimension, num_layers)
-
- def forward(self, x):
- x = x.permute(2, 0, 1)
- y, _ = self.lstm(x)
- if self.skip:
- y = y + x
- y = y.permute(1, 2, 0)
- return y
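A minimal sketch of the convolutional layout the docstring refers to; the dimensions below are made up for illustration and `StreamableLSTM` is the class defined above.

```python
# Sketch only: StreamableLSTM keeps the [batch, channels, time] layout at its interface,
# permuting to [time, batch, channels] internally for nn.LSTM and back on the way out.
import torch

lstm = StreamableLSTM(dimension=128, num_layers=2, skip=True)
x = torch.randn(4, 128, 50)      # [batch, channels, time], as produced by a Conv1d stack
y = lstm(x)
assert y.shape == x.shape        # the skip connection and permutes preserve the layout
```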
diff --git a/spaces/Feifei315/flax-midjourney-v4-diffusion/README.md b/spaces/Feifei315/flax-midjourney-v4-diffusion/README.md
deleted file mode 100644
index bd1e5d8817ac4929e35ae8bfc391198f7d545642..0000000000000000000000000000000000000000
--- a/spaces/Feifei315/flax-midjourney-v4-diffusion/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Flax Midjourney V4 Diffusion
-emoji: 🔥
-colorFrom: yellow
-colorTo: pink
-sdk: gradio
-sdk_version: 3.27.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/FluxWaveCorp/Ghostwriter-Bloom/generators/topic_to_abstract.py b/spaces/FluxWaveCorp/Ghostwriter-Bloom/generators/topic_to_abstract.py
deleted file mode 100644
index a91131db41007e46ce1d9a9772bdd7e9645b9d2b..0000000000000000000000000000000000000000
--- a/spaces/FluxWaveCorp/Ghostwriter-Bloom/generators/topic_to_abstract.py
+++ /dev/null
@@ -1,6 +0,0 @@
-
-from .model import model
-
-
-def topic_to_abstract_generator(template):
- return model('topic', template)
diff --git a/spaces/FritsLyneborg/kunstnerfrits/README.md b/spaces/FritsLyneborg/kunstnerfrits/README.md
deleted file mode 100644
index 1759f1317cee8502e6727c061c0fb2e4a9ba7f9b..0000000000000000000000000000000000000000
--- a/spaces/FritsLyneborg/kunstnerfrits/README.md
+++ /dev/null
@@ -1,270 +0,0 @@
----
-title: DALL·E mini
-emoji: 🥑
-colorFrom: yellow
-colorTo: green
-sdk: streamlit
-app_file: app/streamlit/app.py
-pinned: True
----
-
-# DALL·E Mini
-
-[Join us on Discord](https://discord.gg/xBPBXfcFHd)
-
-_Generate images from a text prompt_
-
-
-
-Our logo was generated with DALL·E mini using the prompt "logo of an armchair in the shape of an avocado".
-
-You can create your own pictures with [the demo](https://huggingface.co/spaces/flax-community/dalle-mini).
-
-## How does it work?
-
-Refer to [our report](https://wandb.ai/dalle-mini/dalle-mini/reports/DALL-E-mini--Vmlldzo4NjIxODA).
-
-## Inference Pipeline
-
-To generate sample predictions and understand the inference pipeline step by step, refer to [`tools/inference/inference_pipeline.ipynb`](tools/inference/inference_pipeline.ipynb).
-
-[Open in Colab](https://colab.research.google.com/github/borisdayma/dalle-mini/blob/main/tools/inference/inference_pipeline.ipynb)
-
-## Contributing
-
-Join the community on the [DALLE-Pytorch Discord](https://discord.gg/xBPBXfcFHd).
-Any contribution is welcome, from reporting issues to proposing fixes/improvements or testing the model with cool prompts!
-
-## Development
-
-### Dependencies Installation
-
-For inference only, use `pip install git+https://github.com/borisdayma/dalle-mini.git`.
-
-For development, clone the repo and use `pip install -e ".[dev]"`.
-Before making a PR, check style with `make style`.
-
-### Image Encoder
-
-We use a VQGAN from [taming-transformers](https://github.com/CompVis/taming-transformers), which can also be fine-tuned.
-
-Use [patil-suraj/vqgan-jax](https://github.com/patil-suraj/vqgan-jax) if you want to convert a checkpoint to JAX (does not support Gumbel).
-
-Any image encoder that turns an image into a fixed sequence of tokens can be used.
-
-### Training of DALL·E mini
-
-Use [`tools/train/train.py`](tools/train/train.py).
-
-You can also adjust the [sweep configuration file](https://docs.wandb.ai/guides/sweeps) if you need to perform a hyperparameter search.
-
-## FAQ
-
-### Where to find the latest models?
-
-Trained models are on 🤗 Model Hub:
-
-- [VQGAN-f16-16384](https://huggingface.co/dalle-mini/vqgan_imagenet_f16_16384) for encoding/decoding images
-- [DALL·E mini](https://huggingface.co/flax-community/dalle-mini) for generating images from a text prompt
-
-### Where does the logo come from?
-
-The "armchair in the shape of an avocado" was used by OpenAI when releasing DALL·E to illustrate the model's capabilities. Having successful predictions on this prompt represents a big milestone to us.
-
-## Acknowledgements
-
-- 🤗 Hugging Face for organizing [the FLAX/JAX community week](https://github.com/huggingface/transformers/tree/master/examples/research_projects/jax-projects)
-- Google [TPU Research Cloud (TRC) program](https://sites.research.google/trc/) for providing computing resources
-- [Weights & Biases](https://wandb.com/) for providing the infrastructure for experiment tracking and model management
-
-## Authors & Contributors
-
-DALL·E mini was initially developed by:
-
-- [Boris Dayma](https://github.com/borisdayma)
-- [Suraj Patil](https://github.com/patil-suraj)
-- [Pedro Cuenca](https://github.com/pcuenca)
-- [Khalid Saifullah](https://github.com/khalidsaifullaah)
-- [Tanishq Abraham](https://github.com/tmabraham)
-- [Phúc Lê Khắc](https://github.com/lkhphuc)
-- [Luke Melas](https://github.com/lukemelas)
-- [Ritobrata Ghosh](https://github.com/ghosh-r)
-
-Many thanks to the people who helped make it better:
-
-- the [DALLE-Pytorch](https://discord.gg/xBPBXfcFHd) and [EleutherAI](https://www.eleuther.ai/) communities for testing and exchanging cool ideas
-- [Rohan Anil](https://github.com/rohan-anil) for adding Distributed Shampoo optimizer
-- [Phil Wang](https://github.com/lucidrains) for providing many cool implementations of transformer variants and sharing interesting insights through [x-transformers](https://github.com/lucidrains/x-transformers)
-- [Katherine Crowson](https://github.com/crowsonkb) for [super conditioning](https://twitter.com/RiversHaveWings/status/1478093658716966912)
-
-## Citing DALL·E mini
-
-If you find DALL·E mini useful in your research or wish to refer to it, please use the following BibTeX entry.
-
-```text
-@misc{Dayma_DALL·E_Mini_2021,
- author = {Dayma, Boris and Patil, Suraj and Cuenca, Pedro and Saifullah, Khalid and Abraham, Tanishq and Lê Khắc, Phúc and Melas, Luke and Ghosh, Ritobrata},
- doi = {10.5281/zenodo.5146400},
- month = {7},
- title = {DALL·E Mini},
- url = {https://github.com/borisdayma/dalle-mini},
- year = {2021}
-}
-```
-
-## References
-
-Original DALL·E from "[Zero-Shot Text-to-Image Generation](https://arxiv.org/abs/2102.12092)" with image quantization from "[Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020)".
-
-Image encoder from "[Taming Transformers for High-Resolution Image Synthesis](https://arxiv.org/abs/2012.09841v2)".
-
-Sequence to sequence model based on "[BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension](https://arxiv.org/abs/1910.13461v1)" with implementation of a few variants:
-
-- "[GLU Variants Improve Transformer](https://arxiv.org/abs/2002.05202)"
-- "[Deepnet: Scaling Transformers to 1,000 Layers](https://arxiv.org/abs/2203.00555)"
-- "[NormFormer: Improved Transformer Pretraining with Extra Normalization](https://arxiv.org/abs/2110.09456)"
-- "[Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030)"
-- "[CogView: Mastering Text-to-Image Generation via Transformers](https://arxiv.org/abs/2105.13290v2)"
-- "[Root Mean Square Layer Normalization](https://arxiv.org/abs/1910.07467)"
-- "[Sinkformers: Transformers with Doubly Stochastic Attention](https://arxiv.org/abs/2110.11773)"
-
-Main optimizer (Distributed Shampoo) from "[Scalable Second Order Optimization for Deep Learning](https://arxiv.org/abs/2002.09018)".
-
-### Citations
-
-```text
-@misc{
- title={Zero-Shot Text-to-Image Generation},
- author={Aditya Ramesh and Mikhail Pavlov and Gabriel Goh and Scott Gray and Chelsea Voss and Alec Radford and Mark Chen and Ilya Sutskever},
- year={2021},
- eprint={2102.12092},
- archivePrefix={arXiv},
- primaryClass={cs.CV}
-}
-```
-
-```text
-@misc{
- title={Learning Transferable Visual Models From Natural Language Supervision},
- author={Alec Radford and Jong Wook Kim and Chris Hallacy and Aditya Ramesh and Gabriel Goh and Sandhini Agarwal and Girish Sastry and Amanda Askell and Pamela Mishkin and Jack Clark and Gretchen Krueger and Ilya Sutskever},
- year={2021},
- eprint={2103.00020},
- archivePrefix={arXiv},
- primaryClass={cs.CV}
-}
-```
-
-```text
-@misc{
- title={Taming Transformers for High-Resolution Image Synthesis},
- author={Patrick Esser and Robin Rombach and Björn Ommer},
- year={2021},
- eprint={2012.09841},
- archivePrefix={arXiv},
- primaryClass={cs.CV}
-}
-```
-
-```text
-@misc{
- title={BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension},
- author={Mike Lewis and Yinhan Liu and Naman Goyal and Marjan Ghazvininejad and Abdelrahman Mohamed and Omer Levy and Ves Stoyanov and Luke Zettlemoyer},
- year={2019},
- eprint={1910.13461},
- archivePrefix={arXiv},
- primaryClass={cs.CL}
-}
-```
-
-```text
-@misc{
- title={Scalable Second Order Optimization for Deep Learning},
- author={Rohan Anil and Vineet Gupta and Tomer Koren and Kevin Regan and Yoram Singer},
- year={2021},
- eprint={2002.09018},
- archivePrefix={arXiv},
- primaryClass={cs.LG}
-}
-```
-
-```text
-@misc{
- title={GLU Variants Improve Transformer},
- author={Noam Shazeer},
- year={2020},
- url={https://arxiv.org/abs/2002.05202}
-}
-```
-
-```text
-@misc{
- title={DeepNet: Scaling transformers to 1,000 layers},
- author={Wang, Hongyu and Ma, Shuming and Dong, Li and Huang, Shaohan and Zhang, Dongdong and Wei, Furu},
- year={2022},
- eprint={2203.00555},
- archivePrefix={arXiv},
- primaryClass={cs.LG}
-}
-```
-
-```text
-@misc{
- title={NormFormer: Improved Transformer Pretraining with Extra Normalization},
- author={Sam Shleifer and Jason Weston and Myle Ott},
- year={2021},
- eprint={2110.09456},
- archivePrefix={arXiv},
- primaryClass={cs.CL}
-}
-```
-
-```text
-@inproceedings{
- title={Swin Transformer V2: Scaling Up Capacity and Resolution},
- author={Ze Liu and Han Hu and Yutong Lin and Zhuliang Yao and Zhenda Xie and Yixuan Wei and Jia Ning and Yue Cao and Zheng Zhang and Li Dong and Furu Wei and Baining Guo},
- booktitle={International Conference on Computer Vision and Pattern Recognition (CVPR)},
- year={2022}
-}
-```
-
-```text
-@misc{
- title = {CogView: Mastering Text-to-Image Generation via Transformers},
- author = {Ming Ding and Zhuoyi Yang and Wenyi Hong and Wendi Zheng and Chang Zhou and Da Yin and Junyang Lin and Xu Zou and Zhou Shao and Hongxia Yang and Jie Tang},
- year = {2021},
- eprint = {2105.13290},
- archivePrefix = {arXiv},
- primaryClass = {cs.CV}
-}
-```
-
-```text
-@misc{
- title = {Root Mean Square Layer Normalization},
- author = {Biao Zhang and Rico Sennrich},
- year = {2019},
- eprint = {1910.07467},
- archivePrefix = {arXiv},
- primaryClass = {cs.LG}
-}
-```
-
-```text
-@misc{
- title = {Sinkformers: Transformers with Doubly Stochastic Attention},
- url = {https://arxiv.org/abs/2110.11773},
- author = {Sander, Michael E. and Ablin, Pierre and Blondel, Mathieu and Peyré, Gabriel},
- publisher = {arXiv},
- year = {2021},
-}
-```
-
-```text
-@misc{
- title = {Smooth activations and reproducibility in deep networks},
- url = {https://arxiv.org/abs/2010.09931},
- author = {Shamir, Gil I. and Lin, Dong and Coviello, Lorenzo},
- publisher = {arXiv},
- year = {2020},
-}
-```
diff --git a/spaces/Froleptan/stablediffusion-infinity/process.py b/spaces/Froleptan/stablediffusion-infinity/process.py
deleted file mode 100644
index 5db1495ac8098c0260f5fdf5a60ca35a043b461c..0000000000000000000000000000000000000000
--- a/spaces/Froleptan/stablediffusion-infinity/process.py
+++ /dev/null
@@ -1,395 +0,0 @@
-"""
-https://github.com/Trinkle23897/Fast-Poisson-Image-Editing
-MIT License
-
-Copyright (c) 2022 Jiayi Weng
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-"""
-import os
-from abc import ABC, abstractmethod
-from typing import Any, Optional, Tuple
-
-import numpy as np
-
-from fpie import np_solver
-
-import scipy
-import scipy.signal
-
-CPU_COUNT = os.cpu_count() or 1
-DEFAULT_BACKEND = "numpy"
-ALL_BACKEND = ["numpy"]
-
-try:
- from fpie import numba_solver
- ALL_BACKEND += ["numba"]
- DEFAULT_BACKEND = "numba"
-except ImportError:
- numba_solver = None # type: ignore
-
-try:
- from fpie import taichi_solver
- ALL_BACKEND += ["taichi-cpu", "taichi-gpu"]
- DEFAULT_BACKEND = "taichi-cpu"
-except ImportError:
- taichi_solver = None # type: ignore
-
-# try:
-# from fpie import core_gcc # type: ignore
-# DEFAULT_BACKEND = "gcc"
-# ALL_BACKEND.append("gcc")
-# except ImportError:
-# core_gcc = None
-
-# try:
-# from fpie import core_openmp # type: ignore
-# DEFAULT_BACKEND = "openmp"
-# ALL_BACKEND.append("openmp")
-# except ImportError:
-# core_openmp = None
-
-# try:
-# from mpi4py import MPI
-
-# from fpie import core_mpi # type: ignore
-# ALL_BACKEND.append("mpi")
-# except ImportError:
-# MPI = None # type: ignore
-# core_mpi = None
-
-try:
- from fpie import core_cuda # type: ignore
- DEFAULT_BACKEND = "cuda"
- ALL_BACKEND.append("cuda")
-except ImportError:
- core_cuda = None
-
-
-class BaseProcessor(ABC):
- """API definition for processor class."""
-
- def __init__(
- self, gradient: str, rank: int, backend: str, core: Optional[Any]
- ):
- if core is None:
- error_msg = {
- "numpy":
- "Please run `pip install numpy`.",
- "numba":
- "Please run `pip install numba`.",
- "gcc":
- "Please install cmake and gcc in your operating system.",
- "openmp":
- "Please make sure your gcc is compatible with `-fopenmp` option.",
- "mpi":
- "Please install MPI and run `pip install mpi4py`.",
- "cuda":
- "Please make sure nvcc and cuda-related libraries are available.",
- "taichi":
- "Please run `pip install taichi`.",
- }
- print(error_msg[backend.split("-")[0]])
-
- raise AssertionError(f"Invalid backend {backend}.")
-
- self.gradient = gradient
- self.rank = rank
- self.backend = backend
- self.core = core
- self.root = rank == 0
-
- def mixgrad(self, a: np.ndarray, b: np.ndarray) -> np.ndarray:
- if self.gradient == "src":
- return a
- if self.gradient == "avg":
- return (a + b) / 2
- # mix gradient, see Equ. 12 in PIE paper
- mask = np.abs(a) < np.abs(b)
- a[mask] = b[mask]
- return a
-
- @abstractmethod
- def reset(
- self,
- src: np.ndarray,
- mask: np.ndarray,
- tgt: np.ndarray,
- mask_on_src: Tuple[int, int],
- mask_on_tgt: Tuple[int, int],
- ) -> int:
- pass
-
- def sync(self) -> None:
- self.core.sync()
-
- @abstractmethod
- def step(self, iteration: int) -> Optional[Tuple[np.ndarray, np.ndarray]]:
- pass
-
-
-class EquProcessor(BaseProcessor):
- """PIE Jacobi equation processor."""
-
- def __init__(
- self,
- gradient: str = "max",
- backend: str = DEFAULT_BACKEND,
- n_cpu: int = CPU_COUNT,
- min_interval: int = 100,
- block_size: int = 1024,
- ):
- core: Optional[Any] = None
- rank = 0
-
- if backend == "numpy":
- core = np_solver.EquSolver()
- elif backend == "numba" and numba_solver is not None:
- core = numba_solver.EquSolver()
- elif backend == "gcc":
- core = core_gcc.EquSolver()
- elif backend == "openmp" and core_openmp is not None:
- core = core_openmp.EquSolver(n_cpu)
- elif backend == "mpi" and core_mpi is not None:
- core = core_mpi.EquSolver(min_interval)
- rank = MPI.COMM_WORLD.Get_rank()
- elif backend == "cuda" and core_cuda is not None:
- core = core_cuda.EquSolver(block_size)
- elif backend.startswith("taichi") and taichi_solver is not None:
- core = taichi_solver.EquSolver(backend, n_cpu, block_size)
-
- super().__init__(gradient, rank, backend, core)
-
- def mask2index(
- self, mask: np.ndarray
- ) -> Tuple[np.ndarray, int, np.ndarray, np.ndarray]:
- x, y = np.nonzero(mask)
- max_id = x.shape[0] + 1
- index = np.zeros((max_id, 3))
- ids = self.core.partition(mask)
- ids[mask == 0] = 0 # reserve id=0 for constant
- index = ids[x, y].argsort()
- return ids, max_id, x[index], y[index]
-
- def reset(
- self,
- src: np.ndarray,
- mask: np.ndarray,
- tgt: np.ndarray,
- mask_on_src: Tuple[int, int],
- mask_on_tgt: Tuple[int, int],
- ) -> int:
- assert self.root
- # check validity
- # assert 0 <= mask_on_src[0] and 0 <= mask_on_src[1]
- # assert mask_on_src[0] + mask.shape[0] <= src.shape[0]
- # assert mask_on_src[1] + mask.shape[1] <= src.shape[1]
- # assert mask_on_tgt[0] + mask.shape[0] <= tgt.shape[0]
- # assert mask_on_tgt[1] + mask.shape[1] <= tgt.shape[1]
-
- if len(mask.shape) == 3:
- mask = mask.mean(-1)
- mask = (mask >= 128).astype(np.int32)
-
- # zero-out edge
- mask[0] = 0
- mask[-1] = 0
- mask[:, 0] = 0
- mask[:, -1] = 0
-
- x, y = np.nonzero(mask)
- x0, x1 = x.min() - 1, x.max() + 2
- y0, y1 = y.min() - 1, y.max() + 2
- mask_on_src = (x0 + mask_on_src[0], y0 + mask_on_src[1])
- mask_on_tgt = (x0 + mask_on_tgt[0], y0 + mask_on_tgt[1])
- mask = mask[x0:x1, y0:y1]
- ids, max_id, index_x, index_y = self.mask2index(mask)
-
- src_x, src_y = index_x + mask_on_src[0], index_y + mask_on_src[1]
- tgt_x, tgt_y = index_x + mask_on_tgt[0], index_y + mask_on_tgt[1]
-
- src_C = src[src_x, src_y].astype(np.float32)
- src_U = src[src_x - 1, src_y].astype(np.float32)
- src_D = src[src_x + 1, src_y].astype(np.float32)
- src_L = src[src_x, src_y - 1].astype(np.float32)
- src_R = src[src_x, src_y + 1].astype(np.float32)
- tgt_C = tgt[tgt_x, tgt_y].astype(np.float32)
- tgt_U = tgt[tgt_x - 1, tgt_y].astype(np.float32)
- tgt_D = tgt[tgt_x + 1, tgt_y].astype(np.float32)
- tgt_L = tgt[tgt_x, tgt_y - 1].astype(np.float32)
- tgt_R = tgt[tgt_x, tgt_y + 1].astype(np.float32)
-
- grad = self.mixgrad(src_C - src_L, tgt_C - tgt_L) \
- + self.mixgrad(src_C - src_R, tgt_C - tgt_R) \
- + self.mixgrad(src_C - src_U, tgt_C - tgt_U) \
- + self.mixgrad(src_C - src_D, tgt_C - tgt_D)
-
- A = np.zeros((max_id, 4), np.int32)
- X = np.zeros((max_id, 3), np.float32)
- B = np.zeros((max_id, 3), np.float32)
-
- X[1:] = tgt[index_x + mask_on_tgt[0], index_y + mask_on_tgt[1]]
- # four-way
- A[1:, 0] = ids[index_x - 1, index_y]
- A[1:, 1] = ids[index_x + 1, index_y]
- A[1:, 2] = ids[index_x, index_y - 1]
- A[1:, 3] = ids[index_x, index_y + 1]
- B[1:] = grad
- m = (mask[index_x - 1, index_y] == 0).astype(float).reshape(-1, 1)
- B[1:] += m * tgt[index_x + mask_on_tgt[0] - 1, index_y + mask_on_tgt[1]]
- m = (mask[index_x, index_y - 1] == 0).astype(float).reshape(-1, 1)
- B[1:] += m * tgt[index_x + mask_on_tgt[0], index_y + mask_on_tgt[1] - 1]
- m = (mask[index_x, index_y + 1] == 0).astype(float).reshape(-1, 1)
- B[1:] += m * tgt[index_x + mask_on_tgt[0], index_y + mask_on_tgt[1] + 1]
- m = (mask[index_x + 1, index_y] == 0).astype(float).reshape(-1, 1)
- B[1:] += m * tgt[index_x + mask_on_tgt[0] + 1, index_y + mask_on_tgt[1]]
-
- self.tgt = tgt.copy()
- self.tgt_index = (index_x + mask_on_tgt[0], index_y + mask_on_tgt[1])
- self.core.reset(max_id, A, X, B)
- return max_id
-
- def step(self, iteration: int) -> Optional[Tuple[np.ndarray, np.ndarray]]:
- result = self.core.step(iteration)
- if self.root:
- x, err = result
- self.tgt[self.tgt_index] = x[1:]
- return self.tgt, err
- return None
-
-
-class GridProcessor(BaseProcessor):
- """PIE grid processor."""
-
- def __init__(
- self,
- gradient: str = "max",
- backend: str = DEFAULT_BACKEND,
- n_cpu: int = CPU_COUNT,
- min_interval: int = 100,
- block_size: int = 1024,
- grid_x: int = 8,
- grid_y: int = 8,
- ):
- core: Optional[Any] = None
- rank = 0
-
- if backend == "numpy":
- core = np_solver.GridSolver()
- elif backend == "numba" and numba_solver is not None:
- core = numba_solver.GridSolver()
- elif backend == "gcc":
- core = core_gcc.GridSolver(grid_x, grid_y)
- elif backend == "openmp" and core_openmp is not None:
- core = core_openmp.GridSolver(grid_x, grid_y, n_cpu)
- elif backend == "mpi" and core_mpi is not None:
- core = core_mpi.GridSolver(min_interval)
- rank = MPI.COMM_WORLD.Get_rank()
- elif backend == "cuda" and core_cuda is not None:
- core = core_cuda.GridSolver(grid_x, grid_y)
- elif backend.startswith("taichi") and taichi_solver is not None:
- core = taichi_solver.GridSolver(
- grid_x, grid_y, backend, n_cpu, block_size
- )
-
- super().__init__(gradient, rank, backend, core)
-
- def reset(
- self,
- src: np.ndarray,
- mask: np.ndarray,
- tgt: np.ndarray,
- mask_on_src: Tuple[int, int],
- mask_on_tgt: Tuple[int, int],
- ) -> int:
- assert self.root
- # check validity
- # assert 0 <= mask_on_src[0] and 0 <= mask_on_src[1]
- # assert mask_on_src[0] + mask.shape[0] <= src.shape[0]
- # assert mask_on_src[1] + mask.shape[1] <= src.shape[1]
- # assert mask_on_tgt[0] + mask.shape[0] <= tgt.shape[0]
- # assert mask_on_tgt[1] + mask.shape[1] <= tgt.shape[1]
-
- if len(mask.shape) == 3:
- mask = mask.mean(-1)
- mask = (mask >= 128).astype(np.int32)
-
- # zero-out edge
- mask[0] = 0
- mask[-1] = 0
- mask[:, 0] = 0
- mask[:, -1] = 0
-
- x, y = np.nonzero(mask)
- x0, x1 = x.min() - 1, x.max() + 2
- y0, y1 = y.min() - 1, y.max() + 2
- mask = mask[x0:x1, y0:y1]
- max_id = np.prod(mask.shape)
-
- src_crop = src[mask_on_src[0] + x0:mask_on_src[0] + x1,
- mask_on_src[1] + y0:mask_on_src[1] + y1].astype(np.float32)
- tgt_crop = tgt[mask_on_tgt[0] + x0:mask_on_tgt[0] + x1,
- mask_on_tgt[1] + y0:mask_on_tgt[1] + y1].astype(np.float32)
- grad = np.zeros([*mask.shape, 3], np.float32)
- grad[1:] += self.mixgrad(
- src_crop[1:] - src_crop[:-1], tgt_crop[1:] - tgt_crop[:-1]
- )
- grad[:-1] += self.mixgrad(
- src_crop[:-1] - src_crop[1:], tgt_crop[:-1] - tgt_crop[1:]
- )
- grad[:, 1:] += self.mixgrad(
- src_crop[:, 1:] - src_crop[:, :-1], tgt_crop[:, 1:] - tgt_crop[:, :-1]
- )
- grad[:, :-1] += self.mixgrad(
- src_crop[:, :-1] - src_crop[:, 1:], tgt_crop[:, :-1] - tgt_crop[:, 1:]
- )
-
- grad[mask == 0] = 0
- if True:
- kernel = [[1] * 3 for _ in range(3)]
- nmask = mask.copy()
- nmask[nmask > 0] = 1
- res = scipy.signal.convolve2d(
- nmask, kernel, mode="same", boundary="fill", fillvalue=1
- )
- res[nmask < 1] = 0
- res[res == 9] = 0
- res[res > 0] = 1
- grad[res > 0] = 0
- # ylst, xlst = res.nonzero()
- # for y, x in zip(ylst, xlst):
- # grad[y,x]=0
- # for yi in range(-1,2):
- # for xi in range(-1,2):
- # grad[y+yi,x+xi]=0
- self.x0 = mask_on_tgt[0] + x0
- self.x1 = mask_on_tgt[0] + x1
- self.y0 = mask_on_tgt[1] + y0
- self.y1 = mask_on_tgt[1] + y1
- self.tgt = tgt.copy()
- self.core.reset(max_id, mask, tgt_crop, grad)
- return max_id
-
- def step(self, iteration: int) -> Optional[Tuple[np.ndarray, np.ndarray]]:
- result = self.core.step(iteration)
- if self.root:
- tgt, err = result
- self.tgt[self.x0:self.x1, self.y0:self.y1] = tgt
- return self.tgt, err
- return None
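To show how the processors above are driven, here is a hedged usage sketch with the pure-NumPy backend. The images are random placeholders, the iteration count is arbitrary, and the snippet assumes the `fpie` package imported at the top of this module is installed.

```python
# Sketch only: blend a masked region of `src` into `tgt` with the numpy grid solver.
import numpy as np

src = np.random.randint(0, 255, (64, 64, 3)).astype(np.uint8)
tgt = np.random.randint(0, 255, (64, 64, 3)).astype(np.uint8)
mask = np.zeros((64, 64), dtype=np.uint8)
mask[16:48, 16:48] = 255                      # region to paste (values >= 128 count as inside)

proc = GridProcessor(gradient="max", backend="numpy")
n_unknowns = proc.reset(src, mask, tgt, mask_on_src=(0, 0), mask_on_tgt=(0, 0))
blended, err = proc.step(iteration=200)       # run 200 Jacobi iterations; returns image and residual
```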
diff --git a/spaces/Frorozcol/music_recommedation/app.py b/spaces/Frorozcol/music_recommedation/app.py
deleted file mode 100644
index 463f742053bc97379a35c177c6a7b9ea661d3948..0000000000000000000000000000000000000000
--- a/spaces/Frorozcol/music_recommedation/app.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import streamlit as st
-import librosa
-from src.preprosecing import preprosecing
-st.write("Music Genre Classification")
-#st.set_page_config(page_title="Upload Music", page_icon=":musical_note:", layout="wide")
-
-def main():
- uploaded_file = st.file_uploader("Choose a music file", type=["mp3"])
-
- if uploaded_file is not None:
- uploaded_file, features = preprosecing(uploaded_file)
- st.audio(uploaded_file, format='audio/wav')
- st.success("30 secs audio snippet")
- st.success("File uploaded successfully")
- st.write(f"Predict the genre of the music: {features[0]}")
- st.write("Internal classification: ", features[1])
- else:
- st.warning("Please upload a file of type: mp3")
-
-if __name__ == "__main__":
- main()
\ No newline at end of file
diff --git a/spaces/GEM/submission-form/utils.py b/spaces/GEM/submission-form/utils.py
deleted file mode 100644
index ee0f9d75a7825867169980480e93b8023ad8bdd4..0000000000000000000000000000000000000000
--- a/spaces/GEM/submission-form/utils.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import json
-
-import jsonschema
-import requests
-
-
-def load_schema():
- """Load the GEM schema"""
- with open("schema.json", "r", encoding="utf8") as file:
- schema = json.load(file)
- return schema
-
-
-def validate_json(json_data):
- execute_api_schema = load_schema()
- try:
- jsonschema.validate(instance=json_data, schema=execute_api_schema)
- except jsonschema.exceptions.ValidationError as err:
- err = "❌ Submission does not match GEM schema. Please fix the submission file 🙈"
- return False, err
-
- message = "✅ Submission matches GEM schema!"
- return True, message
-
-
-def get_auth_headers(token: str, prefix: str = "autonlp"):
- return {"Authorization": f"{prefix} {token}"}
-
-
-def http_post(path: str, token: str, payload=None, domain: str = None, params=None) -> requests.Response:
- """HTTP POST request to the AutoNLP API, raises UnreachableAPIError if the API cannot be reached"""
- try:
- response = requests.post(
- url=domain + path, json=payload, headers=get_auth_headers(token=token), allow_redirects=True, params=params
- )
- except requests.exceptions.ConnectionError:
- print("❌ Failed to reach AutoNLP API, check your internet connection")
- response.raise_for_status()
- return response
-
-
-def http_get(
- path: str,
- token: str,
- domain: str = None,
-) -> requests.Response:
- """HTTP POST request to the AutoNLP API, raises UnreachableAPIError if the API cannot be reached"""
- try:
- response = requests.get(url=domain + path, headers=get_auth_headers(token=token), allow_redirects=True)
- except requests.exceptions.ConnectionError:
- print("❌ Failed to reach AutoNLP API, check your internet connection")
- response.raise_for_status()
- return response
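For context, a small hypothetical usage sketch of the validation helper above; it assumes a local `schema.json` (which `load_schema()` reads) and the submission file name is a placeholder, not a real GEM submission.

```python
# Sketch only: validate an on-disk submission against the GEM schema.
import json

with open("my_submission.json", "r", encoding="utf8") as f:   # hypothetical file name
    submission = json.load(f)

is_valid, message = validate_json(submission)
print(is_valid, message)
```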
diff --git a/spaces/Goutam982/RVC_V2_voice_clone/utils.py b/spaces/Goutam982/RVC_V2_voice_clone/utils.py
deleted file mode 100644
index 62be8d03a8e8b839f8747310ef0ec0e82fb8ff0a..0000000000000000000000000000000000000000
--- a/spaces/Goutam982/RVC_V2_voice_clone/utils.py
+++ /dev/null
@@ -1,151 +0,0 @@
-import ffmpeg
-import numpy as np
-
-# import praatio
-# import praatio.praat_scripts
-import os
-import sys
-
-import random
-
-import csv
-
-platform_stft_mapping = {
- "linux": "stftpitchshift",
- "darwin": "stftpitchshift",
- "win32": "stftpitchshift.exe",
-}
-
-stft = platform_stft_mapping.get(sys.platform)
-# praatEXE = join('.',os.path.abspath(os.getcwd()) + r"\Praat.exe")
-
-
-def CSVutil(file, rw, type, *args):
- if type == "formanting":
- if rw == "r":
- with open(file) as fileCSVread:
- csv_reader = list(csv.reader(fileCSVread))
- return (
- (csv_reader[0][0], csv_reader[0][1], csv_reader[0][2])
- if csv_reader is not None
- else (lambda: exec('raise ValueError("No data")'))()
- )
- else:
- if args:
- doformnt = args[0]
- else:
- doformnt = False
- qfr = args[1] if len(args) > 1 else 1.0
- tmb = args[2] if len(args) > 2 else 1.0
- with open(file, rw, newline="") as fileCSVwrite:
- csv_writer = csv.writer(fileCSVwrite, delimiter=",")
- csv_writer.writerow([doformnt, qfr, tmb])
- elif type == "stop":
- stop = args[0] if args else False
- with open(file, rw, newline="") as fileCSVwrite:
- csv_writer = csv.writer(fileCSVwrite, delimiter=",")
- csv_writer.writerow([stop])
-
-
-def load_audio(file, sr, DoFormant, Quefrency, Timbre):
- converted = False
- DoFormant, Quefrency, Timbre = CSVutil("csvdb/formanting.csv", "r", "formanting")
- try:
- # https://github.com/openai/whisper/blob/main/whisper/audio.py#L26
- # This launches a subprocess to decode audio while down-mixing and resampling as necessary.
- # Requires the ffmpeg CLI and `ffmpeg-python` package to be installed.
- file = (
- file.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
- ) # strip stray spaces, quotes and newlines that users often copy in with the path
- file_formanted = file.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
-
- # print(f"dofor={bool(DoFormant)} timbr={Timbre} quef={Quefrency}\n")
-
- if (
- lambda DoFormant: True
- if DoFormant.lower() == "true"
- else (False if DoFormant.lower() == "false" else DoFormant)
- )(DoFormant):
- numerator = round(random.uniform(1, 4), 4)
- # os.system(f"stftpitchshift -i {file} -q {Quefrency} -t {Timbre} -o {file_formanted}")
- # print('stftpitchshift -i "%s" -p 1.0 --rms -w 128 -v 8 -q %s -t %s -o "%s"' % (file, Quefrency, Timbre, file_formanted))
-
- if not file.endswith(".wav"):
- if not os.path.isfile(f"{file_formanted}.wav"):
- converted = True
- # print(f"\nfile = {file}\n")
- # print(f"\nfile_formanted = {file_formanted}\n")
- converting = (
- ffmpeg.input(file_formanted, threads=0)
- .output(f"{file_formanted}.wav")
- .run(
- cmd=["ffmpeg", "-nostdin"],
- capture_stdout=True,
- capture_stderr=True,
- )
- )
- else:
- pass
-
- file_formanted = (
- f"{file_formanted}.wav"
- if not file_formanted.endswith(".wav")
- else file_formanted
- )
-
- print(f" · Formanting {file_formanted}...\n")
-
- os.system(
- '%s -i "%s" -q "%s" -t "%s" -o "%sFORMANTED_%s.wav"'
- % (
- stft,
- file_formanted,
- Quefrency,
- Timbre,
- file_formanted,
- str(numerator),
- )
- )
-
- print(f" · Formanted {file_formanted}!\n")
-
- # filepraat = (os.path.abspath(os.getcwd()) + '\\' + file).replace('/','\\')
- # file_formantedpraat = ('"' + os.path.abspath(os.getcwd()) + '/' + 'formanted'.join(file_formanted) + '"').replace('/','\\')
- # print("%sFORMANTED_%s.wav" % (file_formanted, str(numerator)))
-
- out, _ = (
- ffmpeg.input(
- "%sFORMANTED_%s.wav" % (file_formanted, str(numerator)), threads=0
- )
- .output("-", format="f32le", acodec="pcm_f32le", ac=1, ar=sr)
- .run(
- cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True
- )
- )
-
- try:
- os.remove("%sFORMANTED_%s.wav" % (file_formanted, str(numerator)))
- except Exception:
- pass
- print("couldn't remove formanted type of file")
-
- else:
- out, _ = (
- ffmpeg.input(file, threads=0)
- .output("-", format="f32le", acodec="pcm_f32le", ac=1, ar=sr)
- .run(
- cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True
- )
- )
- except Exception as e:
- raise RuntimeError(f"Failed to load audio: {e}")
-
- if converted:
- try:
- os.remove(file_formanted)
- except Exception:
- pass
- print("couldn't remove converted type of file")
- converted = False
-
- return np.frombuffer(out, np.float32).flatten()
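A hedged usage sketch for `load_audio`: it assumes the ffmpeg CLI is on PATH and that `csvdb/formanting.csv` exists, since the function re-reads the formanting settings from that file (the passed `DoFormant`, `Quefrency` and `Timbre` values are overwritten by it); the path and sample rate are placeholders.

```python
# Sketch only: decode an audio file to a mono float32 waveform at 16 kHz.
audio = load_audio("input.wav", 16000, DoFormant=False, Quefrency=8.0, Timbre=1.2)
print(audio.shape, audio.dtype)   # 1-D np.float32 array
```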
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py
deleted file mode 100644
index e77a7fa8d6b8c1ad7fe293bc932d621464287e0c..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py
+++ /dev/null
@@ -1,5 +0,0 @@
-_base_ = [
- '../_base_/models/faster_rcnn_r50_fpn.py',
- '../_base_/datasets/coco_detection.py',
- '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
-]
diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/fcn/fcn_d6_r101b-d16_769x769_80k_cityscapes.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/fcn/fcn_d6_r101b-d16_769x769_80k_cityscapes.py
deleted file mode 100644
index e3d4d884fd0c92b35dd428a55ce22255cecac497..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/fcn/fcn_d6_r101b-d16_769x769_80k_cityscapes.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = './fcn_d6_r50b-d16_769x769_80k_cityscapes.py'
-model = dict(
- pretrained='torchvision://resnet101',
- backbone=dict(type='ResNet', depth=101))
diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/exp/upernet_global_base/test.sh b/spaces/Gradio-Blocks/uniformer_image_segmentation/exp/upernet_global_base/test.sh
deleted file mode 100644
index d9a85e7a0d3b7c96b060f473d41254b37a382fcb..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_segmentation/exp/upernet_global_base/test.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/env bash
-
-work_path=$(dirname $0)
-PYTHONPATH="$(dirname $0)/../../":$PYTHONPATH \
-python -m torch.distributed.launch --nproc_per_node=8 \
- tools/test.py ${work_path}/test_config_h32.py \
- ${work_path}/ckpt/latest.pth \
- --launcher pytorch \
- --eval mIoU \
- 2>&1 | tee -a ${work_path}/log.txt
diff --git a/spaces/GroveStreet/GTA_SOVITS/vdecoder/hifigan/env.py b/spaces/GroveStreet/GTA_SOVITS/vdecoder/hifigan/env.py
deleted file mode 100644
index 2bdbc95d4f7a8bad8fd4f5eef657e2b51d946056..0000000000000000000000000000000000000000
--- a/spaces/GroveStreet/GTA_SOVITS/vdecoder/hifigan/env.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import os
-import shutil
-
-
-class AttrDict(dict):
- def __init__(self, *args, **kwargs):
- super(AttrDict, self).__init__(*args, **kwargs)
- self.__dict__ = self
-
-
-def build_env(config, config_name, path):
- t_path = os.path.join(path, config_name)
- if config != t_path:
- os.makedirs(path, exist_ok=True)
- shutil.copyfile(config, os.path.join(path, config_name))
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/distributed/module_proxy_wrapper.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/distributed/module_proxy_wrapper.py
deleted file mode 100644
index fc2c6f8c718f2ac8ece308e50f7ba74a05474f4a..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/distributed/module_proxy_wrapper.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from torch import nn
-
-
-class ModuleProxyWrapper(nn.Module):
- """
- Wrap a DistributedDataParallel module and forward requests for missing
- attributes to the module wrapped by DDP (the twice-wrapped module).
- Also forward calls to :func:`state_dict` and :func:`load_state_dict`.
-
- Usage::
-
- module.xyz = "hello world"
- wrapped_module = DistributedDataParallel(module, **ddp_args)
- wrapped_module = ModuleProxyWrapper(wrapped_module)
- assert wrapped_module.xyz == "hello world"
- assert wrapped_module.state_dict().keys() == module.state_dict().keys()
-
- Args:
- module (nn.Module): module to wrap
- """
-
- def __init__(self, module: nn.Module):
- super().__init__()
- assert hasattr(module, "module"), \
- "ModuleProxyWrapper expects input to wrap another module"
- self.module = module
-
- def __getattr__(self, name):
- """Forward missing attributes to twice-wrapped module."""
- try:
- # defer to nn.Module's logic
- return super().__getattr__(name)
- except AttributeError:
- try:
- # forward to the once-wrapped module
- return getattr(self.module, name)
- except AttributeError:
- # forward to the twice-wrapped module
- return getattr(self.module.module, name)
-
- def state_dict(self, *args, **kwargs):
- """Forward to the twice-wrapped module."""
- return self.module.module.state_dict(*args, **kwargs)
-
- def load_state_dict(self, *args, **kwargs):
- """Forward to the twice-wrapped module."""
- return self.module.module.load_state_dict(*args, **kwargs)
-
- def forward(self, *args, **kwargs):
- return self.module(*args, **kwargs)
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/bart/model.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/bart/model.py
deleted file mode 100644
index 71d0b27cd2c0655fe3b00479b672d6d042a4d5ed..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/bart/model.py
+++ /dev/null
@@ -1,384 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-"""
-BART: Denoising Sequence-to-Sequence Pre-training for
-Natural Language Generation, Translation, and Comprehension
-"""
-from typing import Optional
-
-import logging
-
-import torch
-import torch.nn as nn
-from fairseq import utils
-from fairseq.models import register_model, register_model_architecture
-from fairseq.models.transformer import TransformerModel
-from fairseq.modules.transformer_sentence_encoder import init_bert_params
-
-from .hub_interface import BARTHubInterface
-
-
-logger = logging.getLogger(__name__)
-
-
-@register_model("bart")
-class BARTModel(TransformerModel):
- __jit_unused_properties__ = ["supported_targets"]
-
- @classmethod
- def hub_models(cls):
- return {
- "bart.base": "http://dl.fbaipublicfiles.com/fairseq/models/bart.base.tar.gz",
- "bart.large": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.tar.gz",
- "bart.large.mnli": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.mnli.tar.gz",
- "bart.large.cnn": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.cnn.tar.gz",
- "bart.large.xsum": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.xsum.tar.gz",
- }
-
- def __init__(self, args, encoder, decoder):
- super().__init__(args, encoder, decoder)
-
- # We follow BERT's random weight initialization
- self.apply(init_bert_params)
-
- self.classification_heads = nn.ModuleDict()
- if hasattr(self.encoder, "dictionary"):
- self.eos: int = self.encoder.dictionary.eos()
-
- @staticmethod
- def add_args(parser):
- super(BARTModel, BARTModel).add_args(parser)
- parser.add_argument(
- "--pooler-dropout",
- type=float,
- metavar="D",
- help="dropout probability in the masked_lm pooler layers",
- )
- parser.add_argument(
- "--pooler-activation-fn",
- choices=utils.get_available_activation_fns(),
- help="activation function to use for pooler layer",
- )
- parser.add_argument(
- "--spectral-norm-classification-head",
- action="store_true",
- help="Apply spectral normalization on the classification head",
- )
-
- @property
- def supported_targets(self):
- return {"self"}
-
- def forward(
- self,
- src_tokens,
- src_lengths,
- prev_output_tokens,
- features_only: bool = False,
- classification_head_name: Optional[str] = None,
- token_embeddings: Optional[torch.Tensor] = None,
- return_all_hiddens: bool = True,
- alignment_layer: Optional[int] = None,
- alignment_heads: Optional[int] = None,
- ):
- if classification_head_name is not None:
- features_only = True
-
- encoder_out = self.encoder(
- src_tokens,
- src_lengths=src_lengths,
- token_embeddings=token_embeddings,
- return_all_hiddens=return_all_hiddens
- )
- x, extra = self.decoder(
- prev_output_tokens,
- encoder_out=encoder_out,
- features_only=features_only,
- alignment_layer=alignment_layer,
- alignment_heads=alignment_heads,
- src_lengths=src_lengths,
- return_all_hiddens=return_all_hiddens,
- )
- eos: int = self.eos
- if classification_head_name is not None:
- sentence_representation = x[
- src_tokens.eq(eos), :
- ].view(x.size(0), -1, x.size(-1))[:, -1, :]
- for k, head in self.classification_heads.items():
- # for torch script only supports iteration
- if k == classification_head_name:
- x = head(sentence_representation)
- break
- return x, extra
-
- @classmethod
- def from_pretrained(
- cls,
- model_name_or_path,
- checkpoint_file="model.pt",
- data_name_or_path=".",
- bpe="gpt2",
- sample_break_mode="eos",
- **kwargs,
- ):
- from fairseq import hub_utils
-
- x = hub_utils.from_pretrained(
- model_name_or_path,
- checkpoint_file,
- data_name_or_path,
- archive_map=cls.hub_models(),
- bpe=bpe,
- load_checkpoint_heads=True,
- sample_break_mode=sample_break_mode,
- **kwargs,
- )
- return BARTHubInterface(x["args"], x["task"], x["models"][0])
-
- def register_classification_head(
- self, name, num_classes=None, inner_dim=None, **kwargs
- ):
- """Register a classification head."""
- logger.info("Registering classification head: {0}".format(name))
- if name in self.classification_heads:
- prev_num_classes = self.classification_heads[name].out_proj.out_features
- prev_inner_dim = self.classification_heads[name].dense.out_features
- if num_classes != prev_num_classes or inner_dim != prev_inner_dim:
- logger.warning(
- 're-registering head "{}" with num_classes {} (prev: {}) '
- "and inner_dim {} (prev: {})".format(
- name, num_classes, prev_num_classes, inner_dim, prev_inner_dim
- )
- )
- self.classification_heads[name] = BARTClassificationHead(
- input_dim=self.args.encoder_embed_dim,
- inner_dim=inner_dim or self.args.encoder_embed_dim,
- num_classes=num_classes,
- activation_fn=self.args.pooler_activation_fn,
- pooler_dropout=self.args.pooler_dropout,
- do_spectral_norm=getattr(
- self.args, "spectral_norm_classification_head", False
- ),
- )
-
- def upgrade_state_dict_named(self, state_dict, name):
- super().upgrade_state_dict_named(state_dict, name)
-
- prefix = name + "." if name != "" else ""
- current_head_names = (
- []
- if not hasattr(self, "classification_heads")
- else self.classification_heads.keys()
- )
-
- # Handle new classification heads present in the state dict.
- keys_to_delete = []
- for k in state_dict.keys():
- if not k.startswith(prefix + "classification_heads."):
- continue
-
- head_name = k[len(prefix + "classification_heads.") :].split(".")[0]
- num_classes = state_dict[
- prefix + "classification_heads." + head_name + ".out_proj.weight"
- ].size(0)
- inner_dim = state_dict[
- prefix + "classification_heads." + head_name + ".dense.weight"
- ].size(0)
-
- if getattr(self.args, "load_checkpoint_heads", False):
- if head_name not in current_head_names:
- self.register_classification_head(head_name, num_classes, inner_dim)
- else:
- if head_name not in current_head_names:
- logger.warning(
- "deleting classification head ({}) from checkpoint "
- "not present in current model: {}".format(head_name, k)
- )
- keys_to_delete.append(k)
- elif (
- num_classes
- != self.classification_heads[head_name].out_proj.out_features
- or inner_dim
- != self.classification_heads[head_name].dense.out_features
- ):
- logger.warning(
- "deleting classification head ({}) from checkpoint "
- "with different dimensions than current model: {}".format(
- head_name, k
- )
- )
- keys_to_delete.append(k)
- for k in keys_to_delete:
- del state_dict[k]
-
- def truncate_emb(key):
- if key in state_dict:
- state_dict[key] = state_dict[key][:-1, :]
-
- # When finetuning on translation task, remove last row of
- # embedding matrix that corresponds to mask_idx token.
- loaded_dict_size = state_dict["encoder.embed_tokens.weight"].size(0)
- if (
- loaded_dict_size == len(self.encoder.dictionary) + 1
-            and "<mask>" not in self.encoder.dictionary
- ):
- truncate_emb("encoder.embed_tokens.weight")
- truncate_emb("decoder.embed_tokens.weight")
- truncate_emb("encoder.output_projection.weight")
- truncate_emb("decoder.output_projection.weight")
-
- # When continued pretraining on new set of languages for mbart,
- # add extra lang embeddings at the end of embed_tokens.
- # Note: newly added languages are assumed to have been added at the end.
- if self.args.task == "multilingual_denoising" and loaded_dict_size < len(
- self.encoder.dictionary
- ):
- logger.info(
- "Adding extra language embeddings not found in pretrained model for "
- "continued pretraining of MBART on new set of languages."
- )
- loaded_mask_token_embedding = state_dict["encoder.embed_tokens.weight"][
- -1, :
- ]
-
- num_langids_to_add = len(self.encoder.dictionary) - loaded_dict_size
- embed_dim = state_dict["encoder.embed_tokens.weight"].size(1)
-
- new_lang_embed_to_add = torch.zeros(num_langids_to_add, embed_dim)
- nn.init.normal_(new_lang_embed_to_add, mean=0, std=embed_dim ** -0.5)
- new_lang_embed_to_add = new_lang_embed_to_add.to(
- dtype=state_dict["encoder.embed_tokens.weight"].dtype,
- )
-
- state_dict["encoder.embed_tokens.weight"] = torch.cat(
- [
- state_dict["encoder.embed_tokens.weight"][
- : loaded_dict_size - 1, :
- ],
- new_lang_embed_to_add,
- loaded_mask_token_embedding.unsqueeze(0),
- ]
- )
- state_dict["decoder.embed_tokens.weight"] = torch.cat(
- [
- state_dict["decoder.embed_tokens.weight"][
- : loaded_dict_size - 1, :
- ],
- new_lang_embed_to_add,
- loaded_mask_token_embedding.unsqueeze(0),
- ]
- )
-
- # Copy any newly-added classification heads into the state dict
- # with their current weights.
- if hasattr(self, "classification_heads"):
- cur_state = self.classification_heads.state_dict()
- for k, v in cur_state.items():
- if prefix + "classification_heads." + k not in state_dict:
- logger.info("Overwriting " + prefix + "classification_heads." + k)
- state_dict[prefix + "classification_heads." + k] = v
-
-
-class BARTClassificationHead(nn.Module):
- """Head for sentence-level classification tasks."""
-
- def __init__(
- self,
- input_dim,
- inner_dim,
- num_classes,
- activation_fn,
- pooler_dropout,
- do_spectral_norm=False,
- ):
- super().__init__()
- self.dense = nn.Linear(input_dim, inner_dim)
- self.activation_fn = utils.get_activation_fn(activation_fn)
- self.dropout = nn.Dropout(p=pooler_dropout)
- self.out_proj = nn.Linear(inner_dim, num_classes)
-
- if do_spectral_norm:
- self.out_proj = torch.nn.utils.spectral_norm(self.out_proj)
-
- def forward(self, features, **kwargs):
- x = features
- x = self.dropout(x)
- x = self.dense(x)
- x = self.activation_fn(x)
- x = self.dropout(x)
- x = self.out_proj(x)
- return x
-
-
-@register_model_architecture("bart", "bart_large")
-def bart_large_architecture(args):
- args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
- args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
- args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4 * 1024)
- args.encoder_layers = getattr(args, "encoder_layers", 12)
- args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
- args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
- args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True)
- args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
- args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
- args.decoder_ffn_embed_dim = getattr(
- args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
- )
- args.decoder_layers = getattr(args, "decoder_layers", 12)
- args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
- args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
- args.decoder_learned_pos = getattr(args, "decoder_learned_pos", True)
- args.attention_dropout = getattr(args, "attention_dropout", 0.0)
- args.relu_dropout = getattr(args, "relu_dropout", 0.0)
- args.dropout = getattr(args, "dropout", 0.1)
- args.max_target_positions = getattr(args, "max_target_positions", 1024)
- args.max_source_positions = getattr(args, "max_source_positions", 1024)
- args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
- args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
- args.share_decoder_input_output_embed = getattr(
- args, "share_decoder_input_output_embed", True
- )
- args.share_all_embeddings = getattr(args, "share_all_embeddings", True)
-
- args.decoder_output_dim = getattr(
- args, "decoder_output_dim", args.decoder_embed_dim
- )
- args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
-
- args.no_scale_embedding = getattr(args, "no_scale_embedding", True)
- args.layernorm_embedding = getattr(args, "layernorm_embedding", True)
-
- args.activation_fn = getattr(args, "activation_fn", "gelu")
- args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh")
- args.pooler_dropout = getattr(args, "pooler_dropout", 0.0)
-
-
-@register_model_architecture("bart", "bart_base")
-def bart_base_architecture(args):
- args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
- args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4 * 768)
- args.encoder_layers = getattr(args, "encoder_layers", 6)
- args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12)
- args.decoder_layers = getattr(args, "decoder_layers", 6)
- args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 12)
- bart_large_architecture(args)
-
-
-@register_model_architecture("bart", "mbart_large")
-def mbart_large_architecture(args):
- args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
- bart_large_architecture(args)
-
-
-@register_model_architecture("bart", "mbart_base")
-def mbart_base_architecture(args):
- args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
- bart_base_architecture(args)
-
-
-@register_model_architecture("bart", "mbart_base_wmt20")
-def mbart_base_wmt20_architecture(args):
- args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
- mbart_base_architecture(args)
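
The deleted BART model above pools the decoder output at the last EOS position and feeds it through a small feed-forward head. Below is a minimal, self-contained sketch of that pooling-plus-head pattern in plain PyTorch; the tensor sizes, token ids, and class count are illustrative assumptions, not part of the fairseq API.

```python
import torch
import torch.nn as nn

class ClassificationHead(nn.Module):
    """dense -> tanh -> dropout -> projection, mirroring the deleted BARTClassificationHead."""

    def __init__(self, input_dim, inner_dim, num_classes, pooler_dropout=0.0):
        super().__init__()
        self.dense = nn.Linear(input_dim, inner_dim)
        self.dropout = nn.Dropout(p=pooler_dropout)
        self.out_proj = nn.Linear(inner_dim, num_classes)

    def forward(self, features):
        x = self.dropout(features)
        x = torch.tanh(self.dense(x))
        x = self.dropout(x)
        return self.out_proj(x)

# Toy decoder output: batch of 2 sequences, length 4, hidden size 16; eos id 2
# (all values here are made up for the sketch).
eos = 2
tokens = torch.tensor([[5, 7, 9, 2],
                       [4, 8, 6, 2]])
x = torch.randn(2, 4, 16)

# Every row is assumed to contain the same number of eos tokens; the feature
# vector at the last eos is used as the sentence representation.
sentence_repr = x[tokens.eq(eos), :].view(x.size(0), -1, x.size(-1))[:, -1, :]

head = ClassificationHead(input_dim=16, inner_dim=16, num_classes=3, pooler_dropout=0.1)
print(head(sentence_repr).shape)  # torch.Size([2, 3])
```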
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/utils/cider/pyciderevalcap/ciderD/ciderD.py b/spaces/HarryLee/eCommerceImageCaptioning/utils/cider/pyciderevalcap/ciderD/ciderD.py
deleted file mode 100644
index 280f9890312a76b54695b2a8c456c5d52a87e186..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/utils/cider/pyciderevalcap/ciderD/ciderD.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# Filename: ciderD.py
-#
-# Description: Describes the class to compute the CIDEr-D (Consensus-Based Image Description Evaluation) Metric
-# by Vedantam, Zitnick, and Parikh (http://arxiv.org/abs/1411.5726)
-#
-# Creation Date: Sun Feb 8 14:16:54 2015
-#
-# Authors: Ramakrishna Vedantam and Tsung-Yi Lin
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from .ciderD_scorer import CiderScorer
-import pdb
-
-class CiderD:
- """
- Main Class to compute the CIDEr metric
-
- """
- def __init__(self, n=4, sigma=6.0, df="corpus"):
- # set cider to sum over 1 to 4-grams
- self._n = n
- # set the standard deviation parameter for gaussian penalty
- self._sigma = sigma
-        # set where to compute document frequencies from
- self._df = df
- self.cider_scorer = CiderScorer(n=self._n, df_mode=self._df)
-
- def compute_score(self, gts, res):
- """
- Main function to compute CIDEr score
-        :param hypo_for_image (dict) : dictionary with key <image> and value <tokenized hypothesis / candidate sentence>
-               ref_for_image (dict) : dictionary with key <image> and value <tokenized reference sentence>
- :return: cider (float) : computed CIDEr score for the corpus
- """
-
- # clear all the previous hypos and refs
- tmp_cider_scorer = self.cider_scorer.copy_empty()
- tmp_cider_scorer.clear()
- for res_id in res:
-
- hypo = res_id['caption']
- ref = gts[res_id['image_id']]
-
- # Sanity check.
- assert(type(hypo) is list)
- assert(len(hypo) == 1)
- assert(type(ref) is list)
- assert(len(ref) > 0)
- tmp_cider_scorer += (hypo[0], ref)
-
- (score, scores) = tmp_cider_scorer.compute_score()
-
- return score, scores
-
- def method(self):
- return "CIDEr-D"
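
Callers of the deleted CiderD.compute_score pass `res` as a list of single-hypothesis records and `gts` as a mapping from image id to reference lists. A small sketch of that input layout and the pairing loop follows; the ids and captions are invented, and no CIDEr value is computed here.

```python
# Illustrative layout of the inputs compute_score expects.
gts = {
    "img_1": ["a dog runs on the grass", "a brown dog running outside"],
    "img_2": ["two people ride bicycles", "cyclists on a city street"],
}
res = [
    {"image_id": "img_1", "caption": ["a dog is running on grass"]},
    {"image_id": "img_2", "caption": ["people riding bikes in the street"]},
]

# The same pairing and sanity checks the deleted loop performs.
pairs = []
for item in res:
    hypo = item["caption"]
    refs = gts[item["image_id"]]
    assert isinstance(hypo, list) and len(hypo) == 1
    assert isinstance(refs, list) and len(refs) > 0
    pairs.append((hypo[0], refs))

print(len(pairs))  # 2
```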
diff --git a/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/tts_infer/example_inference.py b/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/tts_infer/example_inference.py
deleted file mode 100644
index 676718fff3c6a7120cea91b0cfc95f8872929da7..0000000000000000000000000000000000000000
--- a/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/tts_infer/example_inference.py
+++ /dev/null
@@ -1,79 +0,0 @@
-''' Example file to test tts_infer after installing it. Refer to section 1.1 in README.md for steps of installation. '''
-
-from tts_infer.tts import TextToMel, MelToWav
-from tts_infer.transliterate import XlitEngine
-from tts_infer.num_to_word_on_sent import normalize_nums
-
-import re
-import numpy as np
-from scipy.io.wavfile import write
-
-from mosestokenizer import *
-from indicnlp.tokenize import sentence_tokenize
-
-INDIC = ["as", "bn", "gu", "hi", "kn", "ml", "mr", "or", "pa", "ta", "te"]
-
-def split_sentences(paragraph, language):
- if language == "en":
- with MosesSentenceSplitter(language) as splitter:
- return splitter([paragraph])
- elif language in INDIC:
- return sentence_tokenize.sentence_split(paragraph, lang=language)
-
-
-device='cpu'
-text_to_mel = TextToMel(glow_model_dir='/path/to/glow_ckp', device=device)
-mel_to_wav = MelToWav(hifi_model_dir='/path/to/hifi_ckp', device=device)
-
-lang='hi' # transliteration from En to Hi
-engine = XlitEngine(lang) # loading translit model globally
-
-def translit(text, lang):
- reg = re.compile(r'[a-zA-Z]')
- words = [engine.translit_word(word, topk=1)[lang][0] if reg.match(word) else word for word in text.split()]
- updated_sent = ' '.join(words)
- return updated_sent
-
-def run_tts(text, lang):
- text = text.replace('।', '.') # only for hindi models
- text_num_to_word = normalize_nums(text, lang) # converting numbers to words in lang
- text_num_to_word_and_transliterated = translit(text_num_to_word, lang) # transliterating english words to lang
- final_text = ' ' + text_num_to_word_and_transliterated
-
- mel = text_to_mel.generate_mel(final_text)
- audio, sr = mel_to_wav.generate_wav(mel)
- write(filename='temp.wav', rate=sr, data=audio) # for saving wav file, if needed
- return (sr, audio)
-
-def run_tts_paragraph(text, lang):
- audio_list = []
-    split_sentences_list = split_sentences(text, language=lang)
-
- for sent in split_sentences_list:
- sr, audio = run_tts(sent, lang)
- audio_list.append(audio)
-
-    concatenated_audio = np.concatenate(audio_list)
- write(filename='temp_long.wav', rate=sr, data=concatenated_audio)
- return (sr, concatenated_audio)
-
-if __name__ == "__main__":
- _, audio = run_tts('mera naam neeraj hai', 'hi')
-
- para = '''
- भारत मेरा देश है और मुझे भारतीय होने पर गर्व है। ये विश्व का सातवाँ सबसे बड़ा और विश्व में दूसरा सबसे अधिक जनसंख्या वाला देश है।
- इसे भारत, हिन्दुस्तान और आर्यव्रत के नाम से भी जाना जाता है। ये एक प्रायद्वीप है जो पूरब में बंगाल की खाड़ी,
- पश्चिम में अरेबियन सागर और दक्षिण में भारतीय महासागर जैसे तीन महासगरों से घिरा हुआ है।
- भारत का राष्ट्रीय पशु चीता, राष्ट्रीय पक्षी मोर, राष्ट्रीय फूल कमल, और राष्ट्रीय फल आम है।
- भारत मेरा देश है और मुझे भारतीय होने पर गर्व है। ये विश्व का सातवाँ सबसे बड़ा और विश्व में दूसरा सबसे अधिक जनसंख्या वाला देश है।
- इसे भारत, हिन्दुस्तान और आर्यव्रत के नाम से भी जाना जाता है। ये एक प्रायद्वीप है जो पूरब में बंगाल की खाड़ी,
- पश्चिम में अरेबियन सागर और दक्षिण में भारतीय महासागर जैसे तीन महासगरों से घिरा हुआ है।
- भारत का राष्ट्रीय पशु चीता, राष्ट्रीय पक्षी मोर, राष्ट्रीय फूल कमल, और राष्ट्रीय फल आम है।
- भारत मेरा देश है और मुझे भारतीय होने पर गर्व है। ये विश्व का सातवाँ सबसे बड़ा और विश्व में दूसरा सबसे अधिक जनसंख्या वाला देश है।
- इसे भारत, हिन्दुस्तान और आर्यव्रत के नाम से भी जाना जाता है। ये एक प्रायद्वीप है जो पूरब में बंगाल की खाड़ी,
- पश्चिम में अरेबियन सागर और दक्षिण में भारतीय महासागर जैसे तीन महासगरों से घिरा हुआ है।
- भारत का राष्ट्रीय पशु चीता, राष्ट्रीय पक्षी मोर, राष्ट्रीय फूल कमल, और राष्ट्रीय फल आम है।
- '''
-
- print('Num chars in paragraph: ', len(para))
- _, audio_long = run_tts_paragraph(para, 'hi')
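
The deleted example splits a paragraph into sentences, synthesizes each one, and concatenates the waveforms. The sketch below reproduces only that control flow with a dummy `synthesize()` stand-in for TextToMel/MelToWav; the sample rate and durations are placeholder values.

```python
import numpy as np

def synthesize(sentence, sr=22050):
    """Stand-in for the TextToMel + MelToWav pair used above: returns
    (sample_rate, waveform). Here it just emits silence scaled to text length."""
    duration_s = 0.05 * max(len(sentence), 1)
    return sr, np.zeros(int(sr * duration_s), dtype=np.float32)

def tts_paragraph(sentences):
    """Synthesize each sentence and concatenate, mirroring run_tts_paragraph."""
    chunks = []
    sr = None
    for sent in sentences:
        sr, audio = synthesize(sent)
        chunks.append(audio)
    return sr, np.concatenate(chunks)

sr, audio = tts_paragraph(["pahla vakya.", "doosra vakya."])
print(sr, audio.shape)
```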
diff --git a/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/utils/inference/run_gradio.py b/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/utils/inference/run_gradio.py
deleted file mode 100644
index 19aa3218dae66961e268c458c4626f18dd20021a..0000000000000000000000000000000000000000
--- a/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/utils/inference/run_gradio.py
+++ /dev/null
@@ -1,60 +0,0 @@
-import gradio as gr
-import argparse
-import numpy as np
-from argparse import Namespace
-from .advanced_tts import load_all_models, run_tts_paragraph
-
-
-def hit_tts(textbox, gender, slider_noise_scale, slider_length_scale, choice_transliteration, choice_number_conversion, choice_split_sentences):
- inputs_to_gradio = {'text' : textbox,
- 'gender' : gender,
- 'noise_scale': slider_noise_scale,
-                        'length_scale': slider_length_scale,
- 'transliteration' : 1 if choice_transliteration else 0,
- 'number_conversion' : 1 if choice_number_conversion else 0,
- 'split_sentences' : 1 if choice_split_sentences else 0
- }
-
- args = Namespace(**inputs_to_gradio)
- args.wav = None
- args.lang = lang
- args.gender = gender
-
- if args.text:
- sr, audio = run_tts_paragraph(args)
- return (sr, audio)
-
-def build_gradio(args):
- global lang
- lang = args.lang
- load_all_models(args)
- textbox = gr.inputs.Textbox(placeholder="Enter Text to run", default="", label="Enter Input Text")
- gender = gr.inputs.Radio(choices = ['Female', 'Male'], default='Female', label='Gender')
- slider_noise_scale = gr.inputs.Slider(minimum=0, maximum=1.0, step=0.001, default=0.667, label='Noise Scale')
-    slider_length_scale = gr.inputs.Slider(minimum=0, maximum=2.0, step=0.1, default=1.0, label='Length Scale')
-
- choice_transliteration = gr.inputs.Checkbox(default=True, label="Transliteration")
- choice_number_conversion = gr.inputs.Checkbox(default=True, label="Number Conversion")
- choice_split_sentences = gr.inputs.Checkbox(default=True, label="Split Sentences")
-
- examples = [['ഇന്ത്യ എന്റെ രാജ്യമാണ്, ഒരു ഇന്ത്യക്കാരനായതിൽ ഞാൻ അഭിമാനിക്കുന്നു.', 'Male', 0.667, 1, 0, 1, 1]]
-
- op = gr.outputs.Audio(type="numpy", label=None)
-
-    inputs_to_gradio = [textbox, gender, slider_noise_scale, slider_length_scale, choice_transliteration, choice_number_conversion, choice_split_sentences]
-    iface = gr.Interface(fn=hit_tts, examples = examples, inputs=inputs_to_gradio, outputs=op, theme='huggingface', title='Vakyansh Malayalam TTS', article = 'Note: transliteration models may not work well in some scenarios, which can hamper TTS quality; to evaluate the model properly, provide input in the required language and switch off transliteration. Contact @harveenchadha on twitter for any issues.')
- iface.launch(enable_queue=True)
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("-a", "--acoustic", required=True, type=str)
- parser.add_argument("-v", "--vocoder", required=True, type=str)
- parser.add_argument("-d", "--device", type=str, default="cpu")
- parser.add_argument("-L", "--lang", type=str, required=True)
-
- global lang
-
- args = parser.parse_args()
- lang = args.lang
-
- build_gradio(args)
\ No newline at end of file
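
The core of the deleted Gradio handler is packing widget values into the argparse.Namespace that the advanced_tts helpers expect. Here is a standalone sketch of that packing; the field names follow the deleted code, and the default `lang` value is only an assumption for the Malayalam space.

```python
from argparse import Namespace

def pack_tts_args(text, gender, noise_scale, length_scale,
                  transliteration, number_conversion, split_sentences, lang="ml"):
    """Mirror how hit_tts turns UI values into the Namespace consumed downstream."""
    args = Namespace(
        text=text,
        gender=gender,
        noise_scale=noise_scale,
        length_scale=length_scale,
        transliteration=1 if transliteration else 0,
        number_conversion=1 if number_conversion else 0,
        split_sentences=1 if split_sentences else 0,
    )
    args.wav = None
    args.lang = lang
    return args

args = pack_tts_args("ഇന്ത്യ എന്റെ രാജ്യമാണ്", "Female", 0.667, 1.0, True, True, True)
print(args.noise_scale, args.split_sentences)  # 0.667 1
```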
diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/criterions/wav2vec_criterion.py b/spaces/ICML2022/OFA/fairseq/fairseq/criterions/wav2vec_criterion.py
deleted file mode 100644
index e04786cc3b75517cefd06303f98f8536f9279311..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/fairseq/criterions/wav2vec_criterion.py
+++ /dev/null
@@ -1,229 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-from dataclasses import dataclass, field
-from typing import List, Optional
-
-import torch
-import torch.nn.functional as F
-from fairseq import metrics, utils
-from fairseq.criterions import FairseqCriterion, register_criterion
-from fairseq.dataclass import FairseqDataclass
-from fairseq.logging.meters import safe_round
-from fairseq.utils import is_xla_tensor
-
-
-@dataclass
-class Wav2VecCriterionConfig(FairseqDataclass):
- infonce: bool = field(
- default=False,
- metadata={
- "help": "if set, uses cross entropy instead of binary cross entropy (i.e. InfoNCE loss)"
- },
- )
- loss_weights: Optional[List[float]] = field(
- default=None,
- metadata={"help": "weights for additional loss terms (not first one)"},
- )
- log_keys: List[str] = field(
- default_factory=lambda: [],
- metadata={"help": "output keys to log"},
- )
-
-@register_criterion("wav2vec", dataclass=Wav2VecCriterionConfig)
-class Wav2vecCriterion(FairseqCriterion):
- def __init__(self, task, infonce=False, loss_weights=None, log_keys=None):
- super().__init__(task)
- self.infonce = infonce
- self.loss_weights = loss_weights
- self.log_keys = [] if log_keys is None else log_keys
-
- def forward(self, model, sample, reduce=True):
- """Compute the loss for the given sample.
-
- Returns a tuple with three elements:
- 1) the loss
- 2) the sample size, which is used as the denominator for the gradient
- 3) logging outputs to display while training
- """
- net_output = model(**sample["net_input"])
- logits = model.get_logits(net_output).float()
- target = model.get_targets(sample, net_output)
- self.xla = is_xla_tensor(logits)
-
- # XXX: handle weights on xla.
- weights = None
- if hasattr(model, "get_target_weights") and not self.infonce:
- weights = model.get_target_weights(target, net_output)
- if torch.is_tensor(weights):
- weights = weights.float()
-
- losses = []
-
- reduction = "none" if ((not reduce) or self.xla) else "sum"
- if self.infonce:
- loss = F.cross_entropy(logits, target, reduction=reduction)
- else:
- loss = F.binary_cross_entropy_with_logits(
- logits, target.float(), weights, reduction=reduction
- )
-
- if self.xla:
- # tpu-comment: since dynamic shapes lead to recompilations on xla,
- # we don't shrink tensors using mask_indices.
- # Instead, we use mask indices to adjust loss.
- mi = (
- sample['net_input']['mask_indices']
- .transpose(0, 1) # logits are transposed in `model.get_logits`
- .reshape(logits.size(0))
- )
- loss = (loss * mi).sum() if reduce else (loss * mi)
-
- if 'sample_size' in sample:
- sample_size = sample['sample_size']
- elif 'mask_indices' in sample['net_input']:
- sample_size = sample['net_input']['mask_indices'].sum()
- else:
- sample_size = target.numel() if self.infonce else target.long().sum().item()
- losses.append(loss.detach().clone())
-
- if self.loss_weights is not None:
- assert hasattr(model, "get_extra_losses")
- extra_losses = model.get_extra_losses(net_output)
- if torch.is_tensor(extra_losses):
- extra_losses = [extra_losses]
- if len(self.loss_weights) == 1 and len(extra_losses) != 1:
- self.loss_weights = [self.loss_weights[0]] * len(extra_losses)
- assert len(extra_losses) == len(
- self.loss_weights
- ), f"{len(extra_losses)}, {len(self.loss_weights)}"
- for p, coef in zip(extra_losses, self.loss_weights):
- if coef != 0 and p is not None:
- p = coef * p.float() * sample_size
- loss += p
- losses.append(p)
-
- logging_output = {
- "loss": loss.item() if (reduce and not self.xla) else loss.detach(),
- "ntokens": sample_size,
- "nsentences": sample["id"].numel(),
- "sample_size": sample_size,
- }
-
- for lk in self.log_keys:
- # Only store "logits" and "target" for computing MAP and MAUC
- # during validation
- if lk == "logits":
- if not self.training:
- logging_output["logits"] = logits.cpu().numpy()
- elif lk == "target":
- if not self.training:
- # If the targets have been mixed with the predictions of
- # teacher models, find the original targets
- if hasattr(model, "get_original_targets"):
- original_target = model.get_original_targets(sample, net_output)
- else:
- original_target = target
- logging_output["target"] = original_target.cpu().numpy()
- elif lk in net_output:
- value = net_output[lk]
- if not is_xla_tensor(value):
- value = float(value)
- logging_output[lk] = value
-
- if len(losses) > 1:
- for i, l in enumerate(losses):
- logging_output[f"loss_{i}"] = l.item() if not self.xla else l.detach()
-
- if self.infonce:
- with torch.no_grad():
- if logits.numel() == 0:
- corr = 0
- count = 0
- else:
- assert logits.dim() > 1, logits.shape
- max = logits.argmax(-1) == 0
- min = logits.argmin(-1) == 0
- if is_xla_tensor(logits):
- max, min = max * mi, min * mi
- both = max & min
- corr = max.long().sum() - both.long().sum()
- count = mi.sum()
- else:
- both = max & min
- corr = max.long().sum().item() - both.long().sum().item()
- count = float(max.numel())
-
- logging_output["correct"] = corr
- logging_output["count"] = count
-
- return loss, sample_size, logging_output
-
- @staticmethod
- def reduce_metrics(logging_outputs) -> None:
- """Aggregate logging outputs from data parallel training."""
- loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
- ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs))
- nsentences = utils.item(
- sum(log.get("nsentences", 0) for log in logging_outputs)
- )
- sample_size = utils.item(
- sum(log.get("sample_size", 0) for log in logging_outputs)
- )
-
- metrics.log_scalar(
- "loss", loss_sum / (sample_size or 1) / math.log(2), sample_size, round=3
- )
- metrics.log_scalar("ntokens", ntokens)
- metrics.log_scalar("nsentences", nsentences)
-
- correct = sum(log.get("correct", 0) for log in logging_outputs)
- metrics.log_scalar("_correct", correct)
-
- total = sum(log.get("count", 0) for log in logging_outputs)
- metrics.log_scalar("_total", total)
-
- if total > 0:
- metrics.log_derived(
- "accuracy",
- lambda meters: safe_round(
- meters["_correct"].sum / meters["_total"].sum, 5
- )
- if meters["_total"].sum > 0
- else float("nan"),
- )
-
- builtin_keys = {
- "loss",
- "ntokens",
- "nsentences",
- "sample_size",
- "correct",
- "count",
- }
-
- for k in logging_outputs[0]:
- if k not in builtin_keys:
- val = sum(log.get(k, 0) for log in logging_outputs)
- if k.startswith("loss"):
- metrics.log_scalar(
- k, val / (sample_size or 1) / math.log(2), sample_size, round=3
- )
- else:
- metrics.log_scalar(k, val / len(logging_outputs), round=3)
-
- # FIXME: revert when gather based xla reduction is implemented
- #@staticmethod
- #def logging_outputs_can_be_summed() -> bool:
- def logging_outputs_can_be_summed(self) -> bool:
- """
- Whether the logging outputs returned by `forward` can be summed
- across workers prior to calling `reduce_metrics`. Setting this
-        to True will improve distributed training speed.
- """
- # XXX: Gather based reduction not implemented for xla yet.
- # So we fall to sum based reduction for xla.
- return self.xla
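
In the InfoNCE branch of the deleted criterion, index 0 of every logit row is the positive candidate, so the loss reduces to ordinary cross entropy against an all-zero target, and accuracy counts argmax hits at index 0 while discarding rows where max and min tie. A minimal sketch with made-up logits:

```python
import torch
import torch.nn.functional as F

# Toy contrastive logits: (num_masked_positions, 1 + num_negatives);
# by convention, column 0 holds the positive candidate.
torch.manual_seed(0)
logits = torch.randn(6, 11)
target = logits.new_zeros(logits.size(0), dtype=torch.long)  # positives at index 0

# InfoNCE path: plain cross entropy against the "positive is class 0" target.
loss = F.cross_entropy(logits, target, reduction="sum")

# Accuracy bookkeeping as in the deleted code: argmax hits at index 0,
# minus positions where the row is degenerate (max and min both at index 0).
with torch.no_grad():
    max_hit = logits.argmax(-1) == 0
    min_hit = logits.argmin(-1) == 0
    both = max_hit & min_hit
    correct = max_hit.long().sum().item() - both.long().sum().item()
    count = float(max_hit.numel())

print(loss.item(), correct, count)
```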
diff --git a/spaces/IDEA-Research/Grounded-SAM/segment_anything/segment_anything/utils/amg.py b/spaces/IDEA-Research/Grounded-SAM/segment_anything/segment_anything/utils/amg.py
deleted file mode 100644
index 3a137778e45c464c079658ecb87ec53270e789f7..0000000000000000000000000000000000000000
--- a/spaces/IDEA-Research/Grounded-SAM/segment_anything/segment_anything/utils/amg.py
+++ /dev/null
@@ -1,346 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import numpy as np
-import torch
-
-import math
-from copy import deepcopy
-from itertools import product
-from typing import Any, Dict, Generator, ItemsView, List, Tuple
-
-
-class MaskData:
- """
- A structure for storing masks and their related data in batched format.
- Implements basic filtering and concatenation.
- """
-
- def __init__(self, **kwargs) -> None:
- for v in kwargs.values():
- assert isinstance(
- v, (list, np.ndarray, torch.Tensor)
- ), "MaskData only supports list, numpy arrays, and torch tensors."
- self._stats = dict(**kwargs)
-
- def __setitem__(self, key: str, item: Any) -> None:
- assert isinstance(
- item, (list, np.ndarray, torch.Tensor)
- ), "MaskData only supports list, numpy arrays, and torch tensors."
- self._stats[key] = item
-
- def __delitem__(self, key: str) -> None:
- del self._stats[key]
-
- def __getitem__(self, key: str) -> Any:
- return self._stats[key]
-
- def items(self) -> ItemsView[str, Any]:
- return self._stats.items()
-
- def filter(self, keep: torch.Tensor) -> None:
- for k, v in self._stats.items():
- if v is None:
- self._stats[k] = None
- elif isinstance(v, torch.Tensor):
- self._stats[k] = v[torch.as_tensor(keep, device=v.device)]
- elif isinstance(v, np.ndarray):
- self._stats[k] = v[keep.detach().cpu().numpy()]
- elif isinstance(v, list) and keep.dtype == torch.bool:
- self._stats[k] = [a for i, a in enumerate(v) if keep[i]]
- elif isinstance(v, list):
- self._stats[k] = [v[i] for i in keep]
- else:
- raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")
-
- def cat(self, new_stats: "MaskData") -> None:
- for k, v in new_stats.items():
- if k not in self._stats or self._stats[k] is None:
- self._stats[k] = deepcopy(v)
- elif isinstance(v, torch.Tensor):
- self._stats[k] = torch.cat([self._stats[k], v], dim=0)
- elif isinstance(v, np.ndarray):
- self._stats[k] = np.concatenate([self._stats[k], v], axis=0)
- elif isinstance(v, list):
- self._stats[k] = self._stats[k] + deepcopy(v)
- else:
- raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")
-
- def to_numpy(self) -> None:
- for k, v in self._stats.items():
- if isinstance(v, torch.Tensor):
- self._stats[k] = v.detach().cpu().numpy()
-
-
-def is_box_near_crop_edge(
- boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0
-) -> torch.Tensor:
- """Filter masks at the edge of a crop, but not at the edge of the original image."""
- crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)
- orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)
- boxes = uncrop_boxes_xyxy(boxes, crop_box).float()
- near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)
- near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)
- near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)
- return torch.any(near_crop_edge, dim=1)
-
-
-def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:
- box_xywh = deepcopy(box_xyxy)
- box_xywh[2] = box_xywh[2] - box_xywh[0]
- box_xywh[3] = box_xywh[3] - box_xywh[1]
- return box_xywh
-
-
-def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:
- assert len(args) > 0 and all(
- len(a) == len(args[0]) for a in args
- ), "Batched iteration must have inputs of all the same size."
- n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)
- for b in range(n_batches):
- yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]
-
-
-def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:
- """
- Encodes masks to an uncompressed RLE, in the format expected by
- pycoco tools.
- """
- # Put in fortran order and flatten h,w
- b, h, w = tensor.shape
- tensor = tensor.permute(0, 2, 1).flatten(1)
-
- # Compute change indices
- diff = tensor[:, 1:] ^ tensor[:, :-1]
- change_indices = diff.nonzero()
-
- # Encode run length
- out = []
- for i in range(b):
- cur_idxs = change_indices[change_indices[:, 0] == i, 1]
- cur_idxs = torch.cat(
- [
- torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),
- cur_idxs + 1,
- torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),
- ]
- )
- btw_idxs = cur_idxs[1:] - cur_idxs[:-1]
- counts = [] if tensor[i, 0] == 0 else [0]
- counts.extend(btw_idxs.detach().cpu().tolist())
- out.append({"size": [h, w], "counts": counts})
- return out
-
-
-def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:
- """Compute a binary mask from an uncompressed RLE."""
- h, w = rle["size"]
- mask = np.empty(h * w, dtype=bool)
- idx = 0
- parity = False
- for count in rle["counts"]:
- mask[idx : idx + count] = parity
- idx += count
- parity ^= True
- mask = mask.reshape(w, h)
- return mask.transpose() # Put in C order
-
-
-def area_from_rle(rle: Dict[str, Any]) -> int:
- return sum(rle["counts"][1::2])
-
-
-def calculate_stability_score(
- masks: torch.Tensor, mask_threshold: float, threshold_offset: float
-) -> torch.Tensor:
- """
- Computes the stability score for a batch of masks. The stability
- score is the IoU between the binary masks obtained by thresholding
- the predicted mask logits at high and low values.
- """
- # One mask is always contained inside the other.
-    # Save memory by preventing unnecessary cast to torch.int64
- intersections = (
- (masks > (mask_threshold + threshold_offset))
- .sum(-1, dtype=torch.int16)
- .sum(-1, dtype=torch.int32)
- )
- unions = (
- (masks > (mask_threshold - threshold_offset))
- .sum(-1, dtype=torch.int16)
- .sum(-1, dtype=torch.int32)
- )
- return intersections / unions
-
-
-def build_point_grid(n_per_side: int) -> np.ndarray:
- """Generates a 2D grid of points evenly spaced in [0,1]x[0,1]."""
- offset = 1 / (2 * n_per_side)
- points_one_side = np.linspace(offset, 1 - offset, n_per_side)
- points_x = np.tile(points_one_side[None, :], (n_per_side, 1))
- points_y = np.tile(points_one_side[:, None], (1, n_per_side))
- points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2)
- return points
-
-
-def build_all_layer_point_grids(
- n_per_side: int, n_layers: int, scale_per_layer: int
-) -> List[np.ndarray]:
- """Generates point grids for all crop layers."""
- points_by_layer = []
- for i in range(n_layers + 1):
- n_points = int(n_per_side / (scale_per_layer**i))
- points_by_layer.append(build_point_grid(n_points))
- return points_by_layer
-
-
-def generate_crop_boxes(
- im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float
-) -> Tuple[List[List[int]], List[int]]:
- """
- Generates a list of crop boxes of different sizes. Each layer
- has (2**i)**2 boxes for the ith layer.
- """
- crop_boxes, layer_idxs = [], []
- im_h, im_w = im_size
- short_side = min(im_h, im_w)
-
- # Original image
- crop_boxes.append([0, 0, im_w, im_h])
- layer_idxs.append(0)
-
- def crop_len(orig_len, n_crops, overlap):
- return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))
-
- for i_layer in range(n_layers):
- n_crops_per_side = 2 ** (i_layer + 1)
- overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))
-
- crop_w = crop_len(im_w, n_crops_per_side, overlap)
- crop_h = crop_len(im_h, n_crops_per_side, overlap)
-
- crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]
- crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]
-
-        # Crops in XYXY format
- for x0, y0 in product(crop_box_x0, crop_box_y0):
- box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]
- crop_boxes.append(box)
- layer_idxs.append(i_layer + 1)
-
- return crop_boxes, layer_idxs
-
-
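
For crop layer i, generate_crop_boxes above lays out 2 ** (i + 1) crops per side, each widened by an overlap derived from the short image side. A worked example of that layout for a single layer; the image size and overlap ratio are arbitrary illustration values.

```python
import math
from itertools import product

# Crop layout for the first crop layer (i_layer = 0) of a 600x800 image.
im_h, im_w = 600, 800
overlap_ratio = 0.2
n_crops_per_side = 2  # 2 ** (i_layer + 1)
overlap = int(overlap_ratio * min(im_h, im_w) * (2 / n_crops_per_side))

def crop_len(orig_len, n_crops, overlap):
    return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))

crop_w = crop_len(im_w, n_crops_per_side, overlap)
crop_h = crop_len(im_h, n_crops_per_side, overlap)
x0s = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]
y0s = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]

# Boxes in [x0, y0, x1, y1] form, clipped to the image border.
boxes = [[x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]
         for x0, y0 in product(x0s, y0s)]
print(overlap, crop_w, crop_h)  # 120 460 360
print(boxes)
```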
-def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
- x0, y0, _, _ = crop_box
- offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)
- # Check if boxes has a channel dimension
- if len(boxes.shape) == 3:
- offset = offset.unsqueeze(1)
- return boxes + offset
-
-
-def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
- x0, y0, _, _ = crop_box
- offset = torch.tensor([[x0, y0]], device=points.device)
- # Check if points has a channel dimension
- if len(points.shape) == 3:
- offset = offset.unsqueeze(1)
- return points + offset
-
-
-def uncrop_masks(
- masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int
-) -> torch.Tensor:
- x0, y0, x1, y1 = crop_box
- if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:
- return masks
- # Coordinate transform masks
- pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)
- pad = (x0, pad_x - x0, y0, pad_y - y0)
- return torch.nn.functional.pad(masks, pad, value=0)
-
-
-def remove_small_regions(
- mask: np.ndarray, area_thresh: float, mode: str
-) -> Tuple[np.ndarray, bool]:
- """
- Removes small disconnected regions and holes in a mask. Returns the
-    mask and an indicator of whether the mask has been modified.
- """
- import cv2 # type: ignore
-
- assert mode in ["holes", "islands"]
- correct_holes = mode == "holes"
- working_mask = (correct_holes ^ mask).astype(np.uint8)
- n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)
- sizes = stats[:, -1][1:] # Row 0 is background label
- small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]
- if len(small_regions) == 0:
- return mask, False
- fill_labels = [0] + small_regions
- if not correct_holes:
- fill_labels = [i for i in range(n_labels) if i not in fill_labels]
- # If every region is below threshold, keep largest
- if len(fill_labels) == 0:
- fill_labels = [int(np.argmax(sizes)) + 1]
- mask = np.isin(regions, fill_labels)
- return mask, True
-
-
-def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:
- from pycocotools import mask as mask_utils # type: ignore
-
- h, w = uncompressed_rle["size"]
- rle = mask_utils.frPyObjects(uncompressed_rle, h, w)
- rle["counts"] = rle["counts"].decode("utf-8") # Necessary to serialize with json
- return rle
-
-
-def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:
- """
- Calculates boxes in XYXY format around masks. Return [0,0,0,0] for
- an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.
- """
- # torch.max below raises an error on empty inputs, just skip in this case
- if torch.numel(masks) == 0:
- return torch.zeros(*masks.shape[:-2], 4, device=masks.device)
-
- # Normalize shape to CxHxW
- shape = masks.shape
- h, w = shape[-2:]
- if len(shape) > 2:
- masks = masks.flatten(0, -3)
- else:
- masks = masks.unsqueeze(0)
-
- # Get top and bottom edges
- in_height, _ = torch.max(masks, dim=-1)
- in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]
- bottom_edges, _ = torch.max(in_height_coords, dim=-1)
- in_height_coords = in_height_coords + h * (~in_height)
- top_edges, _ = torch.min(in_height_coords, dim=-1)
-
- # Get left and right edges
- in_width, _ = torch.max(masks, dim=-2)
- in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]
- right_edges, _ = torch.max(in_width_coords, dim=-1)
- in_width_coords = in_width_coords + w * (~in_width)
- left_edges, _ = torch.min(in_width_coords, dim=-1)
-
- # If the mask is empty the right edge will be to the left of the left edge.
- # Replace these boxes with [0, 0, 0, 0]
- empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
- out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)
- out = out * (~empty_filter).unsqueeze(-1)
-
- # Return to original shape
- if len(shape) > 2:
- out = out.reshape(*shape[:-2], 4)
- else:
- out = out[0]
-
- return out
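
mask_to_rle_pytorch above stores masks as uncompressed RLE in column-major order, with counts that always begin on a (possibly empty) background run. The NumPy round-trip below mirrors that encoding and the rle_to_mask decoder on a toy mask; the mask values are arbitrary.

```python
import numpy as np

def mask_to_rle(mask):
    """Uncompressed RLE in the same layout as mask_to_rle_pytorch:
    Fortran (column-major) order, counts alternating background/foreground,
    starting with a background run (length 0 if the first pixel is set)."""
    h, w = mask.shape
    flat = mask.flatten(order="F").astype(bool)
    change = np.flatnonzero(flat[1:] ^ flat[:-1]) + 1  # indices where the value flips
    idxs = np.concatenate(([0], change, [flat.size]))
    runs = np.diff(idxs).tolist()
    counts = runs if not flat[0] else [0] + runs
    return {"size": [h, w], "counts": counts}

def rle_to_mask(rle):
    """Inverse transform, matching the deleted rle_to_mask."""
    h, w = rle["size"]
    flat = np.empty(h * w, dtype=bool)
    idx, parity = 0, False
    for count in rle["counts"]:
        flat[idx: idx + count] = parity
        idx += count
        parity ^= True
    return flat.reshape(w, h).transpose()

mask = np.array([[0, 1, 1, 0],
                 [0, 1, 0, 0],
                 [1, 1, 0, 1]], dtype=bool)
rle = mask_to_rle(mask)
assert np.array_equal(rle_to_mask(rle), mask)
print(rle["counts"])  # [2, 5, 4, 1]
```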
diff --git a/spaces/Illumotion/Koboldcpp/examples/console.h b/spaces/Illumotion/Koboldcpp/examples/console.h
deleted file mode 100644
index ec175269b9d8af48803d0b6e618d008a9ab99b4d..0000000000000000000000000000000000000000
--- a/spaces/Illumotion/Koboldcpp/examples/console.h
+++ /dev/null
@@ -1,19 +0,0 @@
-// Console functions
-
-#pragma once
-
-#include <string>
-
-namespace console {
- enum display_t {
- reset = 0,
- prompt,
- user_input,
- error
- };
-
- void init(bool use_simple_io, bool use_advanced_display);
- void cleanup();
- void set_display(display_t display);
- bool readline(std::string & line, bool multiline_input);
-}
diff --git a/spaces/Jackflack09/diffuse-custom/diffusers/models/resnet_flax.py b/spaces/Jackflack09/diffuse-custom/diffusers/models/resnet_flax.py
deleted file mode 100644
index 632780378ee0e8fa49404ecae470146250270ce5..0000000000000000000000000000000000000000
--- a/spaces/Jackflack09/diffuse-custom/diffusers/models/resnet_flax.py
+++ /dev/null
@@ -1,124 +0,0 @@
-# Copyright 2022 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import flax.linen as nn
-import jax
-import jax.numpy as jnp
-
-
-class FlaxUpsample2D(nn.Module):
- out_channels: int
- dtype: jnp.dtype = jnp.float32
-
- def setup(self):
- self.conv = nn.Conv(
- self.out_channels,
- kernel_size=(3, 3),
- strides=(1, 1),
- padding=((1, 1), (1, 1)),
- dtype=self.dtype,
- )
-
- def __call__(self, hidden_states):
- batch, height, width, channels = hidden_states.shape
- hidden_states = jax.image.resize(
- hidden_states,
- shape=(batch, height * 2, width * 2, channels),
- method="nearest",
- )
- hidden_states = self.conv(hidden_states)
- return hidden_states
-
-
-class FlaxDownsample2D(nn.Module):
- out_channels: int
- dtype: jnp.dtype = jnp.float32
-
- def setup(self):
- self.conv = nn.Conv(
- self.out_channels,
- kernel_size=(3, 3),
- strides=(2, 2),
- padding=((1, 1), (1, 1)), # padding="VALID",
- dtype=self.dtype,
- )
-
- def __call__(self, hidden_states):
- # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
- # hidden_states = jnp.pad(hidden_states, pad_width=pad)
- hidden_states = self.conv(hidden_states)
- return hidden_states
-
-
-class FlaxResnetBlock2D(nn.Module):
- in_channels: int
- out_channels: int = None
- dropout_prob: float = 0.0
- use_nin_shortcut: bool = None
- dtype: jnp.dtype = jnp.float32
-
- def setup(self):
- out_channels = self.in_channels if self.out_channels is None else self.out_channels
-
- self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
- self.conv1 = nn.Conv(
- out_channels,
- kernel_size=(3, 3),
- strides=(1, 1),
- padding=((1, 1), (1, 1)),
- dtype=self.dtype,
- )
-
- self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)
-
- self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
- self.dropout = nn.Dropout(self.dropout_prob)
- self.conv2 = nn.Conv(
- out_channels,
- kernel_size=(3, 3),
- strides=(1, 1),
- padding=((1, 1), (1, 1)),
- dtype=self.dtype,
- )
-
- use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
-
- self.conv_shortcut = None
- if use_nin_shortcut:
- self.conv_shortcut = nn.Conv(
- out_channels,
- kernel_size=(1, 1),
- strides=(1, 1),
- padding="VALID",
- dtype=self.dtype,
- )
-
- def __call__(self, hidden_states, temb, deterministic=True):
- residual = hidden_states
- hidden_states = self.norm1(hidden_states)
- hidden_states = nn.swish(hidden_states)
- hidden_states = self.conv1(hidden_states)
-
- temb = self.time_emb_proj(nn.swish(temb))
- temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
- hidden_states = hidden_states + temb
-
- hidden_states = self.norm2(hidden_states)
- hidden_states = nn.swish(hidden_states)
- hidden_states = self.dropout(hidden_states, deterministic)
- hidden_states = self.conv2(hidden_states)
-
- if self.conv_shortcut is not None:
- residual = self.conv_shortcut(residual)
-
- return hidden_states + residual
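
FlaxUpsample2D above combines a nearest-neighbour 2x resize with a padded 3x3 convolution in NHWC layout. A minimal sketch of the same pattern, using only the Flax/JAX calls already present in the deleted file; the input shape and channel count are arbitrary.

```python
import jax
import jax.numpy as jnp
import flax.linen as nn

class NearestUpsample2D(nn.Module):
    """Nearest-neighbour 2x upsample followed by a 3x3 conv, NHWC layout."""
    out_channels: int

    @nn.compact
    def __call__(self, x):
        b, h, w, c = x.shape
        x = jax.image.resize(x, shape=(b, h * 2, w * 2, c), method="nearest")
        x = nn.Conv(self.out_channels, kernel_size=(3, 3), strides=(1, 1),
                    padding=((1, 1), (1, 1)))(x)
        return x

x = jnp.ones((1, 8, 8, 4))                      # NHWC, as in the Flax blocks above
module = NearestUpsample2D(out_channels=4)
params = module.init(jax.random.PRNGKey(0), x)  # initialise conv weights
y = module.apply(params, x)
print(y.shape)                                  # (1, 16, 16, 4)
```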
diff --git a/spaces/Jackflack09/diffuse-custom/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py b/spaces/Jackflack09/diffuse-custom/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py
deleted file mode 100644
index bc416f57d3e0ee09331b763ef91c01acb3ae4e57..0000000000000000000000000000000000000000
--- a/spaces/Jackflack09/diffuse-custom/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py
+++ /dev/null
@@ -1,725 +0,0 @@
-# Copyright 2022 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import inspect
-from typing import Callable, List, Optional, Union
-
-import numpy as np
-import torch
-
-import PIL
-from diffusers.utils import is_accelerate_available
-from packaging import version
-from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
-
-from ...configuration_utils import FrozenDict
-from ...models import AutoencoderKL, UNet2DConditionModel
-from ...pipeline_utils import DiffusionPipeline
-from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
-from ...utils import deprecate, logging
-from . import StableDiffusionPipelineOutput
-from .safety_checker import StableDiffusionSafetyChecker
-
-
-logger = logging.get_logger(__name__) # pylint: disable=invalid-name
-
-
-def prepare_mask_and_masked_image(image, mask):
- """
- Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be
- converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the
- ``image`` and ``1`` for the ``mask``.
-
- The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
- binarized (``mask > 0.5``) and cast to ``torch.float32`` too.
-
- Args:
- image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint.
- It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
- ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``.
-        mask (Union[np.array, PIL.Image, torch.Tensor]): The mask to apply to the image, i.e. regions to inpaint.
- It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
- ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.
-
-
-    Raises:
-        ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range.
-        ValueError: ``torch.Tensor`` mask should be in the ``[0, 1]`` range.
-        ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
-        TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not (or the other way around).
-
- Returns:
- tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4
- dimensions: ``batch x channels x height x width``.
- """
- if isinstance(image, torch.Tensor):
- if not isinstance(mask, torch.Tensor):
-            raise TypeError(f"`image` is a torch.Tensor but `mask` (type: {type(mask)}) is not")
-
- # Batch single image
- if image.ndim == 3:
- assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)"
- image = image.unsqueeze(0)
-
- # Batch and add channel dim for single mask
- if mask.ndim == 2:
- mask = mask.unsqueeze(0).unsqueeze(0)
-
- # Batch single mask or add channel dim
- if mask.ndim == 3:
- # Single batched mask, no channel dim or single mask not batched but channel dim
- if mask.shape[0] == 1:
- mask = mask.unsqueeze(0)
-
- # Batched masks no channel dim
- else:
- mask = mask.unsqueeze(1)
-
- assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions"
- assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions"
- assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size"
-
- # Check image is in [-1, 1]
- if image.min() < -1 or image.max() > 1:
- raise ValueError("Image should be in [-1, 1] range")
-
- # Check mask is in [0, 1]
- if mask.min() < 0 or mask.max() > 1:
- raise ValueError("Mask should be in [0, 1] range")
-
- # Binarize mask
- mask[mask < 0.5] = 0
- mask[mask >= 0.5] = 1
-
- # Image as float32
- image = image.to(dtype=torch.float32)
- elif isinstance(mask, torch.Tensor):
-        raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)}) is not")
- else:
- if isinstance(image, PIL.Image.Image):
- image = np.array(image.convert("RGB"))
- image = image[None].transpose(0, 3, 1, 2)
- image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
- if isinstance(mask, PIL.Image.Image):
- mask = np.array(mask.convert("L"))
- mask = mask.astype(np.float32) / 255.0
- mask = mask[None, None]
- mask[mask < 0.5] = 0
- mask[mask >= 0.5] = 1
- mask = torch.from_numpy(mask)
-
- masked_image = image * (mask < 0.5)
-
- return mask, masked_image
-
-
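
Whatever the input type, prepare_mask_and_masked_image above ends with a hard 0/1 mask and an image whose to-be-inpainted region is zeroed out. A tensor-only sketch of that final step; the shapes and random values are illustrative, while real inputs usually come from PIL images.

```python
import torch

# Toy stand-ins: an image already scaled to [-1, 1] and a soft mask in [0, 1].
image = torch.rand(1, 3, 8, 8) * 2 - 1
mask = torch.rand(1, 1, 8, 8)

# Binarize the mask at 0.5 and zero out the region to be inpainted,
# as the function above does.
mask = (mask >= 0.5).to(torch.float32)
masked_image = image * (mask < 0.5)

print(mask.shape, masked_image.shape)  # torch.Size([1, 1, 8, 8]) torch.Size([1, 3, 8, 8])
```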
-class StableDiffusionInpaintPipeline(DiffusionPipeline):
- r"""
- Pipeline for text-guided image inpainting using Stable Diffusion. *This is an experimental feature*.
-
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-
- Args:
- vae ([`AutoencoderKL`]):
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
- text_encoder ([`CLIPTextModel`]):
- Frozen text-encoder. Stable Diffusion uses the text portion of
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
- tokenizer (`CLIPTokenizer`):
- Tokenizer of class
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
- scheduler ([`SchedulerMixin`]):
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
- safety_checker ([`StableDiffusionSafetyChecker`]):
- Classification module that estimates whether generated images could be considered offensive or harmful.
- Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
- feature_extractor ([`CLIPFeatureExtractor`]):
- Model that extracts features from generated images to be used as inputs for the `safety_checker`.
- """
- _optional_components = ["safety_checker", "feature_extractor"]
-
- def __init__(
- self,
- vae: AutoencoderKL,
- text_encoder: CLIPTextModel,
- tokenizer: CLIPTokenizer,
- unet: UNet2DConditionModel,
- scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
- safety_checker: StableDiffusionSafetyChecker,
- feature_extractor: CLIPFeatureExtractor,
- requires_safety_checker: bool = True,
- ):
- super().__init__()
-
- if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
- deprecation_message = (
- f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
- f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
-                "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
- " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
- " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
- " file"
- )
- deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
- new_config = dict(scheduler.config)
- new_config["steps_offset"] = 1
- scheduler._internal_dict = FrozenDict(new_config)
-
- if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
- deprecation_message = (
- f"The configuration file of this scheduler: {scheduler} has not set the configuration"
- " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
- " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
- " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
- " Hub, it would be very nice if you could open a Pull request for the"
- " `scheduler/scheduler_config.json` file"
- )
- deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
- new_config = dict(scheduler.config)
- new_config["skip_prk_steps"] = True
- scheduler._internal_dict = FrozenDict(new_config)
-
- if safety_checker is None and requires_safety_checker:
- logger.warning(
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
-                " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
- " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
- )
-
- if safety_checker is not None and feature_extractor is None:
- raise ValueError(
-                f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
- )
-
- is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
- version.parse(unet.config._diffusers_version).base_version
- ) < version.parse("0.9.0.dev0")
- is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
- if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
- deprecation_message = (
- "The configuration file of the unet has set the default `sample_size` to smaller than"
-                " 64, which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
- " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
- " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
- " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
- " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
- " in the config might lead to incorrect results in future versions. If you have downloaded this"
- " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
- " the `unet/config.json` file"
- )
- deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
- new_config = dict(unet.config)
- new_config["sample_size"] = 64
- unet._internal_dict = FrozenDict(new_config)
-
- self.register_modules(
- vae=vae,
- text_encoder=text_encoder,
- tokenizer=tokenizer,
- unet=unet,
- scheduler=scheduler,
- safety_checker=safety_checker,
- feature_extractor=feature_extractor,
- )
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
- self.register_to_config(requires_safety_checker=requires_safety_checker)
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_attention_slicing
- def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
- r"""
- Enable sliced attention computation.
-
- When this option is enabled, the attention module will split the input tensor in slices, to compute attention
- in several steps. This is useful to save some memory in exchange for a small speed decrease.
-
- Args:
- slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
- When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
- a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
- `attention_head_dim` must be a multiple of `slice_size`.
- """
- if slice_size == "auto":
- if isinstance(self.unet.config.attention_head_dim, int):
- # half the attention head size is usually a good trade-off between
- # speed and memory
- slice_size = self.unet.config.attention_head_dim // 2
- else:
- # if `attention_head_dim` is a list, take the smallest head size
- slice_size = min(self.unet.config.attention_head_dim)
-
- self.unet.set_attention_slice(slice_size)
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_attention_slicing
- def disable_attention_slicing(self):
- r"""
- Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
- back to computing attention in one step.
- """
- # set slice_size = `None` to disable `attention slicing`
- self.enable_attention_slicing(None)
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_sequential_cpu_offload
- def enable_sequential_cpu_offload(self, gpu_id=0):
- r"""
- Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
- text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
-        `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
- """
- if is_accelerate_available():
- from accelerate import cpu_offload
- else:
- raise ImportError("Please install accelerate via `pip install accelerate`")
-
- device = torch.device(f"cuda:{gpu_id}")
-
- for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
- if cpu_offloaded_model is not None:
- cpu_offload(cpu_offloaded_model, device)
-
- if self.safety_checker is not None:
- # TODO(Patrick) - there is currently a bug with cpu offload of nn.Parameter in accelerate
- # fix by only offloading self.safety_checker for now
- cpu_offload(self.safety_checker.vision_model, device)
-
- @property
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
- def _execution_device(self):
- r"""
- Returns the device on which the pipeline's models will be executed. After calling
- `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
- hooks.
- """
- if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
- return self.device
- for module in self.unet.modules():
- if (
- hasattr(module, "_hf_hook")
- and hasattr(module._hf_hook, "execution_device")
- and module._hf_hook.execution_device is not None
- ):
- return torch.device(module._hf_hook.execution_device)
- return self.device
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
- def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
- r"""
- Encodes the prompt into text encoder hidden states.
-
- Args:
-            prompt (`str` or `List[str]`):
- prompt to be encoded
- device: (`torch.device`):
- torch device
- num_images_per_prompt (`int`):
- number of images that should be generated per prompt
- do_classifier_free_guidance (`bool`):
- whether to use classifier free guidance or not
- negative_prompt (`str` or `List[str]`):
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
- if `guidance_scale` is less than `1`).
- """
- batch_size = len(prompt) if isinstance(prompt, list) else 1
-
- text_inputs = self.tokenizer(
- prompt,
- padding="max_length",
- max_length=self.tokenizer.model_max_length,
- truncation=True,
- return_tensors="pt",
- )
- text_input_ids = text_inputs.input_ids
- untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="pt").input_ids
-
- if not torch.equal(text_input_ids, untruncated_ids):
- removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
- logger.warning(
- "The following part of your input was truncated because CLIP can only handle sequences up to"
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
- )
-
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
- attention_mask = text_inputs.attention_mask.to(device)
- else:
- attention_mask = None
-
- text_embeddings = self.text_encoder(
- text_input_ids.to(device),
- attention_mask=attention_mask,
- )
- text_embeddings = text_embeddings[0]
-
- # duplicate text embeddings for each generation per prompt, using mps friendly method
- bs_embed, seq_len, _ = text_embeddings.shape
- text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
- text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
-
- # get unconditional embeddings for classifier free guidance
- if do_classifier_free_guidance:
- uncond_tokens: List[str]
- if negative_prompt is None:
- uncond_tokens = [""] * batch_size
- elif type(prompt) is not type(negative_prompt):
- raise TypeError(
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
- f" {type(prompt)}."
- )
- elif isinstance(negative_prompt, str):
- uncond_tokens = [negative_prompt]
- elif batch_size != len(negative_prompt):
- raise ValueError(
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
- " the batch size of `prompt`."
- )
- else:
- uncond_tokens = negative_prompt
-
- max_length = text_input_ids.shape[-1]
- uncond_input = self.tokenizer(
- uncond_tokens,
- padding="max_length",
- max_length=max_length,
- truncation=True,
- return_tensors="pt",
- )
-
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
- attention_mask = uncond_input.attention_mask.to(device)
- else:
- attention_mask = None
-
- uncond_embeddings = self.text_encoder(
- uncond_input.input_ids.to(device),
- attention_mask=attention_mask,
- )
- uncond_embeddings = uncond_embeddings[0]
-
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
- seq_len = uncond_embeddings.shape[1]
- uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
- uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
-
- # For classifier free guidance, we need to do two forward passes.
- # Here we concatenate the unconditional and text embeddings into a single batch
- # to avoid doing two forward passes
- text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
-
- return text_embeddings
-
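The concatenation at the end of `_encode_prompt` is what lets classifier-free guidance run in a single UNet forward pass; a small self-contained shape check (the embedding width of 768 assumes a CLIP ViT-L text encoder):

```python
import torch

B, N, seq_len, dim = 2, 1, 77, 768         # batch, images per prompt, tokens, embedding width
uncond = torch.zeros(B * N, seq_len, dim)  # stand-in for the unconditional embeddings
cond = torch.ones(B * N, seq_len, dim)     # stand-in for the prompt embeddings

text_embeddings = torch.cat([uncond, cond])
print(text_embeddings.shape)               # torch.Size([4, 77, 768]): two passes' worth in one batch
```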
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
- def run_safety_checker(self, image, device, dtype):
- if self.safety_checker is not None:
- safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
- image, has_nsfw_concept = self.safety_checker(
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
- )
- else:
- has_nsfw_concept = None
- return image, has_nsfw_concept
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
- def prepare_extra_step_kwargs(self, generator, eta):
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
- # and should be between [0, 1]
-
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
- extra_step_kwargs = {}
- if accepts_eta:
- extra_step_kwargs["eta"] = eta
-
- # check if the scheduler accepts generator
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
- if accepts_generator:
- extra_step_kwargs["generator"] = generator
- return extra_step_kwargs
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
- def decode_latents(self, latents):
- latents = 1 / 0.18215 * latents
- image = self.vae.decode(latents).sample
- image = (image / 2 + 0.5).clamp(0, 1)
-        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
- return image
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs
- def check_inputs(self, prompt, height, width, callback_steps):
- if not isinstance(prompt, str) and not isinstance(prompt, list):
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
-
- if height % 8 != 0 or width % 8 != 0:
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
-
- if (callback_steps is None) or (
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
- ):
- raise ValueError(
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
- f" {type(callback_steps)}."
- )
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
- def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
- shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
- if latents is None:
- if device.type == "mps":
- # randn does not work reproducibly on mps
- latents = torch.randn(shape, generator=generator, device="cpu", dtype=dtype).to(device)
- else:
- latents = torch.randn(shape, generator=generator, device=device, dtype=dtype)
- else:
- if latents.shape != shape:
- raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
- latents = latents.to(device)
-
- # scale the initial noise by the standard deviation required by the scheduler
- latents = latents * self.scheduler.init_noise_sigma
- return latents
-
- def prepare_mask_latents(
- self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance
- ):
- # resize the mask to latents shape as we concatenate the mask to the latents
- # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
- # and half precision
- mask = torch.nn.functional.interpolate(
- mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
- )
- mask = mask.to(device=device, dtype=dtype)
-
- masked_image = masked_image.to(device=device, dtype=dtype)
-
- # encode the mask image into latents space so we can concatenate it to the latents
- masked_image_latents = self.vae.encode(masked_image).latent_dist.sample(generator=generator)
- masked_image_latents = 0.18215 * masked_image_latents
-
- # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
- mask = mask.repeat(batch_size, 1, 1, 1)
- masked_image_latents = masked_image_latents.repeat(batch_size, 1, 1, 1)
-
- mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
- masked_image_latents = (
- torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
- )
-
-        # align device to prevent device errors when concatenating it with the latent model input
- masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
- return mask, masked_image_latents
-
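To see why the mask is interpolated before being concatenated to the latents, here is a self-contained shape sketch assuming the usual VAE scale factor of 8:

```python
import torch

height, width, vae_scale_factor = 512, 512, 8
mask = torch.rand(1, 1, height, width)  # stand-in for a preprocessed binary mask

# Downsample the mask to the latent resolution, exactly as prepare_mask_latents does.
mask_latent = torch.nn.functional.interpolate(
    mask, size=(height // vae_scale_factor, width // vae_scale_factor)
)
print(mask_latent.shape)  # torch.Size([1, 1, 64, 64]) -- matches the 64x64 image latents
```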
- @torch.no_grad()
- def __call__(
- self,
- prompt: Union[str, List[str]],
- image: Union[torch.FloatTensor, PIL.Image.Image],
- mask_image: Union[torch.FloatTensor, PIL.Image.Image],
- height: Optional[int] = None,
- width: Optional[int] = None,
- num_inference_steps: int = 50,
- guidance_scale: float = 7.5,
- negative_prompt: Optional[Union[str, List[str]]] = None,
- num_images_per_prompt: Optional[int] = 1,
- eta: float = 0.0,
- generator: Optional[torch.Generator] = None,
- latents: Optional[torch.FloatTensor] = None,
- output_type: Optional[str] = "pil",
- return_dict: bool = True,
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
- callback_steps: Optional[int] = 1,
- ):
- r"""
- Function invoked when calling the pipeline for generation.
-
- Args:
- prompt (`str` or `List[str]`):
- The prompt or prompts to guide the image generation.
- image (`PIL.Image.Image`):
- `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
- be masked out with `mask_image` and repainted according to `prompt`.
- mask_image (`PIL.Image.Image`):
- `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
- repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
- to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
- instead of 3, so the expected shape would be `(B, H, W, 1)`.
- height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
- The height in pixels of the generated image.
- width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
- The width in pixels of the generated image.
- num_inference_steps (`int`, *optional*, defaults to 50):
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
- expense of slower inference.
- guidance_scale (`float`, *optional*, defaults to 7.5):
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
-                1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
- usually at the expense of lower image quality.
- negative_prompt (`str` or `List[str]`, *optional*):
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
- if `guidance_scale` is less than `1`).
- num_images_per_prompt (`int`, *optional*, defaults to 1):
- The number of images to generate per prompt.
- eta (`float`, *optional*, defaults to 0.0):
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
- [`schedulers.DDIMScheduler`], will be ignored for others.
- generator (`torch.Generator`, *optional*):
- A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
- deterministic.
- latents (`torch.FloatTensor`, *optional*):
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will be generated by sampling using the supplied random `generator`.
- output_type (`str`, *optional*, defaults to `"pil"`):
-                The output format of the generated image. Choose between
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
- return_dict (`bool`, *optional*, defaults to `True`):
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
- plain tuple.
- callback (`Callable`, *optional*):
- A function that will be called every `callback_steps` steps during inference. The function will be
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
- callback_steps (`int`, *optional*, defaults to 1):
- The frequency at which the `callback` function will be called. If not specified, the callback will be
- called at every step.
-
- Returns:
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
-            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
- When returning a tuple, the first element is a list with the generated images, and the second element is a
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
- (nsfw) content, according to the `safety_checker`.
- """
- # 0. Default height and width to unet
- height = height or self.unet.config.sample_size * self.vae_scale_factor
- width = width or self.unet.config.sample_size * self.vae_scale_factor
-
- # 1. Check inputs
- self.check_inputs(prompt, height, width, callback_steps)
-
- # 2. Define call parameters
- batch_size = 1 if isinstance(prompt, str) else len(prompt)
- device = self._execution_device
-        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
- # corresponds to doing no classifier free guidance.
- do_classifier_free_guidance = guidance_scale > 1.0
-
- # 3. Encode input prompt
- text_embeddings = self._encode_prompt(
- prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
- )
-
- # 4. Preprocess mask and image
- if isinstance(image, PIL.Image.Image) and isinstance(mask_image, PIL.Image.Image):
- mask, masked_image = prepare_mask_and_masked_image(image, mask_image)
- else:
- mask = mask_image
- masked_image = image * (mask < 0.5)
-
- # 5. set timesteps
- self.scheduler.set_timesteps(num_inference_steps, device=device)
- timesteps = self.scheduler.timesteps
-
- # 6. Prepare latent variables
- num_channels_latents = self.vae.config.latent_channels
- latents = self.prepare_latents(
- batch_size * num_images_per_prompt,
- num_channels_latents,
- height,
- width,
- text_embeddings.dtype,
- device,
- generator,
- latents,
- )
-
- # 7. Prepare mask latent variables
- mask, masked_image_latents = self.prepare_mask_latents(
- mask,
- masked_image,
- batch_size * num_images_per_prompt,
- height,
- width,
- text_embeddings.dtype,
- device,
- generator,
- do_classifier_free_guidance,
- )
-
- # 8. Check that sizes of mask, masked image and latents match
- num_channels_mask = mask.shape[1]
- num_channels_masked_image = masked_image_latents.shape[1]
- if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
- raise ValueError(
- f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
- f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
- f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
- f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
- " `pipeline.unet` or your `mask_image` or `image` input."
- )
-
- # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
-
- # 10. Denoising loop
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
- with self.progress_bar(total=num_inference_steps) as progress_bar:
- for i, t in enumerate(timesteps):
- # expand the latents if we are doing classifier free guidance
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
-
- # concat latents, mask, masked_image_latents in the channel dimension
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
- latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
-
- # predict the noise residual
- noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
-
- # perform guidance
- if do_classifier_free_guidance:
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
- # compute the previous noisy sample x_t -> x_t-1
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
-
- # call the callback, if provided
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
- progress_bar.update()
- if callback is not None and i % callback_steps == 0:
- callback(i, t, latents)
-
- # 11. Post-processing
- image = self.decode_latents(latents)
-
- # 12. Run safety checker
- image, has_nsfw_concept = self.run_safety_checker(image, device, text_embeddings.dtype)
-
- # 13. Convert to PIL
- if output_type == "pil":
- image = self.numpy_to_pil(image)
-
- if not return_dict:
- return (image, has_nsfw_concept)
-
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
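For reference, an end-to-end call with the signature defined above would look roughly like the following; the class name, checkpoint, and file paths are assumptions made for the sketch.

```python
import PIL.Image
import torch
from diffusers import StableDiffusionInpaintPipeline  # assumed class exposing this __call__

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16
).to("cuda")

image = PIL.Image.open("photo.png").convert("RGB").resize((512, 512))
mask = PIL.Image.open("mask.png").convert("L").resize((512, 512))  # white pixels get repainted

result = pipe(
    prompt="a red brick wall",
    image=image,
    mask_image=mask,
    num_inference_steps=50,
    guidance_scale=7.5,
    generator=torch.Generator("cuda").manual_seed(0),
)
result.images[0].save("inpainted.png")
```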
diff --git a/spaces/Jaehan/zero-shot-classification-1/README.md b/spaces/Jaehan/zero-shot-classification-1/README.md
deleted file mode 100644
index 02938296552c74561e8650c18c21db72fbd1aa65..0000000000000000000000000000000000000000
--- a/spaces/Jaehan/zero-shot-classification-1/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Zero Shot Classification 1
-emoji: 🏃
-colorFrom: green
-colorTo: green
-sdk: gradio
-sdk_version: 3.35.2
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Jasonyoyo/CodeFormer/CodeFormer/basicsr/models/__init__.py b/spaces/Jasonyoyo/CodeFormer/CodeFormer/basicsr/models/__init__.py
deleted file mode 100644
index 00bde45f003698a5b15d3517ae47b59ef1d86e0c..0000000000000000000000000000000000000000
--- a/spaces/Jasonyoyo/CodeFormer/CodeFormer/basicsr/models/__init__.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import importlib
-from copy import deepcopy
-from os import path as osp
-
-from basicsr.utils import get_root_logger, scandir
-from basicsr.utils.registry import MODEL_REGISTRY
-
-__all__ = ['build_model']
-
-# automatically scan and import model modules for registry
-# scan all the files under the 'models' folder and collect files ending with
-# '_model.py'
-model_folder = osp.dirname(osp.abspath(__file__))
-model_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(model_folder) if v.endswith('_model.py')]
-# import all the model modules
-_model_modules = [importlib.import_module(f'basicsr.models.{file_name}') for file_name in model_filenames]
-
-
-def build_model(opt):
- """Build model from options.
-
- Args:
-        opt (dict): Configuration. It must contain:
- model_type (str): Model type.
- """
- opt = deepcopy(opt)
- model = MODEL_REGISTRY.get(opt['model_type'])(opt)
- logger = get_root_logger()
- logger.info(f'Model [{model.__class__.__name__}] is created.')
- return model
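The scan-and-import above is what makes the registry work: any `*_model.py` dropped into this folder is imported, so its registration decorator runs before `build_model` is called. A hypothetical model file would look like this (file name and class body are placeholders for the sketch):

```python
# basicsr/models/my_toy_model.py  (hypothetical file name)
from basicsr.utils.registry import MODEL_REGISTRY


@MODEL_REGISTRY.register()
class MyToyModel:
    """Placeholder model; a real one would subclass a basicsr base model."""

    def __init__(self, opt):
        self.opt = opt


# Afterwards, build_model(dict(model_type='MyToyModel', ...)) resolves the class by name.
```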
diff --git a/spaces/Kangarroar/ApplioRVC-Inference/lib/infer_pack/commons.py b/spaces/Kangarroar/ApplioRVC-Inference/lib/infer_pack/commons.py
deleted file mode 100644
index 54470986f37825b35d90d7efa7437d1c26b87215..0000000000000000000000000000000000000000
--- a/spaces/Kangarroar/ApplioRVC-Inference/lib/infer_pack/commons.py
+++ /dev/null
@@ -1,166 +0,0 @@
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-
-def init_weights(m, mean=0.0, std=0.01):
- classname = m.__class__.__name__
- if classname.find("Conv") != -1:
- m.weight.data.normal_(mean, std)
-
-
-def get_padding(kernel_size, dilation=1):
- return int((kernel_size * dilation - dilation) / 2)
-
-
-def convert_pad_shape(pad_shape):
- l = pad_shape[::-1]
- pad_shape = [item for sublist in l for item in sublist]
- return pad_shape
-
-
-def kl_divergence(m_p, logs_p, m_q, logs_q):
- """KL(P||Q)"""
- kl = (logs_q - logs_p) - 0.5
- kl += (
- 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
- )
- return kl
-
-
-def rand_gumbel(shape):
- """Sample from the Gumbel distribution, protect from overflows."""
- uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
- return -torch.log(-torch.log(uniform_samples))
-
-
-def rand_gumbel_like(x):
- g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
- return g
-
-
-def slice_segments(x, ids_str, segment_size=4):
- ret = torch.zeros_like(x[:, :, :segment_size])
- for i in range(x.size(0)):
- idx_str = ids_str[i]
- idx_end = idx_str + segment_size
- ret[i] = x[i, :, idx_str:idx_end]
- return ret
-
-
-def slice_segments2(x, ids_str, segment_size=4):
- ret = torch.zeros_like(x[:, :segment_size])
- for i in range(x.size(0)):
- idx_str = ids_str[i]
- idx_end = idx_str + segment_size
- ret[i] = x[i, idx_str:idx_end]
- return ret
-
-
-def rand_slice_segments(x, x_lengths=None, segment_size=4):
- b, d, t = x.size()
- if x_lengths is None:
- x_lengths = t
- ids_str_max = x_lengths - segment_size + 1
- ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
- ret = slice_segments(x, ids_str, segment_size)
- return ret, ids_str
-
-
-def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
- position = torch.arange(length, dtype=torch.float)
- num_timescales = channels // 2
- log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (
- num_timescales - 1
- )
- inv_timescales = min_timescale * torch.exp(
- torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment
- )
- scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
- signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
- signal = F.pad(signal, [0, 0, 0, channels % 2])
- signal = signal.view(1, channels, length)
- return signal
-
-
-def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
- b, channels, length = x.size()
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
- return x + signal.to(dtype=x.dtype, device=x.device)
-
-
-def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
- b, channels, length = x.size()
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
- return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
-
-
-def subsequent_mask(length):
- mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
- return mask
-
-
-@torch.jit.script
-def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
- n_channels_int = n_channels[0]
- in_act = input_a + input_b
- t_act = torch.tanh(in_act[:, :n_channels_int, :])
- s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
- acts = t_act * s_act
- return acts
-
-
-def convert_pad_shape(pad_shape):
- l = pad_shape[::-1]
- pad_shape = [item for sublist in l for item in sublist]
- return pad_shape
-
-
-def shift_1d(x):
- x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
- return x
-
-
-def sequence_mask(length, max_length=None):
- if max_length is None:
- max_length = length.max()
- x = torch.arange(max_length, dtype=length.dtype, device=length.device)
- return x.unsqueeze(0) < length.unsqueeze(1)
-
-
-def generate_path(duration, mask):
- """
- duration: [b, 1, t_x]
- mask: [b, 1, t_y, t_x]
- """
- device = duration.device
-
- b, _, t_y, t_x = mask.shape
- cum_duration = torch.cumsum(duration, -1)
-
- cum_duration_flat = cum_duration.view(b * t_x)
- path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
- path = path.view(b, t_x, t_y)
- path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
- path = path.unsqueeze(1).transpose(2, 3) * mask
- return path
-
-
-def clip_grad_value_(parameters, clip_value, norm_type=2):
- if isinstance(parameters, torch.Tensor):
- parameters = [parameters]
- parameters = list(filter(lambda p: p.grad is not None, parameters))
- norm_type = float(norm_type)
- if clip_value is not None:
- clip_value = float(clip_value)
-
- total_norm = 0
- for p in parameters:
- param_norm = p.grad.data.norm(norm_type)
- total_norm += param_norm.item() ** norm_type
- if clip_value is not None:
- p.grad.data.clamp_(min=-clip_value, max=clip_value)
- total_norm = total_norm ** (1.0 / norm_type)
- return total_norm
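A couple of the helpers above are easy to sanity-check by hand, which also documents the conventions they follow (assuming this module is imported so the functions are in scope):

```python
import torch

lengths = torch.tensor([2, 3])
print(sequence_mask(lengths, max_length=4))
# tensor([[ True,  True, False, False],
#         [ True,  True,  True, False]])

print(subsequent_mask(3).squeeze())
# tensor([[1., 0., 0.],
#         [1., 1., 0.],
#         [1., 1., 1.]])  -- lower-triangular causal mask
```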
diff --git a/spaces/KarmKarma/genshinimpact-rvc-models-v2/lib/infer_pack/commons.py b/spaces/KarmKarma/genshinimpact-rvc-models-v2/lib/infer_pack/commons.py
deleted file mode 100644
index 54470986f37825b35d90d7efa7437d1c26b87215..0000000000000000000000000000000000000000
--- a/spaces/KarmKarma/genshinimpact-rvc-models-v2/lib/infer_pack/commons.py
+++ /dev/null
@@ -1,166 +0,0 @@
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-
-def init_weights(m, mean=0.0, std=0.01):
- classname = m.__class__.__name__
- if classname.find("Conv") != -1:
- m.weight.data.normal_(mean, std)
-
-
-def get_padding(kernel_size, dilation=1):
- return int((kernel_size * dilation - dilation) / 2)
-
-
-def convert_pad_shape(pad_shape):
- l = pad_shape[::-1]
- pad_shape = [item for sublist in l for item in sublist]
- return pad_shape
-
-
-def kl_divergence(m_p, logs_p, m_q, logs_q):
- """KL(P||Q)"""
- kl = (logs_q - logs_p) - 0.5
- kl += (
- 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
- )
- return kl
-
-
-def rand_gumbel(shape):
- """Sample from the Gumbel distribution, protect from overflows."""
- uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
- return -torch.log(-torch.log(uniform_samples))
-
-
-def rand_gumbel_like(x):
- g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
- return g
-
-
-def slice_segments(x, ids_str, segment_size=4):
- ret = torch.zeros_like(x[:, :, :segment_size])
- for i in range(x.size(0)):
- idx_str = ids_str[i]
- idx_end = idx_str + segment_size
- ret[i] = x[i, :, idx_str:idx_end]
- return ret
-
-
-def slice_segments2(x, ids_str, segment_size=4):
- ret = torch.zeros_like(x[:, :segment_size])
- for i in range(x.size(0)):
- idx_str = ids_str[i]
- idx_end = idx_str + segment_size
- ret[i] = x[i, idx_str:idx_end]
- return ret
-
-
-def rand_slice_segments(x, x_lengths=None, segment_size=4):
- b, d, t = x.size()
- if x_lengths is None:
- x_lengths = t
- ids_str_max = x_lengths - segment_size + 1
- ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
- ret = slice_segments(x, ids_str, segment_size)
- return ret, ids_str
-
-
-def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
- position = torch.arange(length, dtype=torch.float)
- num_timescales = channels // 2
- log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (
- num_timescales - 1
- )
- inv_timescales = min_timescale * torch.exp(
- torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment
- )
- scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
- signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
- signal = F.pad(signal, [0, 0, 0, channels % 2])
- signal = signal.view(1, channels, length)
- return signal
-
-
-def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
- b, channels, length = x.size()
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
- return x + signal.to(dtype=x.dtype, device=x.device)
-
-
-def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
- b, channels, length = x.size()
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
- return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
-
-
-def subsequent_mask(length):
- mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
- return mask
-
-
-@torch.jit.script
-def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
- n_channels_int = n_channels[0]
- in_act = input_a + input_b
- t_act = torch.tanh(in_act[:, :n_channels_int, :])
- s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
- acts = t_act * s_act
- return acts
-
-
-def convert_pad_shape(pad_shape):
- l = pad_shape[::-1]
- pad_shape = [item for sublist in l for item in sublist]
- return pad_shape
-
-
-def shift_1d(x):
- x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
- return x
-
-
-def sequence_mask(length, max_length=None):
- if max_length is None:
- max_length = length.max()
- x = torch.arange(max_length, dtype=length.dtype, device=length.device)
- return x.unsqueeze(0) < length.unsqueeze(1)
-
-
-def generate_path(duration, mask):
- """
- duration: [b, 1, t_x]
- mask: [b, 1, t_y, t_x]
- """
- device = duration.device
-
- b, _, t_y, t_x = mask.shape
- cum_duration = torch.cumsum(duration, -1)
-
- cum_duration_flat = cum_duration.view(b * t_x)
- path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
- path = path.view(b, t_x, t_y)
- path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
- path = path.unsqueeze(1).transpose(2, 3) * mask
- return path
-
-
-def clip_grad_value_(parameters, clip_value, norm_type=2):
- if isinstance(parameters, torch.Tensor):
- parameters = [parameters]
- parameters = list(filter(lambda p: p.grad is not None, parameters))
- norm_type = float(norm_type)
- if clip_value is not None:
- clip_value = float(clip_value)
-
- total_norm = 0
- for p in parameters:
- param_norm = p.grad.data.norm(norm_type)
- total_norm += param_norm.item() ** norm_type
- if clip_value is not None:
- p.grad.data.clamp_(min=-clip_value, max=clip_value)
- total_norm = total_norm ** (1.0 / norm_type)
- return total_norm
diff --git a/spaces/KenjieDec/GPEN/README.md b/spaces/KenjieDec/GPEN/README.md
deleted file mode 100644
index d285e588ff3f89adbf3b59e2715e2500edeca0ac..0000000000000000000000000000000000000000
--- a/spaces/KenjieDec/GPEN/README.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-title: GPEN
-emoji: 💩
-colorFrom: blue
-colorTo: red
-sdk: gradio
-app_file: app.py
-pinned: false
----
\ No newline at end of file
diff --git a/spaces/Kevin676/Real-Time-Voice-Cloning/synthesizer/utils/__init__.py b/spaces/Kevin676/Real-Time-Voice-Cloning/synthesizer/utils/__init__.py
deleted file mode 100644
index 5ae3e48110e61231acf1e666e5fa76af5e4ebdcd..0000000000000000000000000000000000000000
--- a/spaces/Kevin676/Real-Time-Voice-Cloning/synthesizer/utils/__init__.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import torch
-
-
-_output_ref = None
-_replicas_ref = None
-
-def data_parallel_workaround(model, *input):
- global _output_ref
- global _replicas_ref
- device_ids = list(range(torch.cuda.device_count()))
- output_device = device_ids[0]
- replicas = torch.nn.parallel.replicate(model, device_ids)
- # input.shape = (num_args, batch, ...)
- inputs = torch.nn.parallel.scatter(input, device_ids)
- # inputs.shape = (num_gpus, num_args, batch/num_gpus, ...)
- replicas = replicas[:len(inputs)]
- outputs = torch.nn.parallel.parallel_apply(replicas, inputs)
- y_hat = torch.nn.parallel.gather(outputs, output_device)
- _output_ref = outputs
- _replicas_ref = replicas
- return y_hat
-
-
-class ValueWindow():
- def __init__(self, window_size=100):
- self._window_size = window_size
- self._values = []
-
- def append(self, x):
- self._values = self._values[-(self._window_size - 1):] + [x]
-
- @property
- def sum(self):
- return sum(self._values)
-
- @property
- def count(self):
- return len(self._values)
-
- @property
- def average(self):
- return self.sum / max(1, self.count)
-
- def reset(self):
- self._values = []
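`ValueWindow` is a small rolling-average helper, typically used to smooth a loss readout during training; a worked example:

```python
loss_window = ValueWindow(window_size=3)
for loss in [4.0, 3.0, 2.0, 1.0]:
    loss_window.append(loss)

print(loss_window.count)    # 3   -- only the last `window_size` values are kept
print(loss_window.average)  # 2.0 -- (3.0 + 2.0 + 1.0) / 3
```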
diff --git a/spaces/KyanChen/RSPrompter/mmdet/engine/schedulers/quadratic_warmup.py b/spaces/KyanChen/RSPrompter/mmdet/engine/schedulers/quadratic_warmup.py
deleted file mode 100644
index 639b47854887786bf3f81d6d0a375033d190d91e..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmdet/engine/schedulers/quadratic_warmup.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from mmengine.optim.scheduler.lr_scheduler import LRSchedulerMixin
-from mmengine.optim.scheduler.momentum_scheduler import MomentumSchedulerMixin
-from mmengine.optim.scheduler.param_scheduler import INF, _ParamScheduler
-from torch.optim import Optimizer
-
-from mmdet.registry import PARAM_SCHEDULERS
-
-
-@PARAM_SCHEDULERS.register_module()
-class QuadraticWarmupParamScheduler(_ParamScheduler):
- r"""Warm up the parameter value of each parameter group by quadratic
- formula:
-
- .. math::
-
- X_{t} = X_{t-1} + \frac{2t+1}{{(end-begin)}^{2}} \times X_{base}
-
- Args:
- optimizer (Optimizer): Wrapped optimizer.
- param_name (str): Name of the parameter to be adjusted, such as
- ``lr``, ``momentum``.
- begin (int): Step at which to start updating the parameters.
- Defaults to 0.
- end (int): Step at which to stop updating the parameters.
- Defaults to INF.
- last_step (int): The index of last step. Used for resume without
- state dict. Defaults to -1.
- by_epoch (bool): Whether the scheduled parameters are updated by
- epochs. Defaults to True.
- verbose (bool): Whether to print the value for each update.
- Defaults to False.
- """
-
- def __init__(self,
- optimizer: Optimizer,
- param_name: str,
- begin: int = 0,
- end: int = INF,
- last_step: int = -1,
- by_epoch: bool = True,
- verbose: bool = False):
- if end >= INF:
-            raise ValueError('``end`` must be less than infinity, '
-                             'please set the ``end`` parameter of '
-                             '``QuadraticWarmupScheduler`` to the '
-                             'number of warmup iterations.')
- self.total_iters = end - begin
- super().__init__(
- optimizer=optimizer,
- param_name=param_name,
- begin=begin,
- end=end,
- last_step=last_step,
- by_epoch=by_epoch,
- verbose=verbose)
-
- @classmethod
- def build_iter_from_epoch(cls,
- *args,
- begin=0,
- end=INF,
- by_epoch=True,
- epoch_length=None,
- **kwargs):
- """Build an iter-based instance of this scheduler from an epoch-based
- config."""
- assert by_epoch, 'Only epoch-based kwargs whose `by_epoch=True` can ' \
- 'be converted to iter-based.'
- assert epoch_length is not None and epoch_length > 0, \
- f'`epoch_length` must be a positive integer, ' \
- f'but got {epoch_length}.'
- by_epoch = False
- begin = begin * epoch_length
- if end != INF:
- end = end * epoch_length
- return cls(*args, begin=begin, end=end, by_epoch=by_epoch, **kwargs)
-
- def _get_value(self):
- """Compute value using chainable form of the scheduler."""
- if self.last_step == 0:
- return [
- base_value * (2 * self.last_step + 1) / self.total_iters**2
- for base_value in self.base_values
- ]
-
- return [
- group[self.param_name] + base_value *
- (2 * self.last_step + 1) / self.total_iters**2
- for base_value, group in zip(self.base_values,
- self.optimizer.param_groups)
- ]
-
-
-@PARAM_SCHEDULERS.register_module()
-class QuadraticWarmupLR(LRSchedulerMixin, QuadraticWarmupParamScheduler):
- """Warm up the learning rate of each parameter group by quadratic formula.
-
- Args:
- optimizer (Optimizer): Wrapped optimizer.
- begin (int): Step at which to start updating the parameters.
- Defaults to 0.
- end (int): Step at which to stop updating the parameters.
- Defaults to INF.
- last_step (int): The index of last step. Used for resume without
- state dict. Defaults to -1.
- by_epoch (bool): Whether the scheduled parameters are updated by
- epochs. Defaults to True.
- verbose (bool): Whether to print the value for each update.
- Defaults to False.
- """
-
-
-@PARAM_SCHEDULERS.register_module()
-class QuadraticWarmupMomentum(MomentumSchedulerMixin,
- QuadraticWarmupParamScheduler):
- """Warm up the momentum value of each parameter group by quadratic formula.
-
- Args:
- optimizer (Optimizer): Wrapped optimizer.
- begin (int): Step at which to start updating the parameters.
- Defaults to 0.
- end (int): Step at which to stop updating the parameters.
- Defaults to INF.
- last_step (int): The index of last step. Used for resume without
- state dict. Defaults to -1.
- by_epoch (bool): Whether the scheduled parameters are updated by
- epochs. Defaults to True.
- verbose (bool): Whether to print the value for each update.
- Defaults to False.
- """
diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/dense_heads/dab_detr_head.py b/spaces/KyanChen/RSPrompter/mmdet/models/dense_heads/dab_detr_head.py
deleted file mode 100644
index 892833ffce5f17f6f9e82e67b7d32c6b9c1bafc0..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmdet/models/dense_heads/dab_detr_head.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from typing import Tuple
-
-import torch.nn as nn
-from mmcv.cnn import Linear
-from mmengine.model import bias_init_with_prob, constant_init
-from torch import Tensor
-
-from mmdet.registry import MODELS
-from mmdet.structures import SampleList
-from mmdet.utils import InstanceList
-from ..layers import MLP, inverse_sigmoid
-from .conditional_detr_head import ConditionalDETRHead
-
-
-@MODELS.register_module()
-class DABDETRHead(ConditionalDETRHead):
- """Head of DAB-DETR. DAB-DETR: Dynamic Anchor Boxes are Better Queries for
- DETR.
-
- More details can be found in the `paper
- `_ .
- """
-
- def _init_layers(self) -> None:
- """Initialize layers of the transformer head."""
- # cls branch
- self.fc_cls = Linear(self.embed_dims, self.cls_out_channels)
- # reg branch
- self.fc_reg = MLP(self.embed_dims, self.embed_dims, 4, 3)
-
- def init_weights(self) -> None:
- """initialize weights."""
- if self.loss_cls.use_sigmoid:
- bias_init = bias_init_with_prob(0.01)
- nn.init.constant_(self.fc_cls.bias, bias_init)
- constant_init(self.fc_reg.layers[-1], 0., bias=0.)
-
- def forward(self, hidden_states: Tensor,
- references: Tensor) -> Tuple[Tensor, Tensor]:
- """"Forward function.
-
- Args:
- hidden_states (Tensor): Features from transformer decoder. If
- `return_intermediate_dec` is True output has shape
- (num_decoder_layers, bs, num_queries, dim), else has shape (1,
- bs, num_queries, dim) which only contains the last layer
- outputs.
- references (Tensor): References from transformer decoder. If
- `return_intermediate_dec` is True output has shape
- (num_decoder_layers, bs, num_queries, 2/4), else has shape (1,
- bs, num_queries, 2/4)
- which only contains the last layer reference.
- Returns:
- tuple[Tensor]: results of head containing the following tensor.
-
- - layers_cls_scores (Tensor): Outputs from the classification head,
- shape (num_decoder_layers, bs, num_queries, cls_out_channels).
- Note cls_out_channels should include background.
- - layers_bbox_preds (Tensor): Sigmoid outputs from the regression
- head with normalized coordinate format (cx, cy, w, h), has shape
- (num_decoder_layers, bs, num_queries, 4).
- """
- layers_cls_scores = self.fc_cls(hidden_states)
- references_before_sigmoid = inverse_sigmoid(references, eps=1e-3)
- tmp_reg_preds = self.fc_reg(hidden_states)
- tmp_reg_preds[..., :references_before_sigmoid.
- size(-1)] += references_before_sigmoid
- layers_bbox_preds = tmp_reg_preds.sigmoid()
- return layers_cls_scores, layers_bbox_preds
-
- def predict(self,
- hidden_states: Tensor,
- references: Tensor,
- batch_data_samples: SampleList,
- rescale: bool = True) -> InstanceList:
- """Perform forward propagation of the detection head and predict
-        detection results on the features of the upstream network. This method is
-        overridden because img_metas are needed as inputs for bbox_head.
-
- Args:
- hidden_states (Tensor): Feature from the transformer decoder, has
- shape (num_decoder_layers, bs, num_queries, dim).
- references (Tensor): references from the transformer decoder, has
- shape (num_decoder_layers, bs, num_queries, 2/4).
- batch_data_samples (List[:obj:`DetDataSample`]): The Data
- Samples. It usually includes information such as
- `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
- rescale (bool, optional): Whether to rescale the results.
- Defaults to True.
-
- Returns:
- list[obj:`InstanceData`]: Detection results of each image
- after the post process.
- """
- batch_img_metas = [
- data_samples.metainfo for data_samples in batch_data_samples
- ]
-
- last_layer_hidden_state = hidden_states[-1].unsqueeze(0)
- last_layer_reference = references[-1].unsqueeze(0)
- outs = self(last_layer_hidden_state, last_layer_reference)
-
- predictions = self.predict_by_feat(
- *outs, batch_img_metas=batch_img_metas, rescale=rescale)
- return predictions
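The interesting part of `forward` is that `fc_reg` predicts an offset in inverse-sigmoid (logit) space which is added to the reference anchors before squashing back into [0, 1]; a stand-alone numeric sketch with made-up values:

```python
import torch

references = torch.tensor([[0.25, 0.50, 0.10, 0.20]])  # (cx, cy, w, h) of one query
reg_delta = torch.zeros(1, 4)                           # what fc_reg would output

logits = torch.logit(references, eps=1e-3) + reg_delta  # inverse_sigmoid + predicted offset
print(torch.sigmoid(logits))                            # ~[[0.25, 0.50, 0.10, 0.20]]
```

With a zero offset the reference boxes are reproduced unchanged; a non-zero offset nudges each coordinate while keeping the result inside the unit range.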
diff --git a/spaces/KyanChen/RSPrompter/mmdet/testing/_utils.py b/spaces/KyanChen/RSPrompter/mmdet/testing/_utils.py
deleted file mode 100644
index ce74376250ee3bddc8d4740aed57699771e5af75..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmdet/testing/_utils.py
+++ /dev/null
@@ -1,317 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import copy
-from os.path import dirname, exists, join
-
-import numpy as np
-import torch
-from mmengine.config import Config
-from mmengine.dataset import pseudo_collate
-from mmengine.structures import InstanceData, PixelData
-
-from ..registry import TASK_UTILS
-from ..structures import DetDataSample
-from ..structures.bbox import HorizontalBoxes
-
-
-def _get_config_directory():
- """Find the predefined detector config directory."""
- try:
- # Assume we are running in the source mmdetection repo
- repo_dpath = dirname(dirname(dirname(__file__)))
- except NameError:
- # For IPython development when this __file__ is not defined
- import mmdet
- repo_dpath = dirname(dirname(mmdet.__file__))
- config_dpath = join(repo_dpath, 'configs')
- if not exists(config_dpath):
- raise Exception('Cannot find config path')
- return config_dpath
-
-
-def _get_config_module(fname):
- """Load a configuration as a python module."""
- config_dpath = _get_config_directory()
- config_fpath = join(config_dpath, fname)
- config_mod = Config.fromfile(config_fpath)
- return config_mod
-
-
-def get_detector_cfg(fname):
- """Grab configs necessary to create a detector.
-
- These are deep copied to allow for safe modification of parameters without
- influencing other tests.
- """
- config = _get_config_module(fname)
- model = copy.deepcopy(config.model)
- return model
-
-
-def get_roi_head_cfg(fname):
- """Grab configs necessary to create a roi_head.
-
- These are deep copied to allow for safe modification of parameters without
- influencing other tests.
- """
- config = _get_config_module(fname)
- model = copy.deepcopy(config.model)
-
- roi_head = model.roi_head
- train_cfg = None if model.train_cfg is None else model.train_cfg.rcnn
- test_cfg = None if model.test_cfg is None else model.test_cfg.rcnn
- roi_head.update(dict(train_cfg=train_cfg, test_cfg=test_cfg))
- return roi_head
-
-
-def _rand_bboxes(rng, num_boxes, w, h):
- cx, cy, bw, bh = rng.rand(num_boxes, 4).T
-
- tl_x = ((cx * w) - (w * bw / 2)).clip(0, w)
- tl_y = ((cy * h) - (h * bh / 2)).clip(0, h)
- br_x = ((cx * w) + (w * bw / 2)).clip(0, w)
- br_y = ((cy * h) + (h * bh / 2)).clip(0, h)
-
- bboxes = np.vstack([tl_x, tl_y, br_x, br_y]).T
- return bboxes
-
-
-def _rand_masks(rng, num_boxes, bboxes, img_w, img_h):
- from mmdet.structures.mask import BitmapMasks
- masks = np.zeros((num_boxes, img_h, img_w))
- for i, bbox in enumerate(bboxes):
- bbox = bbox.astype(np.int32)
- mask = (rng.rand(1, bbox[3] - bbox[1], bbox[2] - bbox[0]) >
- 0.3).astype(np.int64)
- masks[i:i + 1, bbox[1]:bbox[3], bbox[0]:bbox[2]] = mask
- return BitmapMasks(masks, height=img_h, width=img_w)
-
-
-def demo_mm_inputs(batch_size=2,
- image_shapes=(3, 128, 128),
- num_items=None,
- num_classes=10,
- sem_seg_output_strides=1,
- with_mask=False,
- with_semantic=False,
- use_box_type=False,
- device='cpu'):
- """Create a superset of inputs needed to run test or train batches.
-
- Args:
- batch_size (int): batch size. Defaults to 2.
- image_shapes (List[tuple], Optional): image shape.
- Defaults to (3, 128, 128)
- num_items (None | List[int]): specifies the number
- of boxes in each batch item. Default to None.
- num_classes (int): number of different labels a
- box might have. Defaults to 10.
- with_mask (bool): Whether to return mask annotation.
- Defaults to False.
- with_semantic (bool): whether to return semantic.
- Defaults to False.
- device (str): Destination device type. Defaults to cpu.
- """
- rng = np.random.RandomState(0)
-
- if isinstance(image_shapes, list):
- assert len(image_shapes) == batch_size
- else:
- image_shapes = [image_shapes] * batch_size
-
- if isinstance(num_items, list):
- assert len(num_items) == batch_size
-
- packed_inputs = []
- for idx in range(batch_size):
- image_shape = image_shapes[idx]
- c, h, w = image_shape
-
- image = rng.randint(0, 255, size=image_shape, dtype=np.uint8)
-
- mm_inputs = dict()
- mm_inputs['inputs'] = torch.from_numpy(image).to(device)
-
- img_meta = {
- 'img_id': idx,
- 'img_shape': image_shape[1:],
- 'ori_shape': image_shape[1:],
- 'filename': '.png',
- 'scale_factor': np.array([1.1, 1.2]),
- 'flip': False,
- 'flip_direction': None,
- 'border': [1, 1, 1, 1] # Only used by CenterNet
- }
-
- data_sample = DetDataSample()
- data_sample.set_metainfo(img_meta)
-
- # gt_instances
- gt_instances = InstanceData()
- if num_items is None:
- num_boxes = rng.randint(1, 10)
- else:
- num_boxes = num_items[idx]
-
- bboxes = _rand_bboxes(rng, num_boxes, w, h)
- labels = rng.randint(1, num_classes, size=num_boxes)
- # TODO: remove this part when all model adapted with BaseBoxes
- if use_box_type:
- gt_instances.bboxes = HorizontalBoxes(bboxes, dtype=torch.float32)
- else:
- gt_instances.bboxes = torch.FloatTensor(bboxes)
- gt_instances.labels = torch.LongTensor(labels)
-
- if with_mask:
- masks = _rand_masks(rng, num_boxes, bboxes, w, h)
- gt_instances.masks = masks
-
- # TODO: waiting for ci to be fixed
- # masks = np.random.randint(0, 2, (len(bboxes), h, w), dtype=np.uint8)
- # gt_instances.mask = BitmapMasks(masks, h, w)
-
- data_sample.gt_instances = gt_instances
-
- # ignore_instances
- ignore_instances = InstanceData()
- bboxes = _rand_bboxes(rng, num_boxes, w, h)
- if use_box_type:
- ignore_instances.bboxes = HorizontalBoxes(
- bboxes, dtype=torch.float32)
- else:
- ignore_instances.bboxes = torch.FloatTensor(bboxes)
- data_sample.ignored_instances = ignore_instances
-
- # gt_sem_seg
- if with_semantic:
- # assume gt_semantic_seg using scale 1/8 of the img
- gt_semantic_seg = torch.from_numpy(
- np.random.randint(
- 0,
- num_classes, (1, h // sem_seg_output_strides,
- w // sem_seg_output_strides),
- dtype=np.uint8))
- gt_sem_seg_data = dict(sem_seg=gt_semantic_seg)
- data_sample.gt_sem_seg = PixelData(**gt_sem_seg_data)
-
- mm_inputs['data_samples'] = data_sample.to(device)
-
- # TODO: gt_ignore
-
- packed_inputs.append(mm_inputs)
- data = pseudo_collate(packed_inputs)
- return data
-
-
-def demo_mm_proposals(image_shapes, num_proposals, device='cpu'):
- """Create a list of fake porposals.
-
- Args:
- image_shapes (list[tuple[int]]): Batch image shapes.
- num_proposals (int): The number of fake proposals.
- """
- rng = np.random.RandomState(0)
-
- results = []
- for img_shape in image_shapes:
- result = InstanceData()
-        h, w = img_shape[1:]  # image_shape is (c, h, w); keep width/height consistent with _rand_bboxes
- proposals = _rand_bboxes(rng, num_proposals, w, h)
- result.bboxes = torch.from_numpy(proposals).float()
- result.scores = torch.from_numpy(rng.rand(num_proposals)).float()
- result.labels = torch.zeros(num_proposals).long()
- results.append(result.to(device))
- return results
-
-
-def demo_mm_sampling_results(proposals_list,
- batch_gt_instances,
- batch_gt_instances_ignore=None,
- assigner_cfg=None,
- sampler_cfg=None,
- feats=None):
- """Create sample results that can be passed to BBoxHead.get_targets."""
- assert len(proposals_list) == len(batch_gt_instances)
- if batch_gt_instances_ignore is None:
- batch_gt_instances_ignore = [None for _ in batch_gt_instances]
- else:
- assert len(batch_gt_instances_ignore) == len(batch_gt_instances)
-
- default_assigner_cfg = dict(
- type='MaxIoUAssigner',
- pos_iou_thr=0.5,
- neg_iou_thr=0.5,
- min_pos_iou=0.5,
- ignore_iof_thr=-1)
- assigner_cfg = assigner_cfg if assigner_cfg is not None \
- else default_assigner_cfg
- default_sampler_cfg = dict(
- type='RandomSampler',
- num=512,
- pos_fraction=0.25,
- neg_pos_ub=-1,
- add_gt_as_proposals=True)
- sampler_cfg = sampler_cfg if sampler_cfg is not None \
- else default_sampler_cfg
- bbox_assigner = TASK_UTILS.build(assigner_cfg)
- bbox_sampler = TASK_UTILS.build(sampler_cfg)
-
- sampling_results = []
- for i in range(len(batch_gt_instances)):
- if feats is not None:
- feats = [lvl_feat[i][None] for lvl_feat in feats]
- # rename proposals.bboxes to proposals.priors
- proposals = proposals_list[i]
- proposals.priors = proposals.pop('bboxes')
-
- assign_result = bbox_assigner.assign(proposals, batch_gt_instances[i],
- batch_gt_instances_ignore[i])
- sampling_result = bbox_sampler.sample(
- assign_result, proposals, batch_gt_instances[i], feats=feats)
- sampling_results.append(sampling_result)
-
- return sampling_results
-
-
-# TODO: Support full ceph
-def replace_to_ceph(cfg):
- backend_args = dict(
- backend='petrel',
- path_mapping=dict({
- './data/': 's3://openmmlab/datasets/detection/',
- 'data/': 's3://openmmlab/datasets/detection/'
- }))
-
- # TODO: name is a reserved interface, which will be used later.
- def _process_pipeline(dataset, name):
-
- def replace_img(pipeline):
- if pipeline['type'] == 'LoadImageFromFile':
- pipeline['backend_args'] = backend_args
-
- def replace_ann(pipeline):
- if pipeline['type'] == 'LoadAnnotations' or pipeline[
- 'type'] == 'LoadPanopticAnnotations':
- pipeline['backend_args'] = backend_args
-
- if 'pipeline' in dataset:
- replace_img(dataset.pipeline[0])
- replace_ann(dataset.pipeline[1])
- if 'dataset' in dataset:
- # dataset wrapper
- replace_img(dataset.dataset.pipeline[0])
- replace_ann(dataset.dataset.pipeline[1])
- else:
- # dataset wrapper
- replace_img(dataset.dataset.pipeline[0])
- replace_ann(dataset.dataset.pipeline[1])
-
- def _process_evaluator(evaluator, name):
- if evaluator['type'] == 'CocoPanopticMetric':
- evaluator['backend_args'] = backend_args
-
- # half ceph
- _process_pipeline(cfg.train_dataloader.dataset, cfg.filename)
- _process_pipeline(cfg.val_dataloader.dataset, cfg.filename)
- _process_pipeline(cfg.test_dataloader.dataset, cfg.filename)
- _process_evaluator(cfg.val_evaluator, cfg.filename)
- _process_evaluator(cfg.test_evaluator, cfg.filename)
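For orientation, a typical call to the test-data helper above; the shapes and box counts are arbitrary values chosen for the example:

```python
data = demo_mm_inputs(
    batch_size=2,
    image_shapes=[(3, 128, 128), (3, 125, 130)],
    num_items=[3, 4],            # 3 GT boxes in the first sample, 4 in the second
    num_classes=10,
    with_mask=True,
)

print(data['inputs'][0].shape)                           # torch.Size([3, 128, 128])
print(data['data_samples'][0].gt_instances.bboxes.shape) # torch.Size([3, 4])
```

Because the result goes through `pseudo_collate`, `data['inputs']` and `data['data_samples']` are lists with one entry per batch item rather than stacked tensors.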
diff --git a/spaces/LLaMaWhisperer/LegalLLaMa/README.md b/spaces/LLaMaWhisperer/LegalLLaMa/README.md
deleted file mode 100644
index 0f8ded56c0fd9833f2cd89a523b67b4d1231cdb0..0000000000000000000000000000000000000000
--- a/spaces/LLaMaWhisperer/LegalLLaMa/README.md
+++ /dev/null
@@ -1,63 +0,0 @@
----
-title: LegalLLaMa
-emoji: 🦙
-colorFrom: yellow
-colorTo: blue
-sdk: streamlit
-sdk_version: 1.25.0
-app_file: app.py
-pinned: false
-license: gpl-3.0
----
-# LegalLLaMa 🦙 (*WORK IN PROGRESS*)
-LegalLLaMa: Your friendly neighborhood lawyer llama, turning legal jargon into a piece of cake!
-
-Legal LLaMa is a chatbot developed to provide summaries of U.S. legislative bills based on user queries. It's built with Hugging Face's Transformers library and hosted on Hugging Face Spaces using Streamlit.
-
-You can interact with the live demo of Legal LLaMa on Hugging Face Spaces [here](https://huggingface.co/spaces/LLaMaWhisperer/legalLLaMa).
-
-The chatbot uses a frame-based dialog management system to handle conversations, and leverages the ProPublica and Congress APIs to fetch information about legislative bills. The summaries of bills are generated using a state-of-the-art text summarization model.
-
-## Features 🎁
-
-- Frame-based dialog management
-- Intent recognition and slot filling
-- Real-time interaction with users
-- Bill retrieval using ProPublica and Congress APIs
-- Bill summarization using Transformer models
-
-## Future Work 💡
-
-Legal LLaMa is still a work in progress, and there are plans to make it even more useful and user-friendly. Here are some of the planned improvements:
-
-- Enhance intent recognition and slot filling using Natural Language Understanding (NLU) models
-- Expand the chatbot's capabilities to handle more tasks, such as providing summaries of recent bills by a particular congressman
-- Train a custom summarization model specifically for legislative texts
-
-## Getting Started 🚀
-
-To get the project running on your local machine, follow these steps:
-
-1. Clone the repository:
-```commandline
-git clone https://github.com/YuvrajSharma9981/LegalLLaMa.git
-```
-2. Install the required packages:
-```commandline
-pip install -r requirements.txt
-```
-
-3. Run the Streamlit app:
-```commandline
-streamlit run app.py
-```
-
-Please note that you will need to obtain API keys from ProPublica and Congress to access their APIs.
-
-## Contributing 🤝
-
-Contributions to improve Legal LLaMa are welcomed. Feel free to submit a pull request or create an issue for any bugs, feature requests, or questions about the project.
-
-## License 📄
-
-This project is licensed under the GPL-3.0 License - see the [LICENSE](LICENSE) file for details.
diff --git a/spaces/Laihiujin/OneFormer/oneformer/config.py b/spaces/Laihiujin/OneFormer/oneformer/config.py
deleted file mode 100644
index 78bc13fd7e3fbc7cff4a3325d851bd15275ae633..0000000000000000000000000000000000000000
--- a/spaces/Laihiujin/OneFormer/oneformer/config.py
+++ /dev/null
@@ -1,239 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) Facebook, Inc. and its affiliates.
-from detectron2.config import CfgNode as CN
-
-__all__ = ["add_common_config", "add_oneformer_config", "add_swin_config",
- "add_dinat_config", "add_beit_adapter_config", "add_convnext_config"]
-
-def add_common_config(cfg):
- """
- Add config for common configuration
- """
- # data config
- # select the dataset mapper
- cfg.INPUT.DATASET_MAPPER_NAME = "oneformer_unified"
- # Color augmentation
- cfg.INPUT.COLOR_AUG_SSD = False
- # We retry random cropping until no single category in semantic segmentation GT occupies more
- # than `SINGLE_CATEGORY_MAX_AREA` part of the crop.
- cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0
- # Pad image and segmentation GT in dataset mapper.
- cfg.INPUT.SIZE_DIVISIBILITY = -1
-
- cfg.INPUT.TASK_SEQ_LEN = 77
- cfg.INPUT.MAX_SEQ_LEN = 77
-
- cfg.INPUT.TASK_PROB = CN()
- cfg.INPUT.TASK_PROB.SEMANTIC = 0.33
- cfg.INPUT.TASK_PROB.INSTANCE = 0.66
-
- # test dataset
- cfg.DATASETS.TEST_PANOPTIC = ("",)
- cfg.DATASETS.TEST_INSTANCE = ("",)
- cfg.DATASETS.TEST_SEMANTIC = ("",)
-
- # solver config
- # weight decay on embedding
- cfg.SOLVER.WEIGHT_DECAY_EMBED = 0.0
- # optimizer
- cfg.SOLVER.OPTIMIZER = "ADAMW"
- cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1
-
- # wandb
- cfg.WANDB = CN()
- cfg.WANDB.PROJECT = "unified_dense_recognition"
- cfg.WANDB.NAME = None
-
- cfg.MODEL.IS_TRAIN = False
- cfg.MODEL.IS_DEMO = True
-
- # text encoder config
- cfg.MODEL.TEXT_ENCODER = CN()
-
- cfg.MODEL.TEXT_ENCODER.WIDTH = 256
- cfg.MODEL.TEXT_ENCODER.CONTEXT_LENGTH = 77
- cfg.MODEL.TEXT_ENCODER.NUM_LAYERS = 12
- cfg.MODEL.TEXT_ENCODER.VOCAB_SIZE = 49408
- cfg.MODEL.TEXT_ENCODER.PROJ_NUM_LAYERS = 2
- cfg.MODEL.TEXT_ENCODER.N_CTX = 16
-
- # mask_former inference config
- cfg.MODEL.TEST = CN()
- cfg.MODEL.TEST.SEMANTIC_ON = True
- cfg.MODEL.TEST.INSTANCE_ON = False
- cfg.MODEL.TEST.PANOPTIC_ON = False
- cfg.MODEL.TEST.DETECTION_ON = False
- cfg.MODEL.TEST.OBJECT_MASK_THRESHOLD = 0.0
- cfg.MODEL.TEST.OVERLAP_THRESHOLD = 0.0
- cfg.MODEL.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE = False
- cfg.MODEL.TEST.TASK = "panoptic"
-
- # TEST AUG Slide
- cfg.TEST.AUG.IS_SLIDE = False
- cfg.TEST.AUG.CROP_SIZE = (640, 640)
- cfg.TEST.AUG.STRIDE = (426, 426)
- cfg.TEST.AUG.SCALE = (2048, 640)
- cfg.TEST.AUG.SETR_MULTI_SCALE = True
- cfg.TEST.AUG.KEEP_RATIO = True
- cfg.TEST.AUG.SIZE_DIVISOR = 32
-
- # pixel decoder config
- cfg.MODEL.SEM_SEG_HEAD.MASK_DIM = 256
- # adding transformer in pixel decoder
- cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS = 0
- # pixel decoder
- cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME = "BasePixelDecoder"
- cfg.MODEL.SEM_SEG_HEAD.SEM_EMBED_DIM = 256
- cfg.MODEL.SEM_SEG_HEAD.INST_EMBED_DIM = 256
-
- # LSJ aug
- cfg.INPUT.IMAGE_SIZE = 1024
- cfg.INPUT.MIN_SCALE = 0.1
- cfg.INPUT.MAX_SCALE = 2.0
-
- # MSDeformAttn encoder configs
- cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES = ["res3", "res4", "res5"]
- cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_POINTS = 4
- cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_HEADS = 8
-
-def add_oneformer_config(cfg):
- """
- Add config for ONE_FORMER.
- """
-
- # mask_former model config
- cfg.MODEL.ONE_FORMER = CN()
-
- # loss
- cfg.MODEL.ONE_FORMER.DEEP_SUPERVISION = True
- cfg.MODEL.ONE_FORMER.NO_OBJECT_WEIGHT = 0.1
- cfg.MODEL.ONE_FORMER.CLASS_WEIGHT = 1.0
- cfg.MODEL.ONE_FORMER.DICE_WEIGHT = 1.0
- cfg.MODEL.ONE_FORMER.MASK_WEIGHT = 20.0
- cfg.MODEL.ONE_FORMER.CONTRASTIVE_WEIGHT = 0.5
- cfg.MODEL.ONE_FORMER.CONTRASTIVE_TEMPERATURE = 0.07
-
- # transformer config
- cfg.MODEL.ONE_FORMER.NHEADS = 8
- cfg.MODEL.ONE_FORMER.DROPOUT = 0.1
- cfg.MODEL.ONE_FORMER.DIM_FEEDFORWARD = 2048
- cfg.MODEL.ONE_FORMER.ENC_LAYERS = 0
- cfg.MODEL.ONE_FORMER.CLASS_DEC_LAYERS = 2
- cfg.MODEL.ONE_FORMER.DEC_LAYERS = 6
- cfg.MODEL.ONE_FORMER.PRE_NORM = False
-
- cfg.MODEL.ONE_FORMER.HIDDEN_DIM = 256
- cfg.MODEL.ONE_FORMER.NUM_OBJECT_QUERIES = 120
- cfg.MODEL.ONE_FORMER.NUM_OBJECT_CTX = 16
- cfg.MODEL.ONE_FORMER.USE_TASK_NORM = True
-
- cfg.MODEL.ONE_FORMER.TRANSFORMER_IN_FEATURE = "res5"
- cfg.MODEL.ONE_FORMER.ENFORCE_INPUT_PROJ = False
-
- # Sometimes `backbone.size_divisibility` is set to 0 for some backbone (e.g. ResNet)
- # you can use this config to override
- cfg.MODEL.ONE_FORMER.SIZE_DIVISIBILITY = 32
-
- # transformer module
- cfg.MODEL.ONE_FORMER.TRANSFORMER_DECODER_NAME = "ContrastiveMultiScaleMaskedTransformerDecoder"
-
- # point loss configs
- # Number of points sampled during training for a mask point head.
- cfg.MODEL.ONE_FORMER.TRAIN_NUM_POINTS = 112 * 112
- # Oversampling parameter for PointRend point sampling during training. Parameter `k` in the
- # original paper.
- cfg.MODEL.ONE_FORMER.OVERSAMPLE_RATIO = 3.0
- # Importance sampling parameter for PointRend point sampling during training. Parametr `beta` in
- # the original paper.
- cfg.MODEL.ONE_FORMER.IMPORTANCE_SAMPLE_RATIO = 0.75
-
-def add_swin_config(cfg):
- """
- Add config for Swin Backbone.
- """
-
- # swin transformer backbone
- cfg.MODEL.SWIN = CN()
- cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE = 224
- cfg.MODEL.SWIN.PATCH_SIZE = 4
- cfg.MODEL.SWIN.EMBED_DIM = 96
- cfg.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]
- cfg.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]
- cfg.MODEL.SWIN.WINDOW_SIZE = 7
- cfg.MODEL.SWIN.MLP_RATIO = 4.0
- cfg.MODEL.SWIN.QKV_BIAS = True
- cfg.MODEL.SWIN.QK_SCALE = None
- cfg.MODEL.SWIN.DROP_RATE = 0.0
- cfg.MODEL.SWIN.ATTN_DROP_RATE = 0.0
- cfg.MODEL.SWIN.DROP_PATH_RATE = 0.3
- cfg.MODEL.SWIN.APE = False
- cfg.MODEL.SWIN.PATCH_NORM = True
- cfg.MODEL.SWIN.OUT_FEATURES = ["res2", "res3", "res4", "res5"]
- cfg.MODEL.SWIN.USE_CHECKPOINT = False
- ## Semask additions
- cfg.MODEL.SWIN.SEM_WINDOW_SIZE = 7
- cfg.MODEL.SWIN.NUM_SEM_BLOCKS = 1
-
-def add_dinat_config(cfg):
- """
- Add config for NAT Backbone.
- """
-
- # DINAT transformer backbone
- cfg.MODEL.DiNAT = CN()
- cfg.MODEL.DiNAT.DEPTHS = [3, 4, 18, 5]
- cfg.MODEL.DiNAT.OUT_FEATURES = ["res2", "res3", "res4", "res5"]
- cfg.MODEL.DiNAT.EMBED_DIM = 64
- cfg.MODEL.DiNAT.MLP_RATIO = 3.0
- cfg.MODEL.DiNAT.NUM_HEADS = [2, 4, 8, 16]
- cfg.MODEL.DiNAT.DROP_PATH_RATE = 0.2
- cfg.MODEL.DiNAT.KERNEL_SIZE = 7
- cfg.MODEL.DiNAT.DILATIONS = [[1, 16, 1], [1, 4, 1, 8], [1, 2, 1, 3, 1, 4], [1, 2, 1, 2, 1]]
- cfg.MODEL.DiNAT.OUT_INDICES = (0, 1, 2, 3)
- cfg.MODEL.DiNAT.QKV_BIAS = True
- cfg.MODEL.DiNAT.QK_SCALE = None
- cfg.MODEL.DiNAT.DROP_RATE = 0
- cfg.MODEL.DiNAT.ATTN_DROP_RATE = 0.
- cfg.MODEL.DiNAT.IN_PATCH_SIZE = 4
-
-def add_convnext_config(cfg):
- """
- Add config for ConvNeXt Backbone.
- """
-
- # convnext backbone
- cfg.MODEL.CONVNEXT = CN()
- cfg.MODEL.CONVNEXT.IN_CHANNELS = 3
- cfg.MODEL.CONVNEXT.DEPTHS = [3, 3, 27, 3]
- cfg.MODEL.CONVNEXT.DIMS = [192, 384, 768, 1536]
- cfg.MODEL.CONVNEXT.DROP_PATH_RATE = 0.4
- cfg.MODEL.CONVNEXT.LSIT = 1.0
- cfg.MODEL.CONVNEXT.OUT_INDICES = [0, 1, 2, 3]
- cfg.MODEL.CONVNEXT.OUT_FEATURES = ["res2", "res3", "res4", "res5"]
-
-def add_beit_adapter_config(cfg):
- """
- Add config for BEiT Adapter Backbone.
- """
-
- # beit adapter backbone
- cfg.MODEL.BEiTAdapter = CN()
- cfg.MODEL.BEiTAdapter.IMG_SIZE = 640
- cfg.MODEL.BEiTAdapter.PATCH_SIZE = 16
- cfg.MODEL.BEiTAdapter.EMBED_DIM = 1024
- cfg.MODEL.BEiTAdapter.DEPTH = 24
- cfg.MODEL.BEiTAdapter.NUM_HEADS = 16
- cfg.MODEL.BEiTAdapter.MLP_RATIO = 4
- cfg.MODEL.BEiTAdapter.QKV_BIAS = True
- cfg.MODEL.BEiTAdapter.USE_ABS_POS_EMB = False
- cfg.MODEL.BEiTAdapter.USE_REL_POS_BIAS = True
- cfg.MODEL.BEiTAdapter.INIT_VALUES = 1e-6
- cfg.MODEL.BEiTAdapter.DROP_PATH_RATE = 0.3
- cfg.MODEL.BEiTAdapter.CONV_INPLANE = 64
- cfg.MODEL.BEiTAdapter.N_POINTS = 4
- cfg.MODEL.BEiTAdapter.DEFORM_NUM_HEADS = 16
- cfg.MODEL.BEiTAdapter.CFFN_RATIO = 0.25
- cfg.MODEL.BEiTAdapter.DEFORM_RATIO = 0.5
- cfg.MODEL.BEiTAdapter.WITH_CP = True
- cfg.MODEL.BEiTAdapter.INTERACTION_INDEXES=[[0, 5], [6, 11], [12, 17], [18, 23]]
- cfg.MODEL.BEiTAdapter.OUT_FEATURES = ["res2", "res3", "res4", "res5"]
\ No newline at end of file
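As a usage note, these helpers are meant to be layered onto a fresh detectron2 config before a YAML file is merged in. Below is a minimal sketch of that composition; the import path mirrors this file's location and the YAML path is a placeholder, so treat both as assumptions.

```python
# Sketch only: build a OneFormer config from the helpers defined above.
from detectron2.config import get_cfg
from oneformer.config import (
    add_common_config,
    add_swin_config,
    add_oneformer_config,
)

cfg = get_cfg()
add_common_config(cfg)     # input, solver, wandb, text-encoder and test options
add_swin_config(cfg)       # backbone defaults (DiNAT/ConvNeXt/BEiT are analogous)
add_oneformer_config(cfg)  # OneFormer losses, decoder and query settings
cfg.merge_from_file("configs/oneformer_swin_large.yaml")  # placeholder path
cfg.freeze()
```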
diff --git a/spaces/Lbin123/Lbingo/src/components/header.tsx b/spaces/Lbin123/Lbingo/src/components/header.tsx
deleted file mode 100644
index dc298b722154d1ac6d7a7e148204605562d6cc58..0000000000000000000000000000000000000000
--- a/spaces/Lbin123/Lbingo/src/components/header.tsx
+++ /dev/null
@@ -1,12 +0,0 @@
-import * as React from 'react'
-import { UserMenu } from './user-menu'
-
-export async function Header() {
- return (
-
-
-
-
-
- )
-}
diff --git a/spaces/LinoyTsaban/edit_friendly_ddpm_inversion/inversion_utils.py b/spaces/LinoyTsaban/edit_friendly_ddpm_inversion/inversion_utils.py
deleted file mode 100644
index 9cd6ca07dafddd9ec12736fa1607560cf618dd3d..0000000000000000000000000000000000000000
--- a/spaces/LinoyTsaban/edit_friendly_ddpm_inversion/inversion_utils.py
+++ /dev/null
@@ -1,295 +0,0 @@
-import torch
-import os
-from tqdm import tqdm
-from PIL import Image, ImageDraw, ImageFont
-from matplotlib import pyplot as plt
-import torchvision.transforms as T
-import yaml
-import numpy as np
-import gradio as gr
-
-# This file was copied from the DDPM inversion Repo - https://github.com/inbarhub/DDPM_inversion #
-
-def load_512(image_path, left=0, right=0, top=0, bottom=0, device=None):
- if type(image_path) is str:
- image = np.array(Image.open(image_path).convert('RGB'))[:, :, :3]
- else:
- image = image_path
- h, w, c = image.shape
- left = min(left, w-1)
- right = min(right, w - left - 1)
- top = min(top, h - left - 1)
- bottom = min(bottom, h - top - 1)
- image = image[top:h-bottom, left:w-right]
- h, w, c = image.shape
- if h < w:
- offset = (w - h) // 2
- image = image[:, offset:offset + h]
- elif w < h:
- offset = (h - w) // 2
- image = image[offset:offset + w]
- image = np.array(Image.fromarray(image).resize((512, 512)))
- image = torch.from_numpy(image).float() / 127.5 - 1
- image = image.permute(2, 0, 1).unsqueeze(0).to(device)
-
- return image
-
-
-def load_real_image(folder = "data/", img_name = None, idx = 0, img_size=512, device='cuda'):
- from PIL import Image
- from glob import glob
- if img_name is not None:
- path = os.path.join(folder, img_name)
- else:
- path = glob(folder + "*")[idx]
-
- img = Image.open(path).resize((img_size,
- img_size))
-
- img = pil_to_tensor(img).to(device)
-
- if img.shape[1]== 4:
- img = img[:,:3,:,:]
- return img
-
-def mu_tilde(model, xt,x0, timestep):
- "mu_tilde(x_t, x_0) DDPM paper eq. 7"
- prev_timestep = timestep - model.scheduler.config.num_train_timesteps // model.scheduler.num_inference_steps
- alpha_prod_t_prev = model.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else model.scheduler.final_alpha_cumprod
- alpha_t = model.scheduler.alphas[timestep]
- beta_t = 1 - alpha_t
- alpha_bar = model.scheduler.alphas_cumprod[timestep]
- return ((alpha_prod_t_prev ** 0.5 * beta_t) / (1-alpha_bar)) * x0 + ((alpha_t**0.5 *(1-alpha_prod_t_prev)) / (1- alpha_bar))*xt
-
-def sample_xts_from_x0(model, x0, num_inference_steps=50):
- """
- Samples from P(x_1:T|x_0)
- """
- # torch.manual_seed(43256465436)
- alpha_bar = model.scheduler.alphas_cumprod
- sqrt_one_minus_alpha_bar = (1-alpha_bar) ** 0.5
- alphas = model.scheduler.alphas
- betas = 1 - alphas
- variance_noise_shape = (
- num_inference_steps,
- model.unet.in_channels,
- model.unet.sample_size,
- model.unet.sample_size)
-
- timesteps = model.scheduler.timesteps.to(model.device)
- t_to_idx = {int(v):k for k,v in enumerate(timesteps)}
- xts = torch.zeros(variance_noise_shape).to(x0.device)
- for t in reversed(timesteps):
- idx = t_to_idx[int(t)]
- xts[idx] = x0 * (alpha_bar[t] ** 0.5) + torch.randn_like(x0) * sqrt_one_minus_alpha_bar[t]
- xts = torch.cat([xts, x0 ],dim = 0)
-
- return xts
-
-def encode_text(model, prompts):
- text_input = model.tokenizer(
- prompts,
- padding="max_length",
- max_length=model.tokenizer.model_max_length,
- truncation=True,
- return_tensors="pt",
- )
- with torch.no_grad():
- text_encoding = model.text_encoder(text_input.input_ids.to(model.device))[0]
- return text_encoding
-
-def forward_step(model, model_output, timestep, sample):
- next_timestep = min(model.scheduler.config.num_train_timesteps - 2,
- timestep + model.scheduler.config.num_train_timesteps // model.scheduler.num_inference_steps)
-
- # 2. compute alphas, betas
- alpha_prod_t = model.scheduler.alphas_cumprod[timestep]
- # alpha_prod_t_next = self.scheduler.alphas_cumprod[next_timestep] if next_ltimestep >= 0 else self.scheduler.final_alpha_cumprod
-
- beta_prod_t = 1 - alpha_prod_t
-
- # 3. compute predicted original sample from predicted noise also called
- # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
- pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
-
- # 5. TODO: simple noising implementation
- next_sample = model.scheduler.add_noise(pred_original_sample,
- model_output,
- torch.LongTensor([next_timestep]))
- return next_sample
-
-
-def get_variance(model, timestep): #, prev_timestep):
- prev_timestep = timestep - model.scheduler.config.num_train_timesteps // model.scheduler.num_inference_steps
- alpha_prod_t = model.scheduler.alphas_cumprod[timestep]
- alpha_prod_t_prev = model.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else model.scheduler.final_alpha_cumprod
- beta_prod_t = 1 - alpha_prod_t
- beta_prod_t_prev = 1 - alpha_prod_t_prev
- variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)
- return variance
-
-def inversion_forward_process(model, x0,
- etas = None,
- prog_bar = False,
- prompt = "",
- cfg_scale = 3.5,
- num_inference_steps=50, eps = None
- ):
-
- if not prompt=="":
- text_embeddings = encode_text(model, prompt)
- uncond_embedding = encode_text(model, "")
- timesteps = model.scheduler.timesteps.to(model.device)
- variance_noise_shape = (
- num_inference_steps,
- model.unet.in_channels,
- model.unet.sample_size,
- model.unet.sample_size)
- if etas is None or (type(etas) in [int, float] and etas == 0):
- eta_is_zero = True
- zs = None
- else:
- eta_is_zero = False
- if type(etas) in [int, float]: etas = [etas]*model.scheduler.num_inference_steps
- xts = sample_xts_from_x0(model, x0, num_inference_steps=num_inference_steps)
- alpha_bar = model.scheduler.alphas_cumprod
- zs = torch.zeros(size=variance_noise_shape, device=model.device)
-
- t_to_idx = {int(v):k for k,v in enumerate(timesteps)}
- xt = x0
- op = tqdm(reversed(timesteps)) if prog_bar else reversed(timesteps)
-
- for t in op:
- idx = t_to_idx[int(t)]
- # 1. predict noise residual
- if not eta_is_zero:
- xt = xts[idx][None]
-
- with torch.no_grad():
- out = model.unet.forward(xt, timestep = t, encoder_hidden_states = uncond_embedding)
- if not prompt=="":
- cond_out = model.unet.forward(xt, timestep=t, encoder_hidden_states = text_embeddings)
-
- if not prompt=="":
- ## classifier free guidance
- noise_pred = out.sample + cfg_scale * (cond_out.sample - out.sample)
- else:
- noise_pred = out.sample
-
- if eta_is_zero:
- # 2. compute more noisy image and set x_t -> x_t+1
- xt = forward_step(model, noise_pred, t, xt)
-
- else:
- xtm1 = xts[idx+1][None]
- # pred of x0
- pred_original_sample = (xt - (1-alpha_bar[t]) ** 0.5 * noise_pred ) / alpha_bar[t] ** 0.5
-
- # direction to xt
- prev_timestep = t - model.scheduler.config.num_train_timesteps // model.scheduler.num_inference_steps
- alpha_prod_t_prev = model.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else model.scheduler.final_alpha_cumprod
-
- variance = get_variance(model, t)
- pred_sample_direction = (1 - alpha_prod_t_prev - etas[idx] * variance ) ** (0.5) * noise_pred
-
- mu_xt = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction
-
- z = (xtm1 - mu_xt ) / ( etas[idx] * variance ** 0.5 )
- zs[idx] = z
-
- # correction to avoid error accumulation
- xtm1 = mu_xt + ( etas[idx] * variance ** 0.5 )*z
- xts[idx+1] = xtm1
-
- if zs is not None:
- zs[-1] = torch.zeros_like(zs[-1])
-
- return xt, zs, xts
-
-
-def reverse_step(model, model_output, timestep, sample, eta = 0, variance_noise=None):
- # 1. get previous step value (=t-1)
- prev_timestep = timestep - model.scheduler.config.num_train_timesteps // model.scheduler.num_inference_steps
- # 2. compute alphas, betas
- alpha_prod_t = model.scheduler.alphas_cumprod[timestep]
- alpha_prod_t_prev = model.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else model.scheduler.final_alpha_cumprod
- beta_prod_t = 1 - alpha_prod_t
- # 3. compute predicted original sample from predicted noise also called
- # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
- pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
- # 5. compute variance: "sigma_t(η)" -> see formula (16)
- # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
- # variance = self.scheduler._get_variance(timestep, prev_timestep)
- variance = get_variance(model, timestep) #, prev_timestep)
- std_dev_t = eta * variance ** (0.5)
- # Take care of asymmetric reverse process (asyrp)
- model_output_direction = model_output
- # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
- # pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * model_output_direction
- pred_sample_direction = (1 - alpha_prod_t_prev - eta * variance) ** (0.5) * model_output_direction
- # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
- prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction
- # 8. Add noise if eta > 0
- if eta > 0:
- if variance_noise is None:
- variance_noise = torch.randn(model_output.shape, device=model.device)
- sigma_z = eta * variance ** (0.5) * variance_noise
- prev_sample = prev_sample + sigma_z
-
- return prev_sample
-
-def inversion_reverse_process(model,
- xT,
- etas = 0,
- prompts = "",
- cfg_scales = None,
- prog_bar = False,
- zs = None,
- controller=None,
- asyrp = False
- ):
-
- batch_size = len(prompts)
-
- cfg_scales_tensor = torch.Tensor(cfg_scales).view(-1,1,1,1).to(model.device)
-
- text_embeddings = encode_text(model, prompts)
- uncond_embedding = encode_text(model, [""] * batch_size)
-
- if etas is None: etas = 0
- if type(etas) in [int, float]: etas = [etas]*model.scheduler.num_inference_steps
- assert len(etas) == model.scheduler.num_inference_steps
- timesteps = model.scheduler.timesteps.to(model.device)
-
- xt = xT.expand(batch_size, -1, -1, -1)
- op = tqdm(timesteps[-zs.shape[0]:]) if prog_bar else timesteps[-zs.shape[0]:]
-
- t_to_idx = {int(v):k for k,v in enumerate(timesteps[-zs.shape[0]:])}
-
- for t in op:
- idx = t_to_idx[int(t)]
- ## Unconditional embedding
- with torch.no_grad():
- uncond_out = model.unet.forward(xt, timestep = t,
- encoder_hidden_states = uncond_embedding)
-
- ## Conditional embedding
- if prompts:
- with torch.no_grad():
- cond_out = model.unet.forward(xt, timestep = t,
- encoder_hidden_states = text_embeddings)
-
-
- z = zs[idx] if zs is not None else None
- z = z.expand(batch_size, -1, -1, -1)
- if prompts:
- ## classifier free guidance
- noise_pred = uncond_out.sample + cfg_scales_tensor * (cond_out.sample - uncond_out.sample)
- else:
- noise_pred = uncond_out.sample
- # 2. compute less noisy image and set x_t -> x_t-1
- xt = reverse_step(model, noise_pred, t, xt, eta = etas[idx], variance_noise = z)
- if controller is not None:
- xt = controller.step_callback(xt)
- return xt, zs
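For orientation, here is a rough sketch of how these helpers fit together for an edit-friendly inversion round trip. It assumes a diffusers `StableDiffusionPipeline` with a DDIM scheduler whose attributes match what the functions above expect (`scheduler`, `unet`, `vae`, `tokenizer`, `text_encoder`); the model id, image path, prompts, guidance scales and the `xts[0]` starting index are illustrative choices, so double-check them against the upstream DDPM_inversion repo before relying on them.

```python
# A hedged usage sketch, not the repo's official driver script.
import torch
from diffusers import StableDiffusionPipeline, DDIMScheduler

device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(device)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.scheduler.set_timesteps(50, device=device)

x0 = load_512("example.jpg", device=device)                    # placeholder image path
with torch.no_grad():
    w0 = pipe.vae.encode(x0).latent_dist.mode() * 0.18215      # image -> latents

# Invert under the source prompt, then regenerate under an edit prompt,
# reusing the recorded noise maps zs so unedited content is preserved.
_, zs, xts = inversion_forward_process(
    pipe, w0, etas=1.0, prompt="a photo of a cat",
    cfg_scale=3.5, num_inference_steps=50, prog_bar=True)
w_edit, _ = inversion_reverse_process(
    pipe, xT=xts[0][None], etas=1.0, prompts=["a photo of a dog"],
    cfg_scales=[15.0], zs=zs, prog_bar=True)
with torch.no_grad():
    image = pipe.vae.decode(w_edit / 0.18215).sample           # latents -> image tensor
```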
diff --git a/spaces/Lippppxy/AiAnimeVoice/text/symbols.py b/spaces/Lippppxy/AiAnimeVoice/text/symbols.py
deleted file mode 100644
index edfbd24247be8c757275ce80b9ec27a0ffa808f3..0000000000000000000000000000000000000000
--- a/spaces/Lippppxy/AiAnimeVoice/text/symbols.py
+++ /dev/null
@@ -1,39 +0,0 @@
-'''
-Defines the set of symbols used in text input to the model.
-'''
-
-'''# japanese_cleaners
-_pad = '_'
-_punctuation = ',.!?-'
-_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧ↓↑ '
-'''
-
-'''# japanese_cleaners2
-_pad = '_'
-_punctuation = ',.!?-~…'
-_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ '
-'''
-
-'''# korean_cleaners
-_pad = '_'
-_punctuation = ',.!?…~'
-_letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ '
-'''
-
-'''# chinese_cleaners
-_pad = '_'
-_punctuation = ',。!?—…'
-_letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ '
-'''
-
-# zh_ja_mixture_cleaners
-_pad = '_'
-_punctuation = ',.!?-~…'
-_letters = 'AEINOQUabdefghijklmnoprstuvwyzʃʧʦɯɹəɥ⁼ʰ`→↓↑ '
-
-
-# Export all symbols:
-symbols = [_pad] + list(_punctuation) + list(_letters)
-
-# Special symbol ids
-SPACE_ID = symbols.index(" ")
\ No newline at end of file
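For context, a symbol table like this is usually consumed by building id lookups over it; the helpers below are an illustrative sketch, not functions from this repo.

```python
# Illustrative lookups over the symbol inventory defined above.
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}

def cleaned_text_to_sequence(cleaned_text):
    """Map an already-cleaned string to symbol ids, dropping unknown characters."""
    return [_symbol_to_id[ch] for ch in cleaned_text if ch in _symbol_to_id]

def sequence_to_text(sequence):
    """Inverse mapping, handy for debugging."""
    return "".join(_id_to_symbol[i] for i in sequence)
```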
diff --git a/spaces/MLVKU/Human_Object_Interaction/hotr/engine/evaluator_hico.py b/spaces/MLVKU/Human_Object_Interaction/hotr/engine/evaluator_hico.py
deleted file mode 100644
index 9d7771a9437bfd7effdccb69d7a447344ce19920..0000000000000000000000000000000000000000
--- a/spaces/MLVKU/Human_Object_Interaction/hotr/engine/evaluator_hico.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import math
-import os
-import sys
-from typing import Iterable
-import numpy as np
-import copy
-import itertools
-
-import torch
-
-import hotr.util.misc as utils
-import hotr.util.logger as loggers
-from hotr.data.evaluators.hico_eval import HICOEvaluator
-
-@torch.no_grad()
-def hico_evaluate(model, postprocessors, data_loader, device, thr, args=None):
- model.eval()
-
- metric_logger = loggers.MetricLogger(mode="test", delimiter=" ")
- header = 'Evaluation Inference (HICO-DET)'
-
- preds = []
- gts = []
- indices = []
- hoi_recognition_time = []
-
- for samples, targets in metric_logger.log_every(data_loader, 50, header):
- samples = samples.to(device)
- targets = [{k: (v.to(device) if k != 'id' else v) for k, v in t.items()} for t in targets]
-
- outputs = model(samples)
- orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0)
- results = postprocessors['hoi'](outputs, orig_target_sizes, threshold=thr, dataset='hico-det', args=args)
- hoi_recognition_time.append(results[0]['hoi_recognition_time'] * 1000)
-
- preds.extend(list(itertools.chain.from_iterable(utils.all_gather(results))))
- # To avoid a runtime error, a deep copy of the targets is used
- gts.extend(list(itertools.chain.from_iterable(utils.all_gather(copy.deepcopy(targets)))))
-
- print(f"[stats] HOI Recognition Time (avg) : {sum(hoi_recognition_time)/len(hoi_recognition_time):.4f} ms")
-
- # gather the stats from all processes
- metric_logger.synchronize_between_processes()
-
- img_ids = [img_gts['id'] for img_gts in gts]
- _, indices = np.unique(img_ids, return_index=True)
- preds = [img_preds for i, img_preds in enumerate(preds) if i in indices]
- gts = [img_gts for i, img_gts in enumerate(gts) if i in indices]
-
- evaluator = HICOEvaluator(preds, gts, data_loader.dataset.rare_triplets,
- data_loader.dataset.non_rare_triplets, data_loader.dataset.correct_mat)
-
- stats = evaluator.evaluate()
-
- return stats
\ No newline at end of file
diff --git a/spaces/Marshalls/testmtd/feature_extraction/madmom/utils/stats.py b/spaces/Marshalls/testmtd/feature_extraction/madmom/utils/stats.py
deleted file mode 100644
index 953a473289923a1c265e189b917ad2c696ff88a7..0000000000000000000000000000000000000000
--- a/spaces/Marshalls/testmtd/feature_extraction/madmom/utils/stats.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# encoding: utf-8
-# pylint: disable=no-member
-# pylint: disable=invalid-name
-# pylint: disable=too-many-arguments
-"""
-This module contains some statistical functionality.
-
-"""
-
-from __future__ import absolute_import, division, print_function
-
-import numpy as np
-
-
-def mcnemar_test(test_1, test_2, significance=0.01):
- """
- Perform McNemar's statistical test.
-
- Parameters
- ----------
- test_1 : numpy array
- Test 1 sample(s).
- test_2 : numpy array
- Test 2 sample(s).
- significance : float, optional
- Significance level.
-
- Returns
- -------
- significance : int
- Significance {-1, 0, +1}.
- p_value : float
- P-value.
-
- Notes
- -----
- Please see: http://en.wikipedia.org/wiki/McNemar%27s_test
-
- +-----------------+-----------------+-----------------+-----------+
- | | Test 2 positive | Test 2 negative | Row total |
- +-----------------+-----------------+-----------------+-----------+
- | Test 1 positive | a | b | a + b |
- | Test 1 negative | c | d | c + d |
- +-----------------+-----------------+-----------------+-----------+
- | Column total | a + c | b + d | n |
- +-----------------+-----------------+-----------------+-----------+
-
- """
- from scipy.stats import chi2
- # convert the tests to numpy arrays
- test_1 = np.asarray(test_1)
- test_2 = np.asarray(test_2)
- # both test must have the same length
- if not (test_1.size == test_2.size and test_1.shape == test_2.shape):
- raise ValueError("Both tests must have the same size and shape.")
- # calculate a, b, c, d
- # a = np.sum(test_1 * test_2)
- b = np.sum(test_1 > test_2)
- c = np.sum(test_1 < test_2)
- # d = np.sum(-test_1 * -test_2)
- # is the approximation ok?
- if b + c < 25:
- raise NotImplementedError("implement correct binomial distribution or "
- "use bigger sample sizes (b + c >= 25)")
- # statistical test
- stat = (b - c) ** 2 / float(b + c)
- # test under chi square distribution
- p = chi2(1).sf(stat)
- # direction of significance
- sig = 0
- if p < significance:
- sig = 1 if b > c else -1
- return sig, p
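A short usage example with synthetic data (the inputs are per-item correct/incorrect indicators from two systems scored on the same items; the numbers below are made up purely for illustration):

```python
# Synthetic example: 200 items scored by two systems (1 = correct, 0 = incorrect).
import numpy as np

rng = np.random.default_rng(0)
test_1 = rng.integers(0, 2, size=200)
test_2 = rng.integers(0, 2, size=200)

sig, p = mcnemar_test(test_1, test_2, significance=0.01)
print(sig, p)  # sig is -1, 0 or +1; p is the chi-square p-value
```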
diff --git a/spaces/MichaelT8093/Mandarin-TTS/text/symbols.py b/spaces/MichaelT8093/Mandarin-TTS/text/symbols.py
deleted file mode 100644
index 80fd41ea8ee57725ce0f76aa5347a3a1fdd0047d..0000000000000000000000000000000000000000
--- a/spaces/MichaelT8093/Mandarin-TTS/text/symbols.py
+++ /dev/null
@@ -1,71 +0,0 @@
-_pause = ["sil", "eos", "sp", "#0", "#1", "#2", "#3"]
-
-_initials = [
- "^",
- "b",
- "c",
- "ch",
- "d",
- "f",
- "g",
- "h",
- "j",
- "k",
- "l",
- "m",
- "n",
- "p",
- "q",
- "r",
- "s",
- "sh",
- "t",
- "x",
- "z",
- "zh",
-]
-
-_tones = ["1", "2", "3", "4", "5"]
-
-_finals = [
- "a",
- "ai",
- "an",
- "ang",
- "ao",
- "e",
- "ei",
- "en",
- "eng",
- "er",
- "i",
- "ia",
- "ian",
- "iang",
- "iao",
- "ie",
- "ii",
- "iii",
- "in",
- "ing",
- "iong",
- "iou",
- "o",
- "ong",
- "ou",
- "u",
- "ua",
- "uai",
- "uan",
- "uang",
- "uei",
- "uen",
- "ueng",
- "uo",
- "v",
- "van",
- "ve",
- "vn",
-]
-
-symbols = _pause + _initials + [i + j for i in _finals for j in _tones]
\ No newline at end of file
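A quick arithmetic check of the inventory that comprehension builds: 7 pause symbols + 22 initials + 38 finals × 5 tones = 219 entries, all distinct.

```python
# Sanity check of the symbol table defined above.
assert len(_pause) == 7
assert len(_initials) == 22
assert len(_finals) == 38 and len(_tones) == 5
assert len(symbols) == 7 + 22 + 38 * 5      # 219 symbols in total
assert len(set(symbols)) == len(symbols)    # no collisions between the groups
```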
diff --git a/spaces/MisterZee/PIFu-Clothed-Human-Digitization/PIFu/apps/eval_spaces.py b/spaces/MisterZee/PIFu-Clothed-Human-Digitization/PIFu/apps/eval_spaces.py
deleted file mode 100644
index b0cf689d24f70d95aa0d491fd04987296802e492..0000000000000000000000000000000000000000
--- a/spaces/MisterZee/PIFu-Clothed-Human-Digitization/PIFu/apps/eval_spaces.py
+++ /dev/null
@@ -1,138 +0,0 @@
-import sys
-import os
-
-sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
-ROOT_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-
-import time
-import json
-import numpy as np
-import torch
-from torch.utils.data import DataLoader
-
-from lib.options import BaseOptions
-from lib.mesh_util import *
-from lib.sample_util import *
-from lib.train_util import *
-from lib.model import *
-
-from PIL import Image
-import torchvision.transforms as transforms
-
-import trimesh
-from datetime import datetime
-
-# get options
-opt = BaseOptions().parse()
-
-class Evaluator:
- def __init__(self, opt, projection_mode='orthogonal'):
- self.opt = opt
- self.load_size = self.opt.loadSize
- self.to_tensor = transforms.Compose([
- transforms.Resize(self.load_size),
- transforms.ToTensor(),
- transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
- ])
- # set cuda
- cuda = torch.device('cuda:%d' % opt.gpu_id) if torch.cuda.is_available() else torch.device('cpu')
- print("CUDA device:", torch.cuda.get_device_name(0) if torch.cuda.is_available() else "none (running on CPU)")
-
- # create net
- netG = HGPIFuNet(opt, projection_mode).to(device=cuda)
- print('Using Network: ', netG.name)
-
- if opt.load_netG_checkpoint_path:
- netG.load_state_dict(torch.load(opt.load_netG_checkpoint_path, map_location=cuda))
-
- if opt.load_netC_checkpoint_path is not None:
- print('loading for net C ...', opt.load_netC_checkpoint_path)
- netC = ResBlkPIFuNet(opt).to(device=cuda)
- netC.load_state_dict(torch.load(opt.load_netC_checkpoint_path, map_location=cuda))
- else:
- netC = None
-
- os.makedirs(opt.results_path, exist_ok=True)
- os.makedirs('%s/%s' % (opt.results_path, opt.name), exist_ok=True)
-
- opt_log = os.path.join(opt.results_path, opt.name, 'opt.txt')
- with open(opt_log, 'w') as outfile:
- outfile.write(json.dumps(vars(opt), indent=2))
-
- self.cuda = cuda
- self.netG = netG
- self.netC = netC
-
- def load_image(self, image_path, mask_path):
- # Name
- img_name = os.path.splitext(os.path.basename(image_path))[0]
- # Calib
- B_MIN = np.array([-1, -1, -1])
- B_MAX = np.array([1, 1, 1])
- projection_matrix = np.identity(4)
- projection_matrix[1, 1] = -1
- calib = torch.Tensor(projection_matrix).float()
- # Mask
- mask = Image.open(mask_path).convert('L')
- mask = transforms.Resize(self.load_size)(mask)
- mask = transforms.ToTensor()(mask).float()
- # image
- image = Image.open(image_path).convert('RGB')
- image = self.to_tensor(image)
- image = mask.expand_as(image) * image
- return {
- 'name': img_name,
- 'img': image.unsqueeze(0),
- 'calib': calib.unsqueeze(0),
- 'mask': mask.unsqueeze(0),
- 'b_min': B_MIN,
- 'b_max': B_MAX,
- }
-
- def eval(self, data, use_octree=False):
- '''
- Evaluate a data point
- :param data: a dict containing at least ['name'], ['image'], ['calib'], ['b_min'] and ['b_max'] tensors.
- :return:
- '''
- opt = self.opt
- with torch.no_grad():
- self.netG.eval()
- if self.netC:
- self.netC.eval()
- save_path = '%s/%s/result_%s.obj' % (opt.results_path, opt.name, data['name'])
- if self.netC:
- gen_mesh_color(opt, self.netG, self.netC, self.cuda, data, save_path, use_octree=use_octree)
- else:
- gen_mesh(opt, self.netG, self.cuda, data, save_path, use_octree=use_octree)
-
-
-if __name__ == '__main__':
- evaluator = Evaluator(opt)
-
- results_path = opt.results_path
- name = opt.name
- test_image_path = opt.img_path
- test_mask_path = test_image_path[:-4] +'_mask.png'
- test_img_name = os.path.splitext(os.path.basename(test_image_path))[0]
- print("test_image: ", test_image_path)
- print("test_mask: ", test_mask_path)
-
- try:
- start_time = datetime.now()
- print("evaluating", start_time)
- data = evaluator.load_image(test_image_path, test_mask_path)
- evaluator.eval(data, False)
- print("done evaluating", datetime.now() - start_time)
- except Exception as e:
- print("error:", e.args)
-
- try:
- mesh = trimesh.load(f'{results_path}/{name}/result_{test_img_name}.obj')
- mesh.apply_transform([[1, 0, 0, 0],
- [0, 1, 0, 0],
- [0, 0, -1, 0],
- [0, 0, 0, 1]])
- mesh.export(file_obj=f'{results_path}/{name}/result_{test_img_name}.glb')
- except Exception as e:
- print("error generating MESH", e)
diff --git a/spaces/NCTCMumbai/NCTC/models/official/nlp/tasks/sentence_prediction_test.py b/spaces/NCTCMumbai/NCTC/models/official/nlp/tasks/sentence_prediction_test.py
deleted file mode 100644
index 09419f54c4642f08ca37e2588103c45d0847b7bc..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/official/nlp/tasks/sentence_prediction_test.py
+++ /dev/null
@@ -1,163 +0,0 @@
-# Lint as: python3
-# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Tests for official.nlp.tasks.sentence_prediction."""
-import functools
-import os
-
-from absl.testing import parameterized
-import tensorflow as tf
-
-from official.nlp.bert import configs
-from official.nlp.bert import export_tfhub
-from official.nlp.configs import bert
-from official.nlp.configs import encoders
-from official.nlp.tasks import sentence_prediction
-
-
-class SentencePredictionTaskTest(tf.test.TestCase, parameterized.TestCase):
-
- def setUp(self):
- super(SentencePredictionTaskTest, self).setUp()
- self._train_data_config = bert.SentencePredictionDataConfig(
- input_path="dummy", seq_length=128, global_batch_size=1)
-
- def get_network_config(self, num_classes):
- return bert.BertPretrainerConfig(
- encoder=encoders.TransformerEncoderConfig(
- vocab_size=30522, num_layers=1),
- num_masked_tokens=0,
- cls_heads=[
- bert.ClsHeadConfig(
- inner_dim=10,
- num_classes=num_classes,
- name="sentence_prediction")
- ])
-
- def _run_task(self, config):
- task = sentence_prediction.SentencePredictionTask(config)
- model = task.build_model()
- metrics = task.build_metrics()
-
- strategy = tf.distribute.get_strategy()
- dataset = strategy.experimental_distribute_datasets_from_function(
- functools.partial(task.build_inputs, config.train_data))
-
- iterator = iter(dataset)
- optimizer = tf.keras.optimizers.SGD(lr=0.1)
- task.train_step(next(iterator), model, optimizer, metrics=metrics)
- task.validation_step(next(iterator), model, metrics=metrics)
-
- def test_task(self):
- config = sentence_prediction.SentencePredictionConfig(
- init_checkpoint=self.get_temp_dir(),
- network=self.get_network_config(2),
- train_data=self._train_data_config)
- task = sentence_prediction.SentencePredictionTask(config)
- model = task.build_model()
- metrics = task.build_metrics()
- dataset = task.build_inputs(config.train_data)
-
- iterator = iter(dataset)
- optimizer = tf.keras.optimizers.SGD(lr=0.1)
- task.train_step(next(iterator), model, optimizer, metrics=metrics)
- task.validation_step(next(iterator), model, metrics=metrics)
-
- # Saves a checkpoint.
- pretrain_cfg = bert.BertPretrainerConfig(
- encoder=encoders.TransformerEncoderConfig(
- vocab_size=30522, num_layers=1),
- num_masked_tokens=20,
- cls_heads=[
- bert.ClsHeadConfig(
- inner_dim=10, num_classes=3, name="next_sentence")
- ])
- pretrain_model = bert.instantiate_bertpretrainer_from_cfg(pretrain_cfg)
- ckpt = tf.train.Checkpoint(
- model=pretrain_model, **pretrain_model.checkpoint_items)
- ckpt.save(config.init_checkpoint)
- task.initialize(model)
-
- @parameterized.parameters(("matthews_corrcoef", 2),
- ("pearson_spearman_corr", 1))
- def test_np_metrics(self, metric_type, num_classes):
- config = sentence_prediction.SentencePredictionConfig(
- metric_type=metric_type,
- init_checkpoint=self.get_temp_dir(),
- network=self.get_network_config(num_classes),
- train_data=self._train_data_config)
- task = sentence_prediction.SentencePredictionTask(config)
- model = task.build_model()
- dataset = task.build_inputs(config.train_data)
-
- iterator = iter(dataset)
- strategy = tf.distribute.get_strategy()
- distributed_outputs = strategy.run(
- functools.partial(task.validation_step, model=model),
- args=(next(iterator),))
- outputs = tf.nest.map_structure(strategy.experimental_local_results,
- distributed_outputs)
- aggregated = task.aggregate_logs(step_outputs=outputs)
- aggregated = task.aggregate_logs(state=aggregated, step_outputs=outputs)
- self.assertIn(metric_type, task.reduce_aggregated_logs(aggregated))
-
- def test_task_with_fit(self):
- config = sentence_prediction.SentencePredictionConfig(
- network=self.get_network_config(2), train_data=self._train_data_config)
- task = sentence_prediction.SentencePredictionTask(config)
- model = task.build_model()
- model = task.compile_model(
- model,
- optimizer=tf.keras.optimizers.SGD(lr=0.1),
- train_step=task.train_step,
- metrics=task.build_metrics())
- dataset = task.build_inputs(config.train_data)
- logs = model.fit(dataset, epochs=1, steps_per_epoch=2)
- self.assertIn("loss", logs.history)
-
- def _export_bert_tfhub(self):
- bert_config = configs.BertConfig(
- vocab_size=30522,
- hidden_size=16,
- intermediate_size=32,
- max_position_embeddings=128,
- num_attention_heads=2,
- num_hidden_layers=1)
- _, encoder = export_tfhub.create_bert_model(bert_config)
- model_checkpoint_dir = os.path.join(self.get_temp_dir(), "checkpoint")
- checkpoint = tf.train.Checkpoint(model=encoder)
- checkpoint.save(os.path.join(model_checkpoint_dir, "test"))
- model_checkpoint_path = tf.train.latest_checkpoint(model_checkpoint_dir)
-
- vocab_file = os.path.join(self.get_temp_dir(), "uncased_vocab.txt")
- with tf.io.gfile.GFile(vocab_file, "w") as f:
- f.write("dummy content")
-
- hub_destination = os.path.join(self.get_temp_dir(), "hub")
- export_tfhub.export_bert_tfhub(bert_config, model_checkpoint_path,
- hub_destination, vocab_file)
- return hub_destination
-
- def test_task_with_hub(self):
- hub_module_url = self._export_bert_tfhub()
- config = sentence_prediction.SentencePredictionConfig(
- hub_module_url=hub_module_url,
- network=self.get_network_config(2),
- train_data=self._train_data_config)
- self._run_task(config)
-
-
-if __name__ == "__main__":
- tf.test.main()
diff --git a/spaces/NCTCMumbai/NCTC/models/official/nlp/transformer/metrics.py b/spaces/NCTCMumbai/NCTC/models/official/nlp/transformer/metrics.py
deleted file mode 100644
index 4bd6bba6e6862d643c6cb9bb9fb857b70b3cc00f..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/official/nlp/transformer/metrics.py
+++ /dev/null
@@ -1,183 +0,0 @@
-# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the 'License');
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an 'AS IS' BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Functions for calculating loss, accuracy, and other model metrics.
-
-Metrics:
- - Padded loss, accuracy, and negative log perplexity. Source:
- https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/metrics.py
- - BLEU approximation. Source:
- https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/bleu_hook.py
- - ROUGE score. Source:
- https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/rouge.py
-"""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import functools
-
-import tensorflow as tf
-
-
-def _pad_tensors_to_same_length(x, y):
- """Pad x and y so that the results have the same length (second dimension)."""
- with tf.name_scope("pad_to_same_length"):
- x_length = tf.shape(x)[1]
- y_length = tf.shape(y)[1]
-
- max_length = tf.maximum(x_length, y_length)
-
- x = tf.pad(x, [[0, 0], [0, max_length - x_length], [0, 0]])
- y = tf.pad(y, [[0, 0], [0, max_length - y_length]])
- return x, y
-
-
-def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
- """Calculate cross entropy loss while ignoring padding.
-
- Args:
- logits: Tensor of size [batch_size, length_logits, vocab_size]
- labels: Tensor of size [batch_size, length_labels]
- smoothing: Label smoothing constant, used to determine the on and off values
- vocab_size: int size of the vocabulary
-
- Returns:
- Returns the cross entropy loss and weight tensors: float32 tensors with
- shape [batch_size, max(length_logits, length_labels)]
- """
- with tf.name_scope("loss"):
- logits, labels = _pad_tensors_to_same_length(logits, labels)
-
- # Calculate smoothing cross entropy
- with tf.name_scope("smoothing_cross_entropy"):
- confidence = 1.0 - smoothing
- low_confidence = (1.0 - confidence) / tf.cast(vocab_size - 1, tf.float32)
- soft_targets = tf.one_hot(
- tf.cast(labels, tf.int32),
- depth=vocab_size,
- on_value=confidence,
- off_value=low_confidence)
- xentropy = tf.nn.softmax_cross_entropy_with_logits(
- logits=logits, labels=soft_targets)
-
- # Calculate the best (lowest) possible value of cross entropy, and
- # subtract from the cross entropy loss.
- normalizing_constant = -(
- confidence * tf.math.log(confidence) +
- tf.cast(vocab_size - 1, tf.float32) * low_confidence *
- tf.math.log(low_confidence + 1e-20))
- xentropy -= normalizing_constant
-
- weights = tf.cast(tf.not_equal(labels, 0), tf.float32)
- return xentropy * weights, weights
-
-
-def padded_accuracy(logits, labels):
- """Percentage of times that predictions matches labels on non-0s."""
- with tf.name_scope("padded_accuracy"):
- logits, labels = _pad_tensors_to_same_length(logits, labels)
- weights = tf.cast(tf.not_equal(labels, 0), tf.float32)
- outputs = tf.cast(tf.argmax(logits, axis=-1), tf.int32)
- padded_labels = tf.cast(labels, tf.int32)
- return tf.cast(tf.equal(outputs, padded_labels), tf.float32), weights
-
-
-def padded_accuracy_topk(logits, labels, k):
- """Percentage of times that top-k predictions matches labels on non-0s."""
- with tf.name_scope("padded_accuracy_topk"):
- logits, labels = _pad_tensors_to_same_length(logits, labels)
- weights = tf.cast(tf.not_equal(labels, 0), tf.float32)
- effective_k = tf.minimum(k, tf.shape(logits)[-1])
- _, outputs = tf.nn.top_k(logits, k=effective_k)
- outputs = tf.cast(outputs, tf.int32)
- padded_labels = tf.cast(labels, tf.int32)
- padded_labels = tf.expand_dims(padded_labels, axis=-1)
- padded_labels += tf.zeros_like(outputs) # Pad to same shape.
- same = tf.cast(tf.equal(outputs, padded_labels), tf.float32)
- same_topk = tf.reduce_sum(same, axis=-1)
- return same_topk, weights
-
-
-def padded_accuracy_top5(logits, labels):
- return padded_accuracy_topk(logits, labels, 5)
-
-
-def padded_sequence_accuracy(logits, labels):
- """Percentage of times that predictions matches labels everywhere (non-0)."""
- with tf.name_scope("padded_sequence_accuracy"):
- logits, labels = _pad_tensors_to_same_length(logits, labels)
- weights = tf.cast(tf.not_equal(labels, 0), tf.float32)
- outputs = tf.cast(tf.argmax(logits, axis=-1), tf.int32)
- padded_labels = tf.cast(labels, tf.int32)
- not_correct = tf.cast(tf.not_equal(outputs, padded_labels),
- tf.float32) * weights
- axis = list(range(1, len(outputs.get_shape())))
- correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
- return correct_seq, tf.constant(1.0)
-
-
-def padded_neg_log_perplexity(logits, labels, vocab_size):
- """Average log-perplexity excluding padding 0s. No smoothing."""
- num, den = padded_cross_entropy_loss(logits, labels, 0, vocab_size)
- return -num, den
-
-
-class MetricLayer(tf.keras.layers.Layer):
- """Custom a layer of metrics for Transformer model."""
-
- def __init__(self, vocab_size):
- super(MetricLayer, self).__init__()
- self.vocab_size = vocab_size
- self.metric_mean_fns = []
-
- def build(self, input_shape):
- """Builds metric layer."""
- neg_log_perplexity = functools.partial(
- padded_neg_log_perplexity, vocab_size=self.vocab_size)
- self.metric_mean_fns = [
- (tf.keras.metrics.Mean("accuracy"), padded_accuracy),
- (tf.keras.metrics.Mean("accuracy_top5"), padded_accuracy_top5),
- (tf.keras.metrics.Mean("accuracy_per_sequence"),
- padded_sequence_accuracy),
- (tf.keras.metrics.Mean("neg_log_perplexity"), neg_log_perplexity),
- ]
- super(MetricLayer, self).build(input_shape)
-
- def get_config(self):
- return {"vocab_size": self.vocab_size}
-
- def call(self, inputs):
- logits, targets = inputs[0], inputs[1]
- for mean, fn in self.metric_mean_fns:
- m = mean(*fn(logits, targets))
- self.add_metric(m)
- return logits
-
-
-def transformer_loss(logits, labels, smoothing, vocab_size):
- """Calculates total loss containing cross entropy with padding ignored.
-
- Args:
- logits: Tensor of size [batch_size, length_logits, vocab_size]
- labels: Tensor of size [batch_size, length_labels]
- smoothing: Label smoothing constant, used to determine the on and off values
- vocab_size: int size of the vocabulary
-
- Returns:
- A scalar float tensor for loss.
- """
- xentropy, weights = padded_cross_entropy_loss(logits, labels, smoothing,
- vocab_size)
- return tf.reduce_sum(xentropy) / tf.reduce_sum(weights)
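A small smoke test of the loss helper on dummy tensors (TF 2.x assumed; the batch size, sequence length and vocabulary size below are arbitrary):

```python
# Dummy-data check: label id 0 is treated as padding and excluded from the loss.
import tensorflow as tf

batch, length, vocab_size = 2, 7, 33
logits = tf.random.uniform((batch, length, vocab_size))
labels = tf.random.uniform((batch, length), maxval=vocab_size, dtype=tf.int32)

loss = transformer_loss(logits, labels, smoothing=0.1, vocab_size=vocab_size)
print(float(loss))  # scalar label-smoothed cross entropy over non-padding tokens
```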
diff --git a/spaces/Nadaal/dost5/app.py b/spaces/Nadaal/dost5/app.py
deleted file mode 100644
index 6225ee22f18dfa7654f7452e15e991aa1fb657e2..0000000000000000000000000000000000000000
--- a/spaces/Nadaal/dost5/app.py
+++ /dev/null
@@ -1,878 +0,0 @@
-import io
-import os
-import ssl
-from contextlib import closing
-from typing import Optional, Tuple
-import datetime
-
-import boto3
-import gradio as gr
-import requests
-
-# UNCOMMENT TO USE WHISPER
-import warnings
-import whisper
-
-from langchain import ConversationChain, LLMChain
-
-from langchain.agents import load_tools, initialize_agent
-from langchain.chains.conversation.memory import ConversationBufferMemory
-from langchain.llms import OpenAI
-from threading import Lock
-
-# Console to variable
-from io import StringIO
-import sys
-import re
-
-from openai.error import AuthenticationError, InvalidRequestError, RateLimitError
-
-# Pertains to Express-inator functionality
-from langchain.prompts import PromptTemplate
-
-from polly_utils import PollyVoiceData, NEURAL_ENGINE
-from azure_utils import AzureVoiceData
-
-# Pertains to question answering functionality
-from langchain.embeddings.openai import OpenAIEmbeddings
-from langchain.text_splitter import CharacterTextSplitter
-from langchain.vectorstores.faiss import FAISS
-from langchain.docstore.document import Document
-from langchain.chains.question_answering import load_qa_chain
-
-TOOLS_LIST = ['serpapi', 'wolfram-alpha', 'pal-math', 'pal-colored-objects'] #'google-search','news-api','tmdb-api','open-meteo-api'
-TOOLS_DEFAULT_LIST = ['serpapi', 'pal-math']
-BUG_FOUND_MSG = "Congratulations, you've found a bug in this application!"
-# AUTH_ERR_MSG = "Please paste your OpenAI key from openai.com to use this application. It is not necessary to hit a button or key after pasting it."
-AUTH_ERR_MSG = "Please paste your OpenAI key from openai.com to use this application. "
-MAX_TOKENS = 512
-
-LOOPING_TALKING_HEAD = "videos/Masahiro.mp4"
-TALKING_HEAD_WIDTH = "192"
-MAX_TALKING_HEAD_TEXT_LENGTH = 155
-
-# Pertains to Express-inator functionality
-NUM_WORDS_DEFAULT = 0
-MAX_WORDS = 400
-FORMALITY_DEFAULT = "N/A"
-TEMPERATURE_DEFAULT = 0.5
-EMOTION_DEFAULT = "N/A"
-LANG_LEVEL_DEFAULT = "N/A"
-TRANSLATE_TO_DEFAULT = "N/A"
-LITERARY_STYLE_DEFAULT = "N/A"
-PROMPT_TEMPLATE = PromptTemplate(
- input_variables=["original_words", "num_words", "formality", "emotions", "lang_level", "translate_to",
- "literary_style"],
- template="Restate {num_words}{formality}{emotions}{lang_level}{translate_to}{literary_style}the following: \n{original_words}\n",
-)
-
-POLLY_VOICE_DATA = PollyVoiceData()
-AZURE_VOICE_DATA = AzureVoiceData()
-
-# Pertains to WHISPER functionality
-WHISPER_DETECT_LANG = "Detect language"
-
-
-# UNCOMMENT TO USE WHISPER
-warnings.filterwarnings("ignore")
-WHISPER_MODEL = whisper.load_model("tiny")
-print("WHISPER_MODEL", WHISPER_MODEL)
-
-
-# UNCOMMENT TO USE WHISPER
-def transcribe(aud_inp, whisper_lang):
- if aud_inp is None:
- return ""
- aud = whisper.load_audio(aud_inp)
- aud = whisper.pad_or_trim(aud)
- mel = whisper.log_mel_spectrogram(aud).to(WHISPER_MODEL.device)
- _, probs = WHISPER_MODEL.detect_language(mel)
- options = whisper.DecodingOptions()
- if whisper_lang != WHISPER_DETECT_LANG:
- whisper_lang_code = POLLY_VOICE_DATA.get_whisper_lang_code(whisper_lang)
- options = whisper.DecodingOptions(language=whisper_lang_code)
- result = whisper.decode(WHISPER_MODEL, mel, options)
- print("result.text", result.text)
- result_text = ""
- if result and result.text:
- result_text = result.text
- return result_text
-
-
-# Temporarily address Wolfram Alpha SSL certificate issue
-ssl._create_default_https_context = ssl._create_unverified_context
-
-
-# TEMPORARY FOR TESTING
-def transcribe_dummy(aud_inp_tb, whisper_lang):
- if aud_inp_tb is None:
- return ""
- # aud = whisper.load_audio(aud_inp)
- # aud = whisper.pad_or_trim(aud)
- # mel = whisper.log_mel_spectrogram(aud).to(WHISPER_MODEL.device)
- # _, probs = WHISPER_MODEL.detect_language(mel)
- # options = whisper.DecodingOptions()
- # options = whisper.DecodingOptions(language="ja")
- # result = whisper.decode(WHISPER_MODEL, mel, options)
- result_text = "Whisper will detect language"
- if whisper_lang != WHISPER_DETECT_LANG:
- whisper_lang_code = POLLY_VOICE_DATA.get_whisper_lang_code(whisper_lang)
- result_text = f"Whisper will use lang code: {whisper_lang_code}"
- print("result_text", result_text)
- return aud_inp_tb
-
-
-# Pertains to Express-inator functionality
-def transform_text(desc, express_chain, num_words, formality,
- anticipation_level, joy_level, trust_level,
- fear_level, surprise_level, sadness_level, disgust_level, anger_level,
- lang_level, translate_to, literary_style):
- num_words_prompt = ""
- if num_words and int(num_words) != 0:
- num_words_prompt = "using up to " + str(num_words) + " words, "
-
- # Change some arguments to lower case
- formality = formality.lower()
- anticipation_level = anticipation_level.lower()
- joy_level = joy_level.lower()
- trust_level = trust_level.lower()
- fear_level = fear_level.lower()
- surprise_level = surprise_level.lower()
- sadness_level = sadness_level.lower()
- disgust_level = disgust_level.lower()
- anger_level = anger_level.lower()
-
- formality_str = ""
- if formality != "n/a":
- formality_str = "in a " + formality + " manner, "
-
- # put all emotions into a list
- emotions = []
- if anticipation_level != "n/a":
- emotions.append(anticipation_level)
- if joy_level != "n/a":
- emotions.append(joy_level)
- if trust_level != "n/a":
- emotions.append(trust_level)
- if fear_level != "n/a":
- emotions.append(fear_level)
- if surprise_level != "n/a":
- emotions.append(surprise_level)
- if sadness_level != "n/a":
- emotions.append(sadness_level)
- if disgust_level != "n/a":
- emotions.append(disgust_level)
- if anger_level != "n/a":
- emotions.append(anger_level)
-
- emotions_str = ""
- if len(emotions) > 0:
- if len(emotions) == 1:
- emotions_str = "with emotion of " + emotions[0] + ", "
- else:
- emotions_str = "with emotions of " + ", ".join(emotions[:-1]) + " and " + emotions[-1] + ", "
-
- lang_level_str = ""
- if lang_level != LANG_LEVEL_DEFAULT:
- lang_level_str = "at a level that a person in " + lang_level + " can easily comprehend, " if translate_to == TRANSLATE_TO_DEFAULT else ""
-
- translate_to_str = ""
- if translate_to != TRANSLATE_TO_DEFAULT:
- translate_to_str = "translated to " + translate_to + (
- "" if lang_level == TRANSLATE_TO_DEFAULT else " at a level that a person in " + lang_level + " can easily comprehend") + ", "
-
- literary_style_str = ""
- if literary_style != LITERARY_STYLE_DEFAULT:
- if literary_style == "Prose":
- literary_style_str = "as prose, "
- if literary_style == "Story":
- literary_style_str = "as a story, "
- elif literary_style == "Summary":
- literary_style_str = "as a summary, "
- elif literary_style == "Outline":
- literary_style_str = "as an outline with numbers and lower case letters, "
- elif literary_style == "Bullets":
- literary_style_str = "as bullet points using bullets, "
- elif literary_style == "Poetry":
- literary_style_str = "as a poem, "
- elif literary_style == "Haiku":
- literary_style_str = "as a haiku, "
- elif literary_style == "Limerick":
- literary_style_str = "as a limerick, "
- elif literary_style == "Rap":
- literary_style_str = "as a rap, "
- elif literary_style == "Joke":
- literary_style_str = "as a very funny joke with a setup and punchline, "
- elif literary_style == "Knock-knock":
- literary_style_str = "as a very funny knock-knock joke, "
- elif literary_style == "FAQ":
- literary_style_str = "as a FAQ with several questions and answers, "
-
- formatted_prompt = PROMPT_TEMPLATE.format(
- original_words=desc,
- num_words=num_words_prompt,
- formality=formality_str,
- emotions=emotions_str,
- lang_level=lang_level_str,
- translate_to=translate_to_str,
- literary_style=literary_style_str
- )
-
- trans_instr = num_words_prompt + formality_str + emotions_str + lang_level_str + translate_to_str + literary_style_str
- if express_chain and len(trans_instr.strip()) > 0:
- generated_text = express_chain.run(
- {'original_words': desc, 'num_words': num_words_prompt, 'formality': formality_str,
- 'emotions': emotions_str, 'lang_level': lang_level_str, 'translate_to': translate_to_str,
- 'literary_style': literary_style_str}).strip()
- else:
- print("Not transforming text")
- generated_text = desc
-
- # replace all newlines with double newlines in generated_text
- generated_text = generated_text.replace("\n", "\n\n")
-
- prompt_plus_generated = "GPT prompt: " + formatted_prompt + "\n\n" + generated_text
-
- print("\n==== date/time: " + str(datetime.datetime.now() - datetime.timedelta(hours=5)) + " ====")
- print("prompt_plus_generated: " + prompt_plus_generated)
-
- return generated_text
-
-
-def load_chain(tools_list, llm):
- chain = None
- express_chain = None
- memory = None
- if llm:
- print("\ntools_list", tools_list)
- tool_names = tools_list
- tools = load_tools(tool_names, llm=llm, news_api_key=news_api_key, tmdb_bearer_token=tmdb_bearer_token)
-
- memory = ConversationBufferMemory(memory_key="chat_history")
-
- chain = initialize_agent(tools, llm, agent="conversational-react-description", verbose=True, memory=memory)
- express_chain = LLMChain(llm=llm, prompt=PROMPT_TEMPLATE, verbose=True)
- return chain, express_chain, memory
-
-
-def set_openai_api_key(api_key):
- """Set the api key and return chain.
- If no api_key, then None is returned.
- """
- if api_key and api_key.startswith("sk-") and len(api_key) > 50:
- os.environ["OPENAI_API_KEY"] = api_key
- print("\n\n ++++++++++++++ Setting OpenAI API key ++++++++++++++ \n\n")
- print(str(datetime.datetime.now()) + ": Before OpenAI, OPENAI_API_KEY length: " + str(
- len(os.environ["OPENAI_API_KEY"])))
- llm = OpenAI(temperature=0, max_tokens=MAX_TOKENS)
- print(str(datetime.datetime.now()) + ": After OpenAI, OPENAI_API_KEY length: " + str(
- len(os.environ["OPENAI_API_KEY"])))
- chain, express_chain, memory = load_chain(TOOLS_DEFAULT_LIST, llm)
-
- # Pertains to question answering functionality
- embeddings = OpenAIEmbeddings()
- qa_chain = load_qa_chain(OpenAI(temperature=0), chain_type="stuff")
-
- print(str(datetime.datetime.now()) + ": After load_chain, OPENAI_API_KEY length: " + str(
- len(os.environ["OPENAI_API_KEY"])))
- os.environ["OPENAI_API_KEY"] = ""
- return chain, express_chain, llm, embeddings, qa_chain, memory
- return None, None, None, None, None, None
-
-
-def run_chain(chain, inp, capture_hidden_text):
- output = ""
- hidden_text = None
- if capture_hidden_text:
- error_msg = None
- tmp = sys.stdout
- hidden_text_io = StringIO()
- sys.stdout = hidden_text_io
-
- try:
- output = chain.run(input=inp)
- except AuthenticationError as ae:
- error_msg = AUTH_ERR_MSG + str(datetime.datetime.now()) + ". " + str(ae)
- print("error_msg", error_msg)
- except RateLimitError as rle:
- error_msg = "\n\nRateLimitError: " + str(rle)
- except ValueError as ve:
- error_msg = "\n\nValueError: " + str(ve)
- except InvalidRequestError as ire:
- error_msg = "\n\nInvalidRequestError: " + str(ire)
- except Exception as e:
- error_msg = "\n\n" + BUG_FOUND_MSG + ":\n\n" + str(e)
-
- sys.stdout = tmp
- hidden_text = hidden_text_io.getvalue()
-
- # remove escape characters from hidden_text
- hidden_text = re.sub(r'\x1b[^m]*m', '', hidden_text)
-
- # remove "Entering new AgentExecutor chain..." from hidden_text
- hidden_text = re.sub(r"Entering new AgentExecutor chain...\n", "", hidden_text)
-
- # remove "Finished chain." from hidden_text
- hidden_text = re.sub(r"Finished chain.", "", hidden_text)
-
- # Add newline after "Thought:" "Action:" "Observation:" "Input:" and "AI:"
- hidden_text = re.sub(r"Thought:", "\n\nThought:", hidden_text)
- hidden_text = re.sub(r"Action:", "\n\nAction:", hidden_text)
- hidden_text = re.sub(r"Observation:", "\n\nObservation:", hidden_text)
- hidden_text = re.sub(r"Input:", "\n\nInput:", hidden_text)
- hidden_text = re.sub(r"AI:", "\n\nAI:", hidden_text)
-
- if error_msg:
- hidden_text += error_msg
-
- print("hidden_text: ", hidden_text)
- else:
- try:
- output = chain.run(input=inp)
- except AuthenticationError as ae:
- output = AUTH_ERR_MSG + str(datetime.datetime.now()) + ". " + str(ae)
- print("output", output)
- except RateLimitError as rle:
- output = "\n\nRateLimitError: " + str(rle)
- except ValueError as ve:
- output = "\n\nValueError: " + str(ve)
- except InvalidRequestError as ire:
- output = "\n\nInvalidRequestError: " + str(ire)
- except Exception as e:
- output = "\n\n" + BUG_FOUND_MSG + ":\n\n" + str(e)
-
- return output, hidden_text
-
-
-def reset_memory(history, memory):
- memory.clear()
- history = []
- return history, history, memory
-
-
-class ChatWrapper:
-
- def __init__(self):
- self.lock = Lock()
-
- def __call__(
- self, api_key: str, inp: str, history: Optional[Tuple[str, str]], chain: Optional[ConversationChain],
- trace_chain: bool, speak_text: bool, talking_head: bool, monologue: bool, express_chain: Optional[LLMChain],
- num_words, formality, anticipation_level, joy_level, trust_level,
- fear_level, surprise_level, sadness_level, disgust_level, anger_level,
- lang_level, translate_to, literary_style, qa_chain, docsearch, use_embeddings
- ):
- """Execute the chat functionality."""
- self.lock.acquire()
- try:
- print("\n==== date/time: " + str(datetime.datetime.now()) + " ====")
- print("inp: " + inp)
- print("trace_chain: ", trace_chain)
- print("speak_text: ", speak_text)
- print("talking_head: ", talking_head)
- print("monologue: ", monologue)
- history = history or []
- # If chain is None, that is because no API key was provided.
- output = "Please paste your OpenAI key from openai.com to use this app. " + str(datetime.datetime.now())
- hidden_text = output
-
- if chain:
- # Set OpenAI key
- import openai
- openai.api_key = api_key
- if not monologue:
- if use_embeddings:
- if inp and inp.strip() != "":
- if docsearch:
- docs = docsearch.similarity_search(inp)
- output = str(qa_chain.run(input_documents=docs, question=inp))
- else:
- output, hidden_text = "Please supply some text in the Embeddings tab.", None
- else:
- output, hidden_text = "What's on your mind?", None
- else:
- output, hidden_text = run_chain(chain, inp, capture_hidden_text=trace_chain)
- else:
- output, hidden_text = inp, None
-
- output = transform_text(output, express_chain, num_words, formality, anticipation_level, joy_level,
- trust_level,
- fear_level, surprise_level, sadness_level, disgust_level, anger_level,
- lang_level, translate_to, literary_style)
-
- text_to_display = output
- if trace_chain:
- text_to_display = hidden_text + "\n\n" + output
- history.append((inp, text_to_display))
-
- html_video, temp_file, html_audio, temp_aud_file = None, None, None, None
- if speak_text:
- if talking_head:
- if len(output) <= MAX_TALKING_HEAD_TEXT_LENGTH:
- html_video, temp_file = do_html_video_speak(output, translate_to)
- else:
- temp_file = LOOPING_TALKING_HEAD
- html_video = create_html_video(temp_file, TALKING_HEAD_WIDTH)
- html_audio, temp_aud_file = do_html_audio_speak(output, translate_to)
- else:
- html_audio, temp_aud_file = do_html_audio_speak(output, translate_to)
- else:
- if talking_head:
- temp_file = LOOPING_TALKING_HEAD
- html_video = create_html_video(temp_file, TALKING_HEAD_WIDTH)
- else:
- # html_audio, temp_aud_file = do_html_audio_speak(output, translate_to)
- # html_video = create_html_video(temp_file, "128")
- pass
-
- except Exception as e:
- raise e
- finally:
- self.lock.release()
- return history, history, html_video, temp_file, html_audio, temp_aud_file, ""
- # return history, history, html_audio, temp_aud_file, ""
-
-
-chat = ChatWrapper()
-
-
-def do_html_audio_speak(words_to_speak, polly_language):
- polly_client = boto3.Session(
- aws_access_key_id=os.environ["AWS_ACCESS_KEY_ID"],
- aws_secret_access_key=os.environ["AWS_SECRET_ACCESS_KEY"],
- region_name=os.environ["AWS_DEFAULT_REGION"]
- ).client('polly')
-
- # voice_id, language_code, engine = POLLY_VOICE_DATA.get_voice(polly_language, "Female")
- voice_id, language_code, engine = POLLY_VOICE_DATA.get_voice(polly_language, "Male")
- if not voice_id:
- # voice_id = "Joanna"
- voice_id = "Matthew"
- language_code = "en-US"
- engine = NEURAL_ENGINE
- response = polly_client.synthesize_speech(
- Text=words_to_speak,
- OutputFormat='mp3',
- VoiceId=voice_id,
- LanguageCode=language_code,
- Engine=engine
- )
-
-    html_audio = '<pre>no audio</pre>'
-
- # Save the audio stream returned by Amazon Polly on Lambda's temp directory
- if "AudioStream" in response:
- with closing(response["AudioStream"]) as stream:
- # output = os.path.join("/tmp/", "speech.mp3")
-
- try:
- with open('audios/tempfile.mp3', 'wb') as f:
- f.write(stream.read())
- temp_aud_file = gr.File("audios/tempfile.mp3")
- temp_aud_file_url = "/file=" + temp_aud_file.value['name']
-                html_audio = f'<audio autoplay><source src={temp_aud_file_url} type="audio/mp3"></audio>'
- except IOError as error:
- # Could not write to file, exit gracefully
- print(error)
- return None, None
- else:
- # The response didn't contain audio data, exit gracefully
- print("Could not stream audio")
- return None, None
-
- return html_audio, "audios/tempfile.mp3"
-
-
-def create_html_video(file_name, width):
- temp_file_url = "/file=" + tmp_file.value['name']
-    html_video = f'<video width={width} height={width} autoplay muted playsinline><source src={temp_file_url} type="video/mp4"></video>'
- return html_video
-
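Both helpers above return raw HTML strings for `gr.HTML` components and point their `src` attributes at Gradio's `/file=` route; note that `create_html_video` builds its URL from the module-level `tmp_file` component rather than from its `file_name` argument. The original tag markup did not survive in this dump, so the following is only a sketch of the pattern, with the attribute set being my assumption:

```python
def build_video_html(local_path: str, width: int) -> str:
    """Sketch: wrap a locally served file in a <video> tag using Gradio's /file= route."""
    file_url = f"/file={local_path}"
    return (f'<video width="{width}" height="{width}" autoplay muted playsinline>'
            f'<source src="{file_url}" type="video/mp4"></video>')
```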
-
-def do_html_video_speak(words_to_speak, azure_language):
- azure_voice = AZURE_VOICE_DATA.get_voice(azure_language, "Male")
- if not azure_voice:
- azure_voice = "en-US-ChristopherNeural"
-
- headers = {"Authorization": f"Bearer {os.environ['EXHUMAN_API_KEY']}"}
- body = {
- 'bot_name': 'Masahiro',
- 'bot_response': words_to_speak,
- 'azure_voice': azure_voice,
- 'azure_style': 'friendly',
- 'animation_pipeline': 'high_speed',
- }
- api_endpoint = "https://api.exh.ai/animations/v1/generate_lipsync"
- res = requests.post(api_endpoint, json=body, headers=headers)
- print("res.status_code: ", res.status_code)
-
-    html_video = '<pre>no video</pre>'
-    """)
-
- openai_api_key_textbox = gr.Textbox(placeholder="Paste your OpenAI API key (sk-...) and hit Enter",
- show_label=False, lines=1, type='password')
-
- with gr.Row():
- with gr.Column(scale=1, min_width=TALKING_HEAD_WIDTH, visible=True):
- speak_text_cb = gr.Checkbox(label="Enable speech", value=False)
- speak_text_cb.change(update_foo, inputs=[speak_text_cb, speak_text_state],
- outputs=[speak_text_state])
-
- my_file = gr.File(label="Upload a file", type="file", visible=False)
- tmp_file = gr.File(LOOPING_TALKING_HEAD, visible=False)
- # tmp_file_url = "/file=" + tmp_file.value['name']
- htm_video = create_html_video(LOOPING_TALKING_HEAD, TALKING_HEAD_WIDTH)
- video_html = gr.HTML(htm_video)
-
- # my_aud_file = gr.File(label="Audio file", type="file", visible=True)
- tmp_aud_file = gr.File("audios/tempfile.mp3", visible=False)
- tmp_aud_file_url = "/file=" + tmp_aud_file.value['name']
-            htm_audio = f'<audio><source src={tmp_aud_file_url} type="audio/mp3"></audio>'
- audio_html = gr.HTML(htm_audio)
-
- with gr.Column(scale=7):
- chatbot = gr.Chatbot()
-
- with gr.Row():
- message = gr.Textbox(label="What's on your mind??",
- placeholder="What's the answer to life, the universe, and everything?",
- lines=1)
- submit = gr.Button(value="Send", variant="secondary").style(full_width=False)
-
- # UNCOMMENT TO USE WHISPER
- with gr.Row():
- audio_comp = gr.Microphone(source="microphone", type="filepath", label="Just say it!",
- interactive=True, streaming=False)
- audio_comp.change(transcribe, inputs=[audio_comp, whisper_lang_state], outputs=[message])
-
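The `transcribe` callback wired to the microphone is defined elsewhere in the file. A minimal sketch of such a callback against the openai-python 0.x Audio API that the rest of this file targets; the body is an assumption, and the selected Whisper language is simply ignored here:

```python
import openai

def transcribe_sketch(audio_path: str, whisper_lang: str) -> str:
    """Hypothetical speech-to-text callback: returns the transcript of the recorded clip."""
    if not audio_path:
        return ""
    with open(audio_path, "rb") as audio_file:
        # openai-python 0.27-style call; the response exposes the transcript under "text".
        result = openai.Audio.transcribe("whisper-1", audio_file)
    return result["text"]
```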
- # TEMPORARY FOR TESTING
- # with gr.Row():
- # audio_comp_tb = gr.Textbox(label="Just say it!", lines=1)
- # audio_comp_tb.submit(transcribe_dummy, inputs=[audio_comp_tb, whisper_lang_state], outputs=[message])
-
- gr.Examples(
- examples=["How many people live in Canada?",
- "What is 2 to the 30th power?",
- "If x+y=10 and x-y=4, what are x and y?",
- "How much did it rain in SF today?",
- "Get me information about the movie 'Avatar'",
- "What are the top tech headlines in the US?",
- "On the desk, you see two blue booklets, two purple booklets, and two yellow pairs of sunglasses - "
- "if I remove all the pairs of sunglasses from the desk, how many purple items remain on it?"],
- inputs=message
- )
-
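The Send button and message box are wired to the `chat` instance elsewhere in the file: the inputs list mirrors `ChatWrapper.__call__`'s parameters and the outputs list mirrors its seven-item return. A small self-contained sketch of that lock-serialized pattern with made-up names (`LockedEcho` stands in for `ChatWrapper` and is not part of the original):

```python
import gradio as gr
from threading import Lock

class LockedEcho:
    """Stand-in for ChatWrapper: serializes concurrent submissions with a Lock (sketch)."""
    def __init__(self):
        self.lock = Lock()

    def __call__(self, message, history):
        with self.lock:
            history = history or []
            history.append((message, message.upper()))   # placeholder "model" reply
            return history, history, ""                  # chatbot, state, cleared textbox

locked_echo = LockedEcho()

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    history_state = gr.State([])
    message = gr.Textbox()
    message.submit(locked_echo, inputs=[message, history_state],
                   outputs=[chatbot, history_state, message])

# demo.launch()
```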
- with gr.Tab("Settings"):
- tools_cb_group = gr.CheckboxGroup(label="Tools:", choices=TOOLS_LIST,
- value=TOOLS_DEFAULT_LIST)
- tools_cb_group.change(update_selected_tools,
- inputs=[tools_cb_group, tools_list_state, llm_state],
- outputs=[tools_list_state, llm_state, chain_state, express_chain_state])
-
- trace_chain_cb = gr.Checkbox(label="Show reasoning chain in chat bubble", value=False)
- trace_chain_cb.change(update_foo, inputs=[trace_chain_cb, trace_chain_state],
- outputs=[trace_chain_state])
-
- # speak_text_cb = gr.Checkbox(label="Speak text from agent", value=False)
- # speak_text_cb.change(update_foo, inputs=[speak_text_cb, speak_text_state],
- # outputs=[speak_text_state])
-
- talking_head_cb = gr.Checkbox(label="Show talking head", value=True)
- talking_head_cb.change(update_talking_head, inputs=[talking_head_cb, talking_head_state],
- outputs=[talking_head_state, video_html])
-
- monologue_cb = gr.Checkbox(label="Babel fish mode (translate/restate what you enter, no conversational agent)",
- value=False)
- monologue_cb.change(update_foo, inputs=[monologue_cb, monologue_state],
- outputs=[monologue_state])
-
- reset_btn = gr.Button(value="Reset chat", variant="secondary").style(full_width=False)
- reset_btn.click(reset_memory, inputs=[history_state, memory_state], outputs=[chatbot, history_state, memory_state])
-
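Almost every control in these tabs shares the generic `update_foo` handler, whose definition is outside this hunk; it is always called with `[component, state]` inputs and `[state]` outputs. A plausible minimal implementation, offered only as an assumption about what it does:

```python
def update_foo_sketch(widget_value, state):
    """Presumed generic setter: store the widget's new value in its paired gr.State."""
    state = widget_value
    return state
```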
- with gr.Tab("Whisper STT"):
- whisper_lang_radio = gr.Radio(label="Whisper speech-to-text language:", choices=[
- WHISPER_DETECT_LANG, "Arabic", "Arabic (Gulf)", "Catalan", "Chinese (Cantonese)", "Chinese (Mandarin)",
- "Danish", "Dutch", "English (Australian)", "English (British)", "English (Indian)", "English (New Zealand)",
- "English (South African)", "English (US)", "English (Welsh)", "Finnish", "French", "French (Canadian)",
- "German", "German (Austrian)", "Georgian", "Hindi", "Icelandic", "Indonesian", "Italian", "Japanese",
- "Korean", "Norwegian", "Polish",
- "Portuguese (Brazilian)", "Portuguese (European)", "Romanian", "Russian", "Spanish (European)",
- "Spanish (Mexican)", "Spanish (US)", "Swedish", "Turkish", "Ukrainian", "Welsh"],
- value=WHISPER_DETECT_LANG)
-
- whisper_lang_radio.change(update_foo,
- inputs=[whisper_lang_radio, whisper_lang_state],
- outputs=[whisper_lang_state])
-
- with gr.Tab("Translate to"):
- lang_level_radio = gr.Radio(label="Language level:", choices=[
- LANG_LEVEL_DEFAULT, "1st grade", "2nd grade", "3rd grade", "4th grade", "5th grade", "6th grade",
- "7th grade", "8th grade", "9th grade", "10th grade", "11th grade", "12th grade", "University"],
- value=LANG_LEVEL_DEFAULT)
- lang_level_radio.change(update_foo, inputs=[lang_level_radio, lang_level_state],
- outputs=[lang_level_state])
-
- translate_to_radio = gr.Radio(label="Language:", choices=[
- TRANSLATE_TO_DEFAULT, "Arabic", "Arabic (Gulf)", "Catalan", "Chinese (Cantonese)", "Chinese (Mandarin)",
- "Danish", "Dutch", "English (Australian)", "English (British)", "English (Indian)", "English (New Zealand)",
- "English (South African)", "English (US)", "English (Welsh)", "Finnish", "French", "French (Canadian)",
- "German", "German (Austrian)", "Georgian", "Hindi", "Icelandic", "Indonesian", "Italian", "Japanese",
- "Korean", "Norwegian", "Polish",
- "Portuguese (Brazilian)", "Portuguese (European)", "Romanian", "Russian", "Spanish (European)",
- "Spanish (Mexican)", "Spanish (US)", "Swedish", "Turkish", "Ukrainian", "Welsh",
- "emojis", "Gen Z slang", "how the stereotypical Karen would say it", "Klingon", "Neanderthal",
- "Pirate", "Strange Planet expospeak technical talk", "Yoda"],
- value=TRANSLATE_TO_DEFAULT)
-
- translate_to_radio.change(update_foo,
- inputs=[translate_to_radio, translate_to_state],
- outputs=[translate_to_state])
-
- with gr.Tab("Formality"):
- formality_radio = gr.Radio(label="Formality:",
- choices=[FORMALITY_DEFAULT, "Casual", "Polite", "Honorific"],
- value=FORMALITY_DEFAULT)
- formality_radio.change(update_foo,
- inputs=[formality_radio, formality_state],
- outputs=[formality_state])
-
- with gr.Tab("Lit style"):
- literary_style_radio = gr.Radio(label="Literary style:", choices=[
- LITERARY_STYLE_DEFAULT, "Prose", "Story", "Summary", "Outline", "Bullets", "Poetry", "Haiku", "Limerick", "Rap",
- "Joke", "Knock-knock", "FAQ"],
- value=LITERARY_STYLE_DEFAULT)
-
- literary_style_radio.change(update_foo,
- inputs=[literary_style_radio, literary_style_state],
- outputs=[literary_style_state])
-
- with gr.Tab("Emotions"):
- anticipation_level_radio = gr.Radio(label="Anticipation level:",
- choices=[EMOTION_DEFAULT, "Interest", "Anticipation", "Vigilance"],
- value=EMOTION_DEFAULT)
- anticipation_level_radio.change(update_foo,
- inputs=[anticipation_level_radio, anticipation_level_state],
- outputs=[anticipation_level_state])
-
- joy_level_radio = gr.Radio(label="Joy level:",
- choices=[EMOTION_DEFAULT, "Serenity", "Joy", "Ecstasy"],
- value=EMOTION_DEFAULT)
- joy_level_radio.change(update_foo,
- inputs=[joy_level_radio, joy_level_state],
- outputs=[joy_level_state])
-
- trust_level_radio = gr.Radio(label="Trust level:",
- choices=[EMOTION_DEFAULT, "Acceptance", "Trust", "Admiration"],
- value=EMOTION_DEFAULT)
- trust_level_radio.change(update_foo,
- inputs=[trust_level_radio, trust_level_state],
- outputs=[trust_level_state])
-
- fear_level_radio = gr.Radio(label="Fear level:",
- choices=[EMOTION_DEFAULT, "Apprehension", "Fear", "Terror"],
- value=EMOTION_DEFAULT)
- fear_level_radio.change(update_foo,
- inputs=[fear_level_radio, fear_level_state],
- outputs=[fear_level_state])
-
- surprise_level_radio = gr.Radio(label="Surprise level:",
- choices=[EMOTION_DEFAULT, "Distraction", "Surprise", "Amazement"],
- value=EMOTION_DEFAULT)
- surprise_level_radio.change(update_foo,
- inputs=[surprise_level_radio, surprise_level_state],
- outputs=[surprise_level_state])
-
- sadness_level_radio = gr.Radio(label="Sadness level:",
- choices=[EMOTION_DEFAULT, "Pensiveness", "Sadness", "Grief"],
- value=EMOTION_DEFAULT)
- sadness_level_radio.change(update_foo,
- inputs=[sadness_level_radio, sadness_level_state],
- outputs=[sadness_level_state])
-
- disgust_level_radio = gr.Radio(label="Disgust level:",
- choices=[EMOTION_DEFAULT, "Boredom", "Disgust", "Loathing"],
- value=EMOTION_DEFAULT)
- disgust_level_radio.change(update_foo,
- inputs=[disgust_level_radio, disgust_level_state],
- outputs=[disgust_level_state])
-
- anger_level_radio = gr.Radio(label="Anger level:",
- choices=[EMOTION_DEFAULT, "Annoyance", "Anger", "Rage"],
- value=EMOTION_DEFAULT)
- anger_level_radio.change(update_foo,
- inputs=[anger_level_radio, anger_level_state],
- outputs=[anger_level_state])
-
- with gr.Tab("Max words"):
-        num_words_slider = gr.Slider(label="Max number of words to generate (0 for no limit)",
- value=NUM_WORDS_DEFAULT, minimum=0, maximum=MAX_WORDS, step=10)
- num_words_slider.change(update_foo,
- inputs=[num_words_slider, num_words_state],
- outputs=[num_words_state])
-
- with gr.Tab("Embeddings"):
- embeddings_text_box = gr.Textbox(label="Enter text for embeddings and hit Create:",
- lines=20)
-
- with gr.Row():
- use_embeddings_cb = gr.Checkbox(label="Use embeddings", value=False)
- use_embeddings_cb.change(update_use_embeddings, inputs=[use_embeddings_cb, use_embeddings_state],
- outputs=[use_embeddings_state])
-
- embeddings_text_submit = gr.Button(value="Create", variant="secondary").style(full_width=False)
- embeddings_text_submit.click(update_embeddings,
- inputs=[embeddings_text_box, embeddings_state, qa_chain_state],
- outputs=[docsearch_state])
-
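`update_embeddings` and the `qa_chain` construction sit outside this hunk; the chat handler above consumes their result by calling `docsearch.similarity_search(inp)` and then `qa_chain.run(input_documents=docs, question=inp)`. A minimal sketch of how the `docsearch` index could be built from the pasted text with the LangChain APIs of this era; the splitter settings and the FAISS backend are assumptions:

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS   # assumes the faiss-cpu package is installed

def build_docsearch_sketch(raw_text: str):
    """Split the pasted text into chunks and index them for similarity search (sketch)."""
    splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
    chunks = splitter.split_text(raw_text)
    embeddings = OpenAIEmbeddings()        # reads OPENAI_API_KEY from the environment
    return FAISS.from_texts(chunks, embeddings)
```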
- gr.HTML("""
-
-        This application, developed by James L. Weaver,
- demonstrates a conversational agent implemented with OpenAI GPT-3.5 and LangChain.
- When necessary, it leverages tools for complex math, searching the internet, and accessing news and weather.
- Uses talking heads from Ex-Human.
- For faster inference without waiting in queue, you may duplicate the space.
-
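The agent described here is assembled in `update_selected_tools` and related setup code that is not part of this hunk. A minimal sketch of a GPT-3.5 conversational agent with tools and memory using the LangChain APIs of that period; the tool list and agent type below are assumptions, not the space's actual configuration:

```python
from langchain.agents import initialize_agent, load_tools
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory

def build_agent_sketch(openai_api_key: str, tool_names=("llm-math",)):
    """Hypothetical factory: GPT-3.5 plus selected tools plus conversation memory (sketch)."""
    llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0,
                     openai_api_key=openai_api_key)
    tools = load_tools(list(tool_names), llm=llm)
    memory = ConversationBufferMemory(memory_key="chat_history")
    return initialize_agent(tools, llm,
                            agent="conversational-react-description",
                            verbose=True, memory=memory)
```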