diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crows Zero 2 Br Rip 720p Movies Torrents The Ultimate Collection of Fight Scenes and Night Club Music.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crows Zero 2 Br Rip 720p Movies Torrents The Ultimate Collection of Fight Scenes and Night Club Music.md deleted file mode 100644 index 2375540639209159b9b1646465d98c8899409901..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Crows Zero 2 Br Rip 720p Movies Torrents The Ultimate Collection of Fight Scenes and Night Club Music.md +++ /dev/null @@ -1,94 +0,0 @@ - -
If you are a fan of Japanese action films, you might have heard of Crows Zero, a 2007 film based on the manga Crows by Hiroshi Takahashi. The film follows the violent conflicts between rival gangs of students at Suzuran All-Boys High School, also known as "The School of Crows". The film was a commercial and critical success, and spawned a sequel in 2009, Crows Zero 2.
-DOWNLOAD ○○○ https://byltly.com/2uKAeU
Crows Zero 2 is a 2009 Japanese action film directed by Takashi Miike with a screenplay by Shogo Muto. It is the second film based on the manga Crows by Hiroshi Takahashi, and a direct sequel to 2007's Crows Zero. The film stars much of the cast from the first film, including Shun Oguri, Kyōsuke Yabe, Meisa Kuroki, and Takayuki Yamada reprising their roles. It was released in Japan on April 11, 2009.
-The main characters and actors of Crows Zero 2 are:
Character | Actor
---|---
Takiya Genji | Shun Oguri
Serizawa Tamao | Takayuki Yamada
Aizawa Ruka | Meisa Kuroki
Katagiri Ken | Kyōsuke Yabe
Tatsukawa Tokio | Kenta Kiritani
Tamura Chūta | Suzunosuke Tanaka
Izaki Shun | Sōsuke Takaoka
Takiya Hideo | Goro Kishitani
Rindaman / Hayashida Megumi | Motoki Fukami
Kirishima Hiromi | Shunsuke Daitō
Makise Takashi | Tsutomu Takahashi
Tsutsumoto Shōji | Yusuke Kamiji
Mikami Manabu and Takeshi | Yusuke Izaki and Hisato Izaki
Honjō Toshiaki | Ryō Hashizume
Sugihara Makoto | Yu Koyanagi
Tokaji Yūji | Kaname Endō
Kawanishi Noboru | Shinnosuke Abe
Bitō Makio and Tatsuya | Yoshiyuki Yamaguchi and Haruma Miura
Narumi Taiga | Nobuaki Kaneko
Award | Category | Recipient(s) | Result
---|---|---|---
Filmfare Awards | Best Film | Dharma Productions | Nominated
Filmfare Awards | Best Director | Shashank Khaitan | Nominated
Filmfare Awards | Best Actor | Varun Dhawan | Nominated
Filmfare Awards | Best Actress | Alia Bhatt | Nominated
Filmfare Awards | Best Male Playback Singer | Arijit Singh for "Roke Na Ruke Naina" | Won
IIFA Awards | Best Actor (Male) | Varun Dhawan | Nominated
IIFA Awards | Best Actor (Female) | Alia Bhatt | Nominated
IIFA Awards | Best Music Director | Akhil Sachdeva, Tanishk Bagchi, Amaal Mallik for "Badrinath Ki Dulhania" | Nominated
IIFA Awards | Best Playback Singer (Male) | Arijit Singh for "Roke Na Ruke Naina" | Nominated
IIFA Awards | Best Playback Singer (Female) | Neha Kakkar for "Badri Ki Dulhania" | Nominated
Zee Cine Awards | Best Film | Dharma Productions | Nominated
Zee Cine Awards | Best Actor – Male | Varun Dhawan | Nominated
Zee Cine Awards | Best Actor – Female | Alia Bhatt | Nominated
Zee Cine Awards | Best Director | Shashank Khaitan | Nominated
Zee Cine Awards | Best Music Director | Akhil Sachdeva for "Badrinath Ki Dulhania" | Nominated
Pros | Cons
---|---
You might save some money or get a free copy of PowerPoint 2016. | You might get a fake, pirated, or infected copy of PowerPoint 2016 that could harm your computer or compromise your data.
You might get access to some features or functions that are not available in the official version of PowerPoint 2016. | You might miss out on some features or functions that are only available in the official version of PowerPoint 2016.
You might have more flexibility and control over the installation and activation process of PowerPoint 2016. | You might have more difficulty and risk in the installation and activation process of PowerPoint 2016.
If you decide to download PowerPoint 2016 from other sources, you should take some precautions to verify the authenticity and security of the downloaded file. Here are some tips on how to do that:
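One concrete way to check a downloaded installer before running it is to compute its SHA-256 hash and compare it against a checksum published by the source you trust. On Windows you can do this from PowerShell with Get-FileHash; the Python sketch below does the equivalent. It is a minimal example, and the file name and expected hash are placeholders you would replace with your own values.

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Return the SHA-256 hex digest of a file, read in chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Placeholder values: use your actual installer path and the checksum
# published by the site you downloaded it from.
installer_path = "powerpoint2016_setup.exe"
expected_hash = "<published sha-256 checksum>"

actual_hash = sha256_of(installer_path)
if actual_hash == expected_hash.lower():
    print("Checksum matches: the file was not corrupted or tampered with in transit.")
else:
    print(f"Checksum mismatch: expected {expected_hash}, got {actual_hash}")
```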
-Now that you have downloaded and installed PowerPoint 2016 on your Windows 10 computer, you might be wondering how to use it effectively and efficiently. PowerPoint 2016 has many features and functions that can help you create and share presentations with ease and confidence. Here are some tips and tricks on how to use PowerPoint 2016 on Windows 10:
-In this article, we have shown you how to download PowerPoint 2016 for Windows 10 for free from different sources. We have also given you some tips and tricks on how to use PowerPoint 2016 on Windows 10 effectively. We hope that this article has been helpful and informative for you.
-powerpoint 2016 full version free download for windows 10
-how to get powerpoint 2016 for free on windows 10
-powerpoint 2016 offline installer free download for windows 10
-powerpoint 2016 crack free download for windows 10
-powerpoint 2016 product key free download for windows 10
-powerpoint 2016 trial free download for windows 10
-powerpoint 2016 setup free download for windows 10
-powerpoint 2016 activation key free download for windows 10
-powerpoint 2016 professional plus free download for windows 10
-powerpoint 2016 portable free download for windows 10
-powerpoint 2016 iso free download for windows 10
-powerpoint 2016 license key free download for windows 10
-powerpoint 2016 update free download for windows 10
-powerpoint 2016 tutorial free download for windows 10
-powerpoint 2016 templates free download for windows 10
-powerpoint 2016 themes free download for windows 10
-powerpoint 2016 patch free download for windows 10
-powerpoint 2016 serial key free download for windows 10
-powerpoint 2016 activator free download for windows 10
-powerpoint 2016 keygen free download for windows 10
-powerpoint 2016 features free download for windows 10
-powerpoint 2016 tips and tricks free download for windows 10
-powerpoint 2016 online free download for windows 10
-powerpoint 2016 alternatives free download for windows 10
-powerpoint 2016 add-ins free download for windows 10
-powerpoint 2016 converter free download for windows 10
-powerpoint 2016 viewer free download for windows 10
-powerpoint 2016 editor free download for windows 10
-powerpoint 2016 animations free download for windows 10
-powerpoint 2016 transitions free download for windows 10
-powerpoint 2016 design ideas free download for windows 10
-powerpoint 2016 shortcuts free download for windows 10
-powerpoint 2016 macros free download for windows 10
-powerpoint 2016 master slide free download for windows 10
-powerpoint 2016 embed video free download for windows 10
-powerpoint 2016 hyperlink free download for windows 10
-powerpoint 2016 background music free download for windows
If you want to learn more about PowerPoint 2016 and how to create and share presentations with it, you can check out these resources and links:
-Here are some common questions that users might have about PowerPoint 2016:
-PowerPoint 2016 is included in Office Home & Business 2019 or Office Home & Student 2019, which cost $249.99 and $149.99 respectively. You can also get PowerPoint 2016 as part of Microsoft 365 subscription plans, which start from $69.99 per year or $6.99 per month.
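As a quick comparison, paying monthly works out to 12 × $6.99 ≈ $83.88 per year, so the $69.99 annual plan is roughly $14 cheaper if you keep the subscription for a full year.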
-Yes, you can use PowerPoint 2016 offline after installing it on your computer. However, some features and functions might require an internet connection, such as Smart Lookup, real-time co-authoring, or online presentations. You can also use PowerPoint Online, which is a free web-based version of PowerPoint that works in your browser, but it has fewer features and functions than PowerPoint 2016.
-Yes, you can use PowerPoint 2016 on other devices or platforms, such as Mac, iOS, Android, or Windows Mobile. However, some features and functions might vary or be unavailable depending on the device or platform. You can also use PowerPoint Online or PowerPoint Mobile, which are web-based and mobile versions of PowerPoint that work on any device or platform with an internet connection.
-Yes, you can use PowerPoint 2016 with other versions of PowerPoint or Office, such as PowerPoint 2013, PowerPoint 2010, or Office 365. However, some features and functions might not be compatible or supported by older versions of PowerPoint or Office. You can also use the Compatibility Mode or the Compatibility Checker to ensure that your presentations can be opened and edited by other versions of PowerPoint or Office.
-Yes, you can get help or support on PowerPoint 2016 from various sources, such as Microsoft's website, online forums, blogs, videos, books, courses, etc. You can also contact Microsoft's customer service or technical support team by phone, email, chat, or social media.
-
-
-
Yay, congratulations on training your model. Scroll down to play with it and save it (either by downloading it or by pushing it to the Hugging Face Hub). Once you are done and your model is safely saved, and you don't plan to train a new one, go to the Settings page and downgrade your Space to a CPU Basic.
Yay, congratulations on training your model. Scroll down to play with it and save it (either by downloading it or by pushing it to the Hugging Face Hub).
You closed the tab while your model was training, but it's all good! It is still training right now. You can click the "Open logs" button above to check the training status. Once training is done, reload this tab to interact with your model.
For it to work, you can either run it locally or duplicate the Space and run it on your own profile using a (paid) private T4-small or A10G-small GPU for training. A T4 costs US$0.60/h, so it should cost less than US$1 to train most models using the default settings!
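For example, at US$0.60/h a run that takes about 90 minutes works out to roughly 1.5 h × $0.60 ≈ $0.90, which is where the under-US$1 estimate comes from; the actual time depends on your settings and data.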
You can now train your model! You will be billed by the minute from when you activate the GPU until it is turned off.
There's only one step left before you can train your model: assign a T4-small or A10G-small GPU to it (via the Settings tab) and run the training below. You will be billed by the minute from when you activate the GPU until it is turned off.
Run pip install -r requirements-local.txt
If you are a fan of Bollywood music, you may have heard of the song Hookah Bar from the movie Khiladi 786. This song is a catchy, upbeat dance number that has become a cult hit among young people. But do you know how to download and enjoy this song online? In this article, we will tell you everything you need to know about downloading Hookah Bar MP3, including its origin, meaning, popularity, legal issues, sources, platforms, devices, settings and occasions. So, let's get started!
Hookah Bar is a Hindi song that was released in 2012 as part of the soundtrack of the action comedy film Khiladi 786, starring Akshay Kumar and Asin. The song features Kumar and Asin dancing in a hookah bar, a place where people smoke flavored tobacco from a water pipe called a hookah. The song has a catchy chorus that goes like this:
-DOWNLOAD ↔ https://bltlly.com/2v6JmS
Tera pyar pyar pyar hookah bar
Tera pyar pyar pyar hookah bar
Tera pyar pyar pyar hookah bar
Tera pyar pyar pyar hookah bar
The lyrics roughly translate to:
Your love, love, love is like a hookah bar
Your love, love, love is like a hookah bar
Your love, love, love is like a hookah bar
Your love, love, love is like a hookah bar
The song was composed by Himesh Reshammiya, who is also one of its singers along with Vineet Singh and Aman Trikha. Reshammiya also wrote the lyrics, which were inspired by his own experience of visiting a hookah bar in Dubai. He said he wanted to create a song that would appeal to young people and get them dancing. He also said he used the hookah bar as a metaphor for love, since both are addictive and intoxicating.
Vineet Singh is an Indian playback singer who rose to fame after winning a singing reality show called Jo Jeeta Wohi Superstar in 2008. He has sung for films such as Murder 3, Jai Ho, Boss, and Kis Kisko Pyaar Karoon. He is also known for his collaborations with Reshammiya on songs such as Hai Apna Dil Toh Awara, Lonely, and Balma.
Aman Trikha is another Indian singer who has sung for films such as OMG - Oh My God!, Prem Ratan Dhan Payo, Veer-Zaara, and Shivaay. He has also worked with Reshammiya on songs such as Go Go Govinda, Po Po, and Hookah Bar. He is known for his versatile, powerful voice and can sing in different genres and languages.
Hookah Bar was a huge hit with audiences and critics alike. It topped the charts on several music platforms and radio stations in India and abroad. It also won several awards and nominations, such as the Mirchi Music Award for Song of the Year, the Stardust Award for Best Playback Singer (Male), and the Zee Cine Award for Best Music Director. The song was praised for its catchy melody, energetic vocals and lively choreography, and it became a popular choice for parties, weddings and festivals, where people would dance to its beats.
If you like Hookah Bar and want to listen to it anytime, anywhere, you may want to download it as an MP3 file online. MP3 is a digital audio format that compresses sound data without losing much quality. MP3 files are easy to store, transfer and play on various devices and platforms. But how can you download Hookah Bar MP3 online? Here are some things to consider before you do.
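Once you have a file, you can sanity-check what you actually downloaded by reading its duration and bitrate from the MP3 header. The sketch below is a minimal example: it assumes the third-party mutagen library is installed (pip install mutagen) and uses a hypothetical file name.

```python
from mutagen.mp3 import MP3  # third-party library: pip install mutagen

# Hypothetical file name; point this at the MP3 you downloaded.
audio = MP3("hookah_bar.mp3")

minutes, seconds = divmod(int(audio.info.length), 60)
print(f"Duration: {minutes}:{seconds:02d}")            # e.g. 4:16
print(f"Bitrate: {audio.info.bitrate // 1000} kbps")    # encoded bitrate
print(f"Sample rate: {audio.info.sample_rate} Hz")      # e.g. 44100
```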
Downloading MP3 files is not always legal or ethical. Some of the issues you should keep in mind are:
Therefore, you should always download MP3 files from legal and ethical sources that respect the rights and interests of both consumers and creators.
There are many sources and platforms that offer Hookah Bar MP3 downloads online. Some of them are:
These are some of the best sources and platforms for downloading Hookah Bar MP3 online. However, you should always check the quality, legality and security of the files before downloading them. You should also respect the rights and interests of the music's creators and owners.
You can play Hookah Bar MP3 online on various devices, such as smartphones, tablets, laptops, desktops, speakers, headphones, earphones, etc. You can also use various apps, such as iTunes, Spotify, YouTube Music, Gaana, Saavn, etc. However, you should choose the device and app that suit your preferences and needs. Some of the factors to consider are:
You can improve the sound quality of Hookah Bar MP3 online by adjusting the settings and features of your device and app. Some of the settings and features you can use are:
Hookah Bar is a popular song that you can download and enjoy online. It is a catchy, upbeat dance number that uses a hookah bar as a metaphor for love. It was composed by Himesh Reshammiya, who also sang it with Vineet Singh and Aman Trikha. It was released in 2012 as part of the soundtrack of the film Khiladi 786. It was a huge hit with audiences and critics alike, and it won several awards and nominations for its music and vocals.
You can enjoy Hookah Bar MP3 online on various devices and apps, such as smartphones, tablets, laptops, desktops, speakers, headphones, earphones, etc. You can also use various settings and features to improve the song's sound quality, such as the equalizer, bass boost, surround sound, lyrics and playlists. You can also listen to Hookah Bar MP3 online on different occasions and in different moods, such as partying, working out, relaxing, romancing and traveling.
We hope this article has helped you learn more about downloading Hookah Bar MP3 and how to enjoy it online. If you have any questions or comments, feel free to contact us. Thanks for reading!
Here are some frequently asked questions about downloading Hookah Bar MP3:
The duration of Hookah Bar MP3 is 4 minutes and 16 seconds.
The size of Hookah Bar MP3 varies depending on the source and platform you download it from. However, it is usually about 4 MB.
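That figure is consistent with the duration above: a 4-minute-16-second track (256 seconds) encoded at a typical 128 kbps comes to roughly 256 × 128,000 / 8 ≈ 4.1 MB, assuming a 128 kbps encode; higher bitrates produce proportionally larger files.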
The genre of Hookah Bar MP3 is Bollywood dance music.
The language of Hookah Bar MP3 is Hindi.
The rating of Hookah Bar MP3 is 4.5 out of 5 stars on most platforms.
-Are you a fan of street racing games? Do you like to feel the adrenaline rush of speeding through the city streets in restored old cars? Do you want to have a garage full of legendary and exclusive cars? Then you’ll love CarX Street APK Infinite Cash 0.8 4, a game that will take you into the world of street racing with amazing graphics, easy controls and lots of fun.
-Download Zip ► https://bltlly.com/2v6MBD
In this article, we will show you what CarX Street APK Infinite Money 0.8 4 is, how to download and install the game on your Android device, how to play and make the most of the infinite money you can use to buy and upgrade your cars, and the advantages and disadvantages of playing this game. Let's get started!
You can also modify the appearance and performance of your cars. You can change the wheels, tires, headlights, taillights, mirrors, bumpers, skirts, spoilers, hoods, doors, windows, colors, stickers and more. You can also change the engine, turbo, exhaust, air filter, brake system, suspension, differential and more. You can see the changes you make to your car in real time on the screen.
The races are fast and intense. You have to use your skill to get a good start, take perfect turns, avoid obstacles and opponents, use nitro at the right time and finish first. You can choose from different race modes such as sprint, drift, drag and time attack. You can also choose between different difficulty levels, from beginner to professional.
-According to the game’s official website, the minimum requirements for installing CarX Street on your Android device are:
Requirement | Value
---|---
Android version | 6.0 or higher
Free space | 1 GB or more
RAM |
Processor | Quad-core or higher
Internet connection | Required to play online
If your device does not meet these requirements, you may have trouble installing or running the game. In this case, you can try downloading the original version of the game from the Google Play Store, which may be more compatible with your device.
-If your device meets the minimum requirements, you can follow the steps below to download and install CarX Street APK Infinite Cash 0.8 4:
-After choosing your car, you can customize it your way. You can change the color, stickers, parts and accessories of your car, to make it more beautiful and faster. You can see the changes you make to your car in real time on the screen. You can also test your car before using it in races, to see how it behaves on the track.
The second step in playing CarX Street APK Infinite Cash 0.8 4 is to take part in events and races. You can access the game map from the main menu and see the events and races that are available to you. You can choose from different race modes such as sprint, drift, drag and time attack. You can also choose between different difficulty levels, from beginner to professional.
Events are challenges that reward you with money and reputation. They can be daily, weekly or monthly, and can have different themes and goals. For example, you may have to perform a certain number of drifts, overtake a certain number of opponents, or finish first in a specific race.
-Races are direct confrontations against other players from all over the world. You can enter leagues and tournaments that put you in races against players of the same level as you. You can see the ranking of the best players in the world and compare your performance with theirs. Races are fast and intense, and require you to use your skill to win.
-You can access the game store from the main menu and see the cars and parts that are available to you. You can see the features and prices of each item before buying. You can also see the in-game recommendations for the best cars and the best parts for each race mode.
-Using infinite money is an advantage that allows you to have a garage full of legendary and exclusive cars, and have the best cars for each race. But remember: infinite money is not everything. You also need to have skill and strategy to win races.
-Playing CarX Street APK Infinite Cash 0.8 4 has its advantages and disadvantages. See what they are:
-So, did you like the article? Do you have any questions or suggestions? Leave your comment below. And if you liked the article, share it with your friends on social media. Thanks for reading!
-Here are some frequently asked questions about CarX Street APK Infinite Cash 0.8 4:
-An APK file is a file format used to install applications on the Android operating system. It contains all the files needed to run an application on your device.
-Yes, you can play CarX Street APK Infinity Cash 0.8 4 with your friends using the game’s online multiplayer mode. You can invite your friends to join you in the races, or compete against them in the world ranking. You can also chat with them in-game, or send private messages.
-# pre-release - [-_\.]? - (?P(a|b|c|rc|alpha|beta|pre|preview)) - [-_\.]? - (?P [0-9]+)? - )? - (?P # post release - (?:-(?P [0-9]+)) - | - (?: - [-_\.]? - (?P post|rev|r) - [-_\.]? - (?P [0-9]+)? - ) - )? - (?P # dev release - [-_\.]? - (?P dev) - [-_\.]? - (?P [0-9]+)? - )? - ) - (?:\+(?P [a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version -""" - - -class Version(_BaseVersion): - - _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE) - - def __init__(self, version: str) -> None: - - # Validate the version and parse it into pieces - match = self._regex.search(version) - if not match: - raise InvalidVersion(f"Invalid version: '{version}'") - - # Store the parsed out pieces of the version - self._version = _Version( - epoch=int(match.group("epoch")) if match.group("epoch") else 0, - release=tuple(int(i) for i in match.group("release").split(".")), - pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")), - post=_parse_letter_version( - match.group("post_l"), match.group("post_n1") or match.group("post_n2") - ), - dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")), - local=_parse_local_version(match.group("local")), - ) - - # Generate a key which will be used for sorting - self._key = _cmpkey( - self._version.epoch, - self._version.release, - self._version.pre, - self._version.post, - self._version.dev, - self._version.local, - ) - - def __repr__(self) -> str: - return f" " - - def __str__(self) -> str: - parts = [] - - # Epoch - if self.epoch != 0: - parts.append(f"{self.epoch}!") - - # Release segment - parts.append(".".join(str(x) for x in self.release)) - - # Pre-release - if self.pre is not None: - parts.append("".join(str(x) for x in self.pre)) - - # Post-release - if self.post is not None: - parts.append(f".post{self.post}") - - # Development release - if self.dev is not None: - parts.append(f".dev{self.dev}") - - # Local version segment - if self.local is not None: - parts.append(f"+{self.local}") - - return "".join(parts) - - @property - def epoch(self) -> int: - _epoch: int = self._version.epoch - return _epoch - - @property - def release(self) -> Tuple[int, ...]: - _release: Tuple[int, ...] 
= self._version.release - return _release - - @property - def pre(self) -> Optional[Tuple[str, int]]: - _pre: Optional[Tuple[str, int]] = self._version.pre - return _pre - - @property - def post(self) -> Optional[int]: - return self._version.post[1] if self._version.post else None - - @property - def dev(self) -> Optional[int]: - return self._version.dev[1] if self._version.dev else None - - @property - def local(self) -> Optional[str]: - if self._version.local: - return ".".join(str(x) for x in self._version.local) - else: - return None - - @property - def public(self) -> str: - return str(self).split("+", 1)[0] - - @property - def base_version(self) -> str: - parts = [] - - # Epoch - if self.epoch != 0: - parts.append(f"{self.epoch}!") - - # Release segment - parts.append(".".join(str(x) for x in self.release)) - - return "".join(parts) - - @property - def is_prerelease(self) -> bool: - return self.dev is not None or self.pre is not None - - @property - def is_postrelease(self) -> bool: - return self.post is not None - - @property - def is_devrelease(self) -> bool: - return self.dev is not None - - @property - def major(self) -> int: - return self.release[0] if len(self.release) >= 1 else 0 - - @property - def minor(self) -> int: - return self.release[1] if len(self.release) >= 2 else 0 - - @property - def micro(self) -> int: - return self.release[2] if len(self.release) >= 3 else 0 - - -def _parse_letter_version( - letter: str, number: Union[str, bytes, SupportsInt] -) -> Optional[Tuple[str, int]]: - - if letter: - # We consider there to be an implicit 0 in a pre-release if there is - # not a numeral associated with it. - if number is None: - number = 0 - - # We normalize any letters to their lower case form - letter = letter.lower() - - # We consider some words to be alternate spellings of other words and - # in those cases we want to normalize the spellings to our preferred - # spelling. - if letter == "alpha": - letter = "a" - elif letter == "beta": - letter = "b" - elif letter in ["c", "pre", "preview"]: - letter = "rc" - elif letter in ["rev", "r"]: - letter = "post" - - return letter, int(number) - if not letter and number: - # We assume if we are given a number, but we are not given a letter - # then this is using the implicit post release syntax (e.g. 1.0-1) - letter = "post" - - return letter, int(number) - - return None - - -_local_version_separators = re.compile(r"[\._-]") - - -def _parse_local_version(local: str) -> Optional[LocalType]: - """ - Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve"). - """ - if local is not None: - return tuple( - part.lower() if not part.isdigit() else int(part) - for part in _local_version_separators.split(local) - ) - return None - - -def _cmpkey( - epoch: int, - release: Tuple[int, ...], - pre: Optional[Tuple[str, int]], - post: Optional[Tuple[str, int]], - dev: Optional[Tuple[str, int]], - local: Optional[Tuple[SubLocalType]], -) -> CmpKey: - - # When we compare a release version, we want to compare it with all of the - # trailing zeros removed. So we'll use a reverse the list, drop all the now - # leading zeros until we come to something non zero, then take the rest - # re-reverse it back into the correct order and make it a tuple and use - # that for our sorting key. - _release = tuple( - reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release)))) - ) - - # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0. 
- # We'll do this by abusing the pre segment, but we _only_ want to do this - # if there is not a pre or a post segment. If we have one of those then - # the normal sorting rules will handle this case correctly. - if pre is None and post is None and dev is not None: - _pre: PrePostDevType = NegativeInfinity - # Versions without a pre-release (except as noted above) should sort after - # those with one. - elif pre is None: - _pre = Infinity - else: - _pre = pre - - # Versions without a post segment should sort before those with one. - if post is None: - _post: PrePostDevType = NegativeInfinity - - else: - _post = post - - # Versions without a development segment should sort after those with one. - if dev is None: - _dev: PrePostDevType = Infinity - - else: - _dev = dev - - if local is None: - # Versions without a local segment should sort before those with one. - _local: LocalType = NegativeInfinity - else: - # Versions with a local segment need that segment parsed to implement - # the sorting rules in PEP440. - # - Alpha numeric segments sort before numeric segments - # - Alpha numeric segments sort lexicographically - # - Numeric segments sort numerically - # - Shorter versions sort before longer versions when the prefixes - # match exactly - _local = tuple( - (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local - ) - - return epoch, _release, _pre, _post, _dev, _local diff --git a/spaces/CALM/Dashboard/streamlit_observable/frontend/src/streamlit/streamlit.ts b/spaces/CALM/Dashboard/streamlit_observable/frontend/src/streamlit/streamlit.ts deleted file mode 100644 index 7e77b4d80fedbe6ff8f23d45e7651e20f7164f4c..0000000000000000000000000000000000000000 --- a/spaces/CALM/Dashboard/streamlit_observable/frontend/src/streamlit/streamlit.ts +++ /dev/null @@ -1,198 +0,0 @@ -/** - * @license - * Copyright 2018-2020 Streamlit Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Safari doesn't support the EventTarget class, so we use a shim. -import { EventTarget } from "event-target-shim" -import { ArrowDataframeProto, ArrowTable } from "./ArrowTable" - -/** Data sent in the custom Streamlit render event. */ -export interface RenderData { - args: any - disabled: boolean -} - -/** Messages from Component -> Streamlit */ -enum ComponentMessageType { - // A component sends this message when it's ready to receive messages - // from Streamlit. Streamlit won't send any messages until it gets this. - // Data: { apiVersion: number } - COMPONENT_READY = "streamlit:componentReady", - - // The component has a new widget value. Send it back to Streamlit, which - // will then re-run the app. - // Data: { value: any } - SET_COMPONENT_VALUE = "streamlit:setComponentValue", - - // The component has a new height for its iframe. - // Data: { height: number } - SET_FRAME_HEIGHT = "streamlit:setFrameHeight", -} - -/** - * Streamlit communication API. 
- * - * Components can send data to Streamlit via the functions defined here, - * and receive data from Streamlit via the `events` property. - */ -export class Streamlit { - /** - * The Streamlit component API version we're targetting. - * There's currently only 1! - */ - public static readonly API_VERSION = 1 - - public static readonly RENDER_EVENT = "streamlit:render" - - /** Dispatches events received from Streamlit. */ - public static readonly events = new EventTarget() - - private static registeredMessageListener = false - private static lastFrameHeight?: number - - /** - * Tell Streamlit that the component is ready to start receiving data. - * Streamlit will defer emitting RENDER events until it receives the - * COMPONENT_READY message. - */ - public static setComponentReady = (): void => { - if (!Streamlit.registeredMessageListener) { - // Register for message events if we haven't already - window.addEventListener("message", Streamlit.onMessageEvent) - Streamlit.registeredMessageListener = true - } - - Streamlit.sendBackMsg(ComponentMessageType.COMPONENT_READY, { - apiVersion: Streamlit.API_VERSION, - }) - } - - /** - * Report the component's height to Streamlit. - * This should be called every time the component changes its DOM - that is, - * when it's first loaded, and any time it updates. - */ - public static setFrameHeight = (height?: number): void => { - if (height === undefined) { - // `height` is optional. If undefined, it defaults to scrollHeight, - // which is the entire height of the element minus its border, - // scrollbar, and margin. - height = document.body.scrollHeight + 10; - } - - if (height === Streamlit.lastFrameHeight) { - // Don't bother updating if our height hasn't changed. - return - } - - Streamlit.lastFrameHeight = height - Streamlit.sendBackMsg(ComponentMessageType.SET_FRAME_HEIGHT, { height }) - } - - /** - * Set the component's value. This value will be returned to the Python - * script, and the script will be re-run. - * - * For example: - * - * JavaScript: - * Streamlit.setComponentValue("ahoy!") - * - * Python: - * value = st.my_component(...) - * st.write(value) # -> "ahoy!" - * - * The value must be serializable into JSON. - */ - public static setComponentValue = (value: any): void => { - Streamlit.sendBackMsg(ComponentMessageType.SET_COMPONENT_VALUE, { value }) - } - - /** Receive a ForwardMsg from the Streamlit app */ - private static onMessageEvent = (event: MessageEvent): void => { - const type = event.data["type"] - switch (type) { - case Streamlit.RENDER_EVENT: - Streamlit.onRenderMessage(event.data) - break - } - } - - /** - * Handle an untyped Streamlit render event and redispatch it as a - * StreamlitRenderEvent. - */ - private static onRenderMessage = (data: any): void => { - let args = data["args"] - if (args == null) { - console.error( - `Got null args in onRenderMessage. This should never happen` - ) - args = {} - } - - // Parse our dataframe arguments with arrow, and merge them into our args dict - const dataframeArgs = - data["dfs"] && data["dfs"].length > 0 - ? Streamlit.argsDataframeToObject(data["dfs"]) - : {} - - args = { - ...args, - ...dataframeArgs, - } - - const disabled = Boolean(data["disabled"]) - - // Dispatch a render event! 
- const eventData = { disabled, args } - const event = new CustomEvent (Streamlit.RENDER_EVENT, { - detail: eventData, - }) - Streamlit.events.dispatchEvent(event) - } - - private static argsDataframeToObject = ( - argsDataframe: ArgsDataframe[] - ): object => { - const argsDataframeArrow = argsDataframe.map( - ({ key, value }: ArgsDataframe) => [key, Streamlit.toArrowTable(value)] - ) - return Object.fromEntries(argsDataframeArrow) - } - - private static toArrowTable = (df: ArrowDataframeProto): ArrowTable => { - const { data, index, columns } = df.data - return new ArrowTable(data, index, columns) - } - - /** Post a message to the Streamlit app. */ - private static sendBackMsg = (type: string, data?: any): void => { - window.parent.postMessage( - { - isStreamlitMessage: true, - type: type, - ...data, - }, - "*" - ) - } -} - -interface ArgsDataframe { - key: string - value: ArrowDataframeProto -} diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/engine/__init__.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/engine/__init__.py deleted file mode 100644 index 6a4538da3e66593e4ef8916cd9cbca3c83b8c14e..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/engine/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. - -from .launch import * -from .train_loop import * - -__all__ = [k for k in globals().keys() if not k.startswith("_")] - - -# prefer to let hooks and defaults live in separate namespaces (therefore not in __all__) -# but still make them available here -from .hooks import * -from .defaults import * diff --git a/spaces/CVPR/GFPGAN-example/gfpgan/archs/__init__.py b/spaces/CVPR/GFPGAN-example/gfpgan/archs/__init__.py deleted file mode 100644 index bec5f17bfa38729b55f57cae8e40c27310db2b7b..0000000000000000000000000000000000000000 --- a/spaces/CVPR/GFPGAN-example/gfpgan/archs/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -import importlib -from basicsr.utils import scandir -from os import path as osp - -# automatically scan and import arch modules for registry -# scan all the files that end with '_arch.py' under the archs folder -arch_folder = osp.dirname(osp.abspath(__file__)) -arch_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(arch_folder) if v.endswith('_arch.py')] -# import all the arch modules -_arch_modules = [importlib.import_module(f'gfpgan.archs.{file_name}') for file_name in arch_filenames] diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/reverse.h b/spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/reverse.h deleted file mode 100644 index 1f3e0325e257c301215e62c690837433ae24c30c..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/reverse.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#pragma once - -#include - -// this system inherits reverse -#include - diff --git a/spaces/CVPR/WALT/mmdet/core/anchor/builder.py b/spaces/CVPR/WALT/mmdet/core/anchor/builder.py deleted file mode 100644 index d79b448ebca9f2b21d455046623172c48c5c3ef0..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/mmdet/core/anchor/builder.py +++ /dev/null @@ -1,7 +0,0 @@ -from mmcv.utils import Registry, build_from_cfg - -ANCHOR_GENERATORS = Registry('Anchor generator') - - -def build_anchor_generator(cfg, default_args=None): - return build_from_cfg(cfg, ANCHOR_GENERATORS, default_args) diff --git a/spaces/CVPR/WALT/mmdet/models/losses/pisa_loss.py b/spaces/CVPR/WALT/mmdet/models/losses/pisa_loss.py deleted file mode 100644 index 4a48adfcd400bb07b719a6fbd5a8af0508820629..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/mmdet/models/losses/pisa_loss.py +++ /dev/null @@ -1,183 +0,0 @@ -import mmcv -import torch - -from mmdet.core import bbox_overlaps - - -@mmcv.jit(derivate=True, coderize=True) -def isr_p(cls_score, - bbox_pred, - bbox_targets, - rois, - sampling_results, - loss_cls, - bbox_coder, - k=2, - bias=0, - num_class=80): - """Importance-based Sample Reweighting (ISR_P), positive part. - - Args: - cls_score (Tensor): Predicted classification scores. - bbox_pred (Tensor): Predicted bbox deltas. - bbox_targets (tuple[Tensor]): A tuple of bbox targets, the are - labels, label_weights, bbox_targets, bbox_weights, respectively. - rois (Tensor): Anchors (single_stage) in shape (n, 4) or RoIs - (two_stage) in shape (n, 5). - sampling_results (obj): Sampling results. - loss_cls (func): Classification loss func of the head. - bbox_coder (obj): BBox coder of the head. - k (float): Power of the non-linear mapping. - bias (float): Shift of the non-linear mapping. - num_class (int): Number of classes, default: 80. 
- - Return: - tuple([Tensor]): labels, imp_based_label_weights, bbox_targets, - bbox_target_weights - """ - - labels, label_weights, bbox_targets, bbox_weights = bbox_targets - pos_label_inds = ((labels >= 0) & - (labels < num_class)).nonzero().reshape(-1) - pos_labels = labels[pos_label_inds] - - # if no positive samples, return the original targets - num_pos = float(pos_label_inds.size(0)) - if num_pos == 0: - return labels, label_weights, bbox_targets, bbox_weights - - # merge pos_assigned_gt_inds of per image to a single tensor - gts = list() - last_max_gt = 0 - for i in range(len(sampling_results)): - gt_i = sampling_results[i].pos_assigned_gt_inds - gts.append(gt_i + last_max_gt) - if len(gt_i) != 0: - last_max_gt = gt_i.max() + 1 - gts = torch.cat(gts) - assert len(gts) == num_pos - - cls_score = cls_score.detach() - bbox_pred = bbox_pred.detach() - - # For single stage detectors, rois here indicate anchors, in shape (N, 4) - # For two stage detectors, rois are in shape (N, 5) - if rois.size(-1) == 5: - pos_rois = rois[pos_label_inds][:, 1:] - else: - pos_rois = rois[pos_label_inds] - - if bbox_pred.size(-1) > 4: - bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, 4) - pos_delta_pred = bbox_pred[pos_label_inds, pos_labels].view(-1, 4) - else: - pos_delta_pred = bbox_pred[pos_label_inds].view(-1, 4) - - # compute iou of the predicted bbox and the corresponding GT - pos_delta_target = bbox_targets[pos_label_inds].view(-1, 4) - pos_bbox_pred = bbox_coder.decode(pos_rois, pos_delta_pred) - target_bbox_pred = bbox_coder.decode(pos_rois, pos_delta_target) - ious = bbox_overlaps(pos_bbox_pred, target_bbox_pred, is_aligned=True) - - pos_imp_weights = label_weights[pos_label_inds] - # Two steps to compute IoU-HLR. Samples are first sorted by IoU locally, - # then sorted again within the same-rank group - max_l_num = pos_labels.bincount().max() - for label in pos_labels.unique(): - l_inds = (pos_labels == label).nonzero().view(-1) - l_gts = gts[l_inds] - for t in l_gts.unique(): - t_inds = l_inds[l_gts == t] - t_ious = ious[t_inds] - _, t_iou_rank_idx = t_ious.sort(descending=True) - _, t_iou_rank = t_iou_rank_idx.sort() - ious[t_inds] += max_l_num - t_iou_rank.float() - l_ious = ious[l_inds] - _, l_iou_rank_idx = l_ious.sort(descending=True) - _, l_iou_rank = l_iou_rank_idx.sort() # IoU-HLR - # linearly map HLR to label weights - pos_imp_weights[l_inds] *= (max_l_num - l_iou_rank.float()) / max_l_num - - pos_imp_weights = (bias + pos_imp_weights * (1 - bias)).pow(k) - - # normalize to make the new weighted loss value equal to the original loss - pos_loss_cls = loss_cls( - cls_score[pos_label_inds], pos_labels, reduction_override='none') - if pos_loss_cls.dim() > 1: - ori_pos_loss_cls = pos_loss_cls * label_weights[pos_label_inds][:, - None] - new_pos_loss_cls = pos_loss_cls * pos_imp_weights[:, None] - else: - ori_pos_loss_cls = pos_loss_cls * label_weights[pos_label_inds] - new_pos_loss_cls = pos_loss_cls * pos_imp_weights - pos_loss_cls_ratio = ori_pos_loss_cls.sum() / new_pos_loss_cls.sum() - pos_imp_weights = pos_imp_weights * pos_loss_cls_ratio - label_weights[pos_label_inds] = pos_imp_weights - - bbox_targets = labels, label_weights, bbox_targets, bbox_weights - return bbox_targets - - -@mmcv.jit(derivate=True, coderize=True) -def carl_loss(cls_score, - labels, - bbox_pred, - bbox_targets, - loss_bbox, - k=1, - bias=0.2, - avg_factor=None, - sigmoid=False, - num_class=80): - """Classification-Aware Regression Loss (CARL). - - Args: - cls_score (Tensor): Predicted classification scores. 
- labels (Tensor): Targets of classification. - bbox_pred (Tensor): Predicted bbox deltas. - bbox_targets (Tensor): Target of bbox regression. - loss_bbox (func): Regression loss func of the head. - bbox_coder (obj): BBox coder of the head. - k (float): Power of the non-linear mapping. - bias (float): Shift of the non-linear mapping. - avg_factor (int): Average factor used in regression loss. - sigmoid (bool): Activation of the classification score. - num_class (int): Number of classes, default: 80. - - Return: - dict: CARL loss dict. - """ - pos_label_inds = ((labels >= 0) & - (labels < num_class)).nonzero().reshape(-1) - if pos_label_inds.numel() == 0: - return dict(loss_carl=cls_score.sum()[None] * 0.) - pos_labels = labels[pos_label_inds] - - # multiply pos_cls_score with the corresponding bbox weight - # and remain gradient - if sigmoid: - pos_cls_score = cls_score.sigmoid()[pos_label_inds, pos_labels] - else: - pos_cls_score = cls_score.softmax(-1)[pos_label_inds, pos_labels] - carl_loss_weights = (bias + (1 - bias) * pos_cls_score).pow(k) - - # normalize carl_loss_weight to make its sum equal to num positive - num_pos = float(pos_cls_score.size(0)) - weight_ratio = num_pos / carl_loss_weights.sum() - carl_loss_weights *= weight_ratio - - if avg_factor is None: - avg_factor = bbox_targets.size(0) - # if is class agnostic, bbox pred is in shape (N, 4) - # otherwise, bbox pred is in shape (N, #classes, 4) - if bbox_pred.size(-1) > 4: - bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, 4) - pos_bbox_preds = bbox_pred[pos_label_inds, pos_labels] - else: - pos_bbox_preds = bbox_pred[pos_label_inds] - ori_loss_reg = loss_bbox( - pos_bbox_preds, - bbox_targets[pos_label_inds], - reduction_override='none') / avg_factor - loss_carl = (ori_loss_reg * carl_loss_weights[:, None]).sum() - return dict(loss_carl=loss_carl[None]) diff --git a/spaces/CVPR/regionclip-demo/detectron2/data/datasets/coco.py b/spaces/CVPR/regionclip-demo/detectron2/data/datasets/coco.py deleted file mode 100644 index ed4f7ccb20efa3b54c719783e279c381ca5d8587..0000000000000000000000000000000000000000 --- a/spaces/CVPR/regionclip-demo/detectron2/data/datasets/coco.py +++ /dev/null @@ -1,539 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import contextlib -import datetime -import io -import json -import logging -import numpy as np -import os -import shutil -import pycocotools.mask as mask_util -from fvcore.common.timer import Timer -from iopath.common.file_io import file_lock -from PIL import Image - -from detectron2.structures import Boxes, BoxMode, PolygonMasks, RotatedBoxes -from detectron2.utils.file_io import PathManager - -from .. import DatasetCatalog, MetadataCatalog - -""" -This file contains functions to parse COCO-format annotations into dicts in "Detectron2 format". -""" - - -logger = logging.getLogger(__name__) - -__all__ = ["load_coco_json", "load_sem_seg", "convert_to_coco_json", "register_coco_instances"] - - -def load_coco_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None): - """ - Load a json file with COCO's instances annotation format. - Currently supports instance detection, instance segmentation, - and person keypoints annotations. - - Args: - json_file (str): full path to the json file in COCO instances annotation format. - image_root (str or path-like): the directory where the images in this json file exists. - dataset_name (str or None): the name of the dataset (e.g., coco_2017_train). 
- When provided, this function will also do the following: - - * Put "thing_classes" into the metadata associated with this dataset. - * Map the category ids into a contiguous range (needed by standard dataset format), - and add "thing_dataset_id_to_contiguous_id" to the metadata associated - with this dataset. - - This option should usually be provided, unless users need to load - the original json content and apply more processing manually. - extra_annotation_keys (list[str]): list of per-annotation keys that should also be - loaded into the dataset dict (besides "iscrowd", "bbox", "keypoints", - "category_id", "segmentation"). The values for these keys will be returned as-is. - For example, the densepose annotations are loaded in this way. - - Returns: - list[dict]: a list of dicts in Detectron2 standard dataset dicts format (See - `Using Custom Datasets `_ ) when `dataset_name` is not None. - If `dataset_name` is None, the returned `category_ids` may be - incontiguous and may not conform to the Detectron2 standard format. - - Notes: - 1. This function does not read the image files. - The results do not have the "image" field. - """ - from pycocotools.coco import COCO - - timer = Timer() - json_file = PathManager.get_local_path(json_file) - with contextlib.redirect_stdout(io.StringIO()): - coco_api = COCO(json_file) - if timer.seconds() > 1: - logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds())) - - id_map = None - if dataset_name is not None: - meta = MetadataCatalog.get(dataset_name) - cat_ids = sorted(coco_api.getCatIds()) - cats = coco_api.loadCats(cat_ids) - # The categories in a custom json file may not be sorted. - thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])] - meta.thing_classes = thing_classes - - # In COCO, certain category ids are artificially removed, - # and by convention they are always ignored. - # We deal with COCO's id issue and translate - # the category ids to contiguous ids in [0, 80). - - # It works by looking at the "categories" field in the json, therefore - # if users' own json also have incontiguous ids, we'll - # apply this mapping as well but print a warning. - if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)): - if "coco" not in dataset_name: - logger.warning( - """ -Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you. -""" - ) - id_map = {v: i for i, v in enumerate(cat_ids)} - meta.thing_dataset_id_to_contiguous_id = id_map - - # sort indices for reproducible results - img_ids = sorted(coco_api.imgs.keys()) - # imgs is a list of dicts, each looks something like: - # {'license': 4, - # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg', - # 'file_name': 'COCO_val2014_000000001268.jpg', - # 'height': 427, - # 'width': 640, - # 'date_captured': '2013-11-17 05:57:24', - # 'id': 1268} - imgs = coco_api.loadImgs(img_ids) - # anns is a list[list[dict]], where each dict is an annotation - # record for an object. The inner list enumerates the objects in an image - # and the outer list enumerates over images. Example of anns[0]: - # [{'segmentation': [[192.81, - # 247.09, - # ... - # 219.03, - # 249.06]], - # 'area': 1035.749, - # 'iscrowd': 0, - # 'image_id': 1268, - # 'bbox': [192.81, 224.8, 74.73, 33.43], - # 'category_id': 16, - # 'id': 42986}, - # ...] 
- anns = [coco_api.imgToAnns[img_id] for img_id in img_ids] - total_num_valid_anns = sum([len(x) for x in anns]) - total_num_anns = len(coco_api.anns) - if total_num_valid_anns < total_num_anns: - logger.warning( - f"{json_file} contains {total_num_anns} annotations, but only " - f"{total_num_valid_anns} of them match to images in the file." - ) - - if "minival" not in json_file: - # The popular valminusminival & minival annotations for COCO2014 contain this bug. - # However the ratio of buggy annotations there is tiny and does not affect accuracy. - # Therefore we explicitly white-list them. - ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image] - assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format( - json_file - ) - - imgs_anns = list(zip(imgs, anns)) - logger.info("Loaded {} images in COCO format from {}".format(len(imgs_anns), json_file)) - - dataset_dicts = [] - - ann_keys = ["iscrowd", "bbox", "keypoints", "category_id"] + (extra_annotation_keys or []) - - num_instances_without_valid_segmentation = 0 - - for (img_dict, anno_dict_list) in imgs_anns: - record = {} - record["file_name"] = os.path.join(image_root, img_dict["file_name"]) - record["height"] = img_dict["height"] - record["width"] = img_dict["width"] - image_id = record["image_id"] = img_dict["id"] - - objs = [] - for anno in anno_dict_list: - # Check that the image_id in this annotation is the same as - # the image_id we're looking at. - # This fails only when the data parsing logic or the annotation file is buggy. - - # The original COCO valminusminival2014 & minival2014 annotation files - # actually contains bugs that, together with certain ways of using COCO API, - # can trigger this assertion. - assert anno["image_id"] == image_id - - assert anno.get("ignore", 0) == 0, '"ignore" in COCO json file is not supported.' - - obj = {key: anno[key] for key in ann_keys if key in anno} - if "bbox" in obj and len(obj["bbox"]) == 0: - raise ValueError( - f"One annotation of image {image_id} contains empty 'bbox' value! " - "This json does not have valid COCO format." - ) - - segm = anno.get("segmentation", None) - if segm: # either list[list[float]] or dict(RLE) - if isinstance(segm, dict): - if isinstance(segm["counts"], list): - # convert to compressed RLE - segm = mask_util.frPyObjects(segm, *segm["size"]) - else: - # filter out invalid polygons (< 3 points) - segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6] - if len(segm) == 0: - num_instances_without_valid_segmentation += 1 - continue # ignore this instance - obj["segmentation"] = segm - - keypts = anno.get("keypoints", None) - if keypts: # list[int] - for idx, v in enumerate(keypts): - if idx % 3 != 2: - # COCO's segmentation coordinates are floating points in [0, H or W], - # but keypoint coordinates are integers in [0, H-1 or W-1] - # Therefore we assume the coordinates are "pixel indices" and - # add 0.5 to convert to floating point coordinates. - keypts[idx] = v + 0.5 - obj["keypoints"] = keypts - - obj["bbox_mode"] = BoxMode.XYWH_ABS - if id_map: - annotation_category_id = obj["category_id"] - try: - obj["category_id"] = id_map[annotation_category_id] - except KeyError as e: - raise KeyError( - f"Encountered category_id={annotation_category_id} " - "but this id does not exist in 'categories' of the json file." 
- ) from e - objs.append(obj) - record["annotations"] = objs - dataset_dicts.append(record) - - if num_instances_without_valid_segmentation > 0: - logger.warning( - "Filtered out {} instances without valid segmentation. ".format( - num_instances_without_valid_segmentation - ) - + "There might be issues in your dataset generation process. Please " - "check https://detectron2.readthedocs.io/en/latest/tutorials/datasets.html carefully" - ) - return dataset_dicts - - -def load_sem_seg(gt_root, image_root, gt_ext="png", image_ext="jpg"): - """ - Load semantic segmentation datasets. All files under "gt_root" with "gt_ext" extension are - treated as ground truth annotations and all files under "image_root" with "image_ext" extension - as input images. Ground truth and input images are matched using file paths relative to - "gt_root" and "image_root" respectively without taking into account file extensions. - This works for COCO as well as some other datasets. - - Args: - gt_root (str): full path to ground truth semantic segmentation files. Semantic segmentation - annotations are stored as images with integer values in pixels that represent - corresponding semantic labels. - image_root (str): the directory where the input images are. - gt_ext (str): file extension for ground truth annotations. - image_ext (str): file extension for input images. - - Returns: - list[dict]: - a list of dicts in detectron2 standard format without instance-level - annotation. - - Notes: - 1. This function does not read the image and ground truth files. - The results do not have the "image" and "sem_seg" fields. - """ - - # We match input images with ground truth based on their relative filepaths (without file - # extensions) starting from 'image_root' and 'gt_root' respectively. 
- def file2id(folder_path, file_path): - # extract relative path starting from `folder_path` - image_id = os.path.normpath(os.path.relpath(file_path, start=folder_path)) - # remove file extension - image_id = os.path.splitext(image_id)[0] - return image_id - - input_files = sorted( - (os.path.join(image_root, f) for f in PathManager.ls(image_root) if f.endswith(image_ext)), - key=lambda file_path: file2id(image_root, file_path), - ) - gt_files = sorted( - (os.path.join(gt_root, f) for f in PathManager.ls(gt_root) if f.endswith(gt_ext)), - key=lambda file_path: file2id(gt_root, file_path), - ) - - assert len(gt_files) > 0, "No annotations found in {}.".format(gt_root) - - # Use the intersection, so that val2017_100 annotations can run smoothly with val2017 images - if len(input_files) != len(gt_files): - logger.warn( - "Directory {} and {} has {} and {} files, respectively.".format( - image_root, gt_root, len(input_files), len(gt_files) - ) - ) - input_basenames = [os.path.basename(f)[: -len(image_ext)] for f in input_files] - gt_basenames = [os.path.basename(f)[: -len(gt_ext)] for f in gt_files] - intersect = list(set(input_basenames) & set(gt_basenames)) - # sort, otherwise each worker may obtain a list[dict] in different order - intersect = sorted(intersect) - logger.warn("Will use their intersection of {} files.".format(len(intersect))) - input_files = [os.path.join(image_root, f + image_ext) for f in intersect] - gt_files = [os.path.join(gt_root, f + gt_ext) for f in intersect] - - logger.info( - "Loaded {} images with semantic segmentation from {}".format(len(input_files), image_root) - ) - - dataset_dicts = [] - for (img_path, gt_path) in zip(input_files, gt_files): - record = {} - record["file_name"] = img_path - record["sem_seg_file_name"] = gt_path - dataset_dicts.append(record) - - return dataset_dicts - - -def convert_to_coco_dict(dataset_name): - """ - Convert an instance detection/segmentation or keypoint detection dataset - in detectron2's standard format into COCO json format. - - Generic dataset description can be found here: - https://detectron2.readthedocs.io/tutorials/datasets.html#register-a-dataset - - COCO data format description can be found here: - http://cocodataset.org/#format-data - - Args: - dataset_name (str): - name of the source dataset - Must be registered in DatastCatalog and in detectron2's standard format. 
- Must have corresponding metadata "thing_classes" - Returns: - coco_dict: serializable dict in COCO json format - """ - - dataset_dicts = DatasetCatalog.get(dataset_name) - metadata = MetadataCatalog.get(dataset_name) - - # unmap the category mapping ids for COCO - if hasattr(metadata, "thing_dataset_id_to_contiguous_id"): - reverse_id_mapping = {v: k for k, v in metadata.thing_dataset_id_to_contiguous_id.items()} - reverse_id_mapper = lambda contiguous_id: reverse_id_mapping[contiguous_id] # noqa - else: - reverse_id_mapper = lambda contiguous_id: contiguous_id # noqa - - categories = [ - {"id": reverse_id_mapper(id), "name": name} - for id, name in enumerate(metadata.thing_classes) - ] - - logger.info("Converting dataset dicts into COCO format") - coco_images = [] - coco_annotations = [] - - for image_id, image_dict in enumerate(dataset_dicts): - coco_image = { - "id": image_dict.get("image_id", image_id), - "width": int(image_dict["width"]), - "height": int(image_dict["height"]), - "file_name": str(image_dict["file_name"]), - } - coco_images.append(coco_image) - - anns_per_image = image_dict.get("annotations", []) - for annotation in anns_per_image: - # create a new dict with only COCO fields - coco_annotation = {} - - # COCO requirement: XYWH box format for axis-align and XYWHA for rotated - bbox = annotation["bbox"] - if isinstance(bbox, np.ndarray): - if bbox.ndim != 1: - raise ValueError(f"bbox has to be 1-dimensional. Got shape={bbox.shape}.") - bbox = bbox.tolist() - if len(bbox) not in [4, 5]: - raise ValueError(f"bbox has to has length 4 or 5. Got {bbox}.") - from_bbox_mode = annotation["bbox_mode"] - to_bbox_mode = BoxMode.XYWH_ABS if len(bbox) == 4 else BoxMode.XYWHA_ABS - bbox = BoxMode.convert(bbox, from_bbox_mode, to_bbox_mode) - - # COCO requirement: instance area - if "segmentation" in annotation: - # Computing areas for instances by counting the pixels - segmentation = annotation["segmentation"] - # TODO: check segmentation type: RLE, BinaryMask or Polygon - if isinstance(segmentation, list): - polygons = PolygonMasks([segmentation]) - area = polygons.area()[0].item() - elif isinstance(segmentation, dict): # RLE - area = mask_util.area(segmentation).item() - else: - raise TypeError(f"Unknown segmentation type {type(segmentation)}!") - else: - # Computing areas using bounding boxes - if to_bbox_mode == BoxMode.XYWH_ABS: - bbox_xy = BoxMode.convert(bbox, to_bbox_mode, BoxMode.XYXY_ABS) - area = Boxes([bbox_xy]).area()[0].item() - else: - area = RotatedBoxes([bbox]).area()[0].item() - - if "keypoints" in annotation: - keypoints = annotation["keypoints"] # list[int] - for idx, v in enumerate(keypoints): - if idx % 3 != 2: - # COCO's segmentation coordinates are floating points in [0, H or W], - # but keypoint coordinates are integers in [0, H-1 or W-1] - # For COCO format consistency we substract 0.5 - # https://github.com/facebookresearch/detectron2/pull/175#issuecomment-551202163 - keypoints[idx] = v - 0.5 - if "num_keypoints" in annotation: - num_keypoints = annotation["num_keypoints"] - else: - num_keypoints = sum(kp > 0 for kp in keypoints[2::3]) - - # COCO requirement: - # linking annotations to images - # "id" field must start with 1 - coco_annotation["id"] = len(coco_annotations) + 1 - coco_annotation["image_id"] = coco_image["id"] - coco_annotation["bbox"] = [round(float(x), 3) for x in bbox] - coco_annotation["area"] = float(area) - coco_annotation["iscrowd"] = int(annotation.get("iscrowd", 0)) - coco_annotation["category_id"] = 
int(reverse_id_mapper(annotation["category_id"])) - - # Add optional fields - if "keypoints" in annotation: - coco_annotation["keypoints"] = keypoints - coco_annotation["num_keypoints"] = num_keypoints - - if "segmentation" in annotation: - seg = coco_annotation["segmentation"] = annotation["segmentation"] - if isinstance(seg, dict): # RLE - counts = seg["counts"] - if not isinstance(counts, str): - # make it json-serializable - seg["counts"] = counts.decode("ascii") - - coco_annotations.append(coco_annotation) - - logger.info( - "Conversion finished, " - f"#images: {len(coco_images)}, #annotations: {len(coco_annotations)}" - ) - - info = { - "date_created": str(datetime.datetime.now()), - "description": "Automatically generated COCO json file for Detectron2.", - } - coco_dict = {"info": info, "images": coco_images, "categories": categories, "licenses": None} - if len(coco_annotations) > 0: - coco_dict["annotations"] = coco_annotations - return coco_dict - - -def convert_to_coco_json(dataset_name, output_file, allow_cached=True): - """ - Converts dataset into COCO format and saves it to a json file. - dataset_name must be registered in DatasetCatalog and in detectron2's standard format. - - Args: - dataset_name: - reference from the config file to the catalogs - must be registered in DatasetCatalog and in detectron2's standard format - output_file: path of json file that will be saved to - allow_cached: if json file is already present then skip conversion - """ - - # TODO: The dataset or the conversion script *may* change, - # a checksum would be useful for validating the cached data - - PathManager.mkdirs(os.path.dirname(output_file)) - with file_lock(output_file): - if PathManager.exists(output_file) and allow_cached: - logger.warning( - f"Using previously cached COCO format annotations at '{output_file}'. " - "You need to clear the cache file if your dataset has been modified." - ) - else: - logger.info(f"Converting annotations of dataset '{dataset_name}' to COCO format ...)") - coco_dict = convert_to_coco_dict(dataset_name) - - logger.info(f"Caching COCO format annotations at '{output_file}' ...") - tmp_file = output_file + ".tmp" - with PathManager.open(tmp_file, "w") as f: - json.dump(coco_dict, f) - shutil.move(tmp_file, output_file) - - -def register_coco_instances(name, metadata, json_file, image_root): - """ - Register a dataset in COCO's json annotation format for - instance detection, instance segmentation and keypoint detection. - (i.e., Type 1 and 2 in http://cocodataset.org/#format-data. - `instances*.json` and `person_keypoints*.json` in the dataset). - - This is an example of how to register a new dataset. - You can do something similar to this function, to register new datasets. - - Args: - name (str): the name that identifies a dataset, e.g. "coco_2014_train". - metadata (dict): extra metadata associated with this dataset. You can - leave it as an empty dict. - json_file (str): path to the json instance annotation file. - image_root (str or path-like): directory which contains all the images. - """ - assert isinstance(name, str), name - assert isinstance(json_file, (str, os.PathLike)), json_file - assert isinstance(image_root, (str, os.PathLike)), image_root - # 1. register a function which returns dicts - DatasetCatalog.register(name, lambda: load_coco_json(json_file, image_root, name)) - - # 2. 
Optionally, add metadata about this dataset, - # since they might be useful in evaluation, visualization or logging - MetadataCatalog.get(name).set( - json_file=json_file, image_root=image_root, evaluator_type="coco", **metadata - ) - - -if __name__ == "__main__": - """ - Test the COCO json dataset loader. - - Usage: - python -m detectron2.data.datasets.coco \ - path/to/json path/to/image_root dataset_name - - "dataset_name" can be "coco_2014_minival_100", or other - pre-registered ones - """ - from detectron2.utils.logger import setup_logger - from detectron2.utils.visualizer import Visualizer - import detectron2.data.datasets # noqa # add pre-defined metadata - import sys - - logger = setup_logger(name=__name__) - assert sys.argv[3] in DatasetCatalog.list() - meta = MetadataCatalog.get(sys.argv[3]) - - dicts = load_coco_json(sys.argv[1], sys.argv[2], sys.argv[3]) - logger.info("Done loading {} samples.".format(len(dicts))) - - dirname = "coco-data-vis" - os.makedirs(dirname, exist_ok=True) - for d in dicts: - img = np.array(Image.open(d["file_name"])) - visualizer = Visualizer(img, metadata=meta) - vis = visualizer.draw_dataset_dict(d) - fpath = os.path.join(dirname, os.path.basename(d["file_name"])) - vis.save(fpath) diff --git a/spaces/CVPR/regionclip-demo/detectron2/modeling/meta_arch/retinanet.py b/spaces/CVPR/regionclip-demo/detectron2/modeling/meta_arch/retinanet.py deleted file mode 100644 index 81992a3bc6d7f17ab64eb88a157901e69d3f0e16..0000000000000000000000000000000000000000 --- a/spaces/CVPR/regionclip-demo/detectron2/modeling/meta_arch/retinanet.py +++ /dev/null @@ -1,609 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import logging -import math -import numpy as np -from typing import Dict, List, Tuple -import torch -from fvcore.nn import sigmoid_focal_loss_jit -from torch import Tensor, nn -from torch.nn import functional as F - -from detectron2.config import configurable -from detectron2.data.detection_utils import convert_image_to_rgb -from detectron2.layers import ShapeSpec, batched_nms, cat, get_norm, nonzero_tuple -from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou -from detectron2.utils.events import get_event_storage - -from ..anchor_generator import build_anchor_generator -from ..backbone import Backbone, build_backbone -from ..box_regression import Box2BoxTransform, _dense_box_regression_loss -from ..matcher import Matcher -from ..postprocessing import detector_postprocess -from .build import META_ARCH_REGISTRY - -__all__ = ["RetinaNet"] - - -logger = logging.getLogger(__name__) - - -def permute_to_N_HWA_K(tensor, K: int): - """ - Transpose/reshape a tensor from (N, (Ai x K), H, W) to (N, (HxWxAi), K) - """ - assert tensor.dim() == 4, tensor.shape - N, _, H, W = tensor.shape - tensor = tensor.view(N, -1, K, H, W) - tensor = tensor.permute(0, 3, 4, 1, 2) - tensor = tensor.reshape(N, -1, K) # Size=(N,HWA,K) - return tensor - - -@META_ARCH_REGISTRY.register() -class RetinaNet(nn.Module): - """ - Implement RetinaNet in :paper:`RetinaNet`. 
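    A minimal construction sketch (illustrative; it assumes detectron2's standard
    model-zoo RetinaNet config file is available)::

        from detectron2 import model_zoo
        from detectron2.config import get_cfg
        from detectron2.modeling import build_model

        cfg = get_cfg()
        cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/retinanet_R_50_FPN_1x.yaml"))
        model = build_model(cfg)  # instantiates this meta-architecture on cfg.MODEL.DEVICE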
- """ - - @configurable - def __init__( - self, - *, - backbone: Backbone, - head: nn.Module, - head_in_features, - anchor_generator, - box2box_transform, - anchor_matcher, - num_classes, - focal_loss_alpha=0.25, - focal_loss_gamma=2.0, - smooth_l1_beta=0.0, - box_reg_loss_type="smooth_l1", - test_score_thresh=0.05, - test_topk_candidates=1000, - test_nms_thresh=0.5, - max_detections_per_image=100, - pixel_mean, - pixel_std, - vis_period=0, - input_format="BGR", - ): - """ - NOTE: this interface is experimental. - - Args: - backbone: a backbone module, must follow detectron2's backbone interface - head (nn.Module): a module that predicts logits and regression deltas - for each level from a list of per-level features - head_in_features (Tuple[str]): Names of the input feature maps to be used in head - anchor_generator (nn.Module): a module that creates anchors from a - list of features. Usually an instance of :class:`AnchorGenerator` - box2box_transform (Box2BoxTransform): defines the transform from anchors boxes to - instance boxes - anchor_matcher (Matcher): label the anchors by matching them with ground truth. - num_classes (int): number of classes. Used to label background proposals. - - # Loss parameters: - focal_loss_alpha (float): focal_loss_alpha - focal_loss_gamma (float): focal_loss_gamma - smooth_l1_beta (float): smooth_l1_beta - box_reg_loss_type (str): Options are "smooth_l1", "giou" - - # Inference parameters: - test_score_thresh (float): Inference cls score threshold, only anchors with - score > INFERENCE_TH are considered for inference (to improve speed) - test_topk_candidates (int): Select topk candidates before NMS - test_nms_thresh (float): Overlap threshold used for non-maximum suppression - (suppress boxes with IoU >= this threshold) - max_detections_per_image (int): - Maximum number of detections to return per image during inference - (100 is based on the limit established for the COCO dataset). - - # Input parameters - pixel_mean (Tuple[float]): - Values to be used for image normalization (BGR order). - To train on images of different number of channels, set different mean & std. - Default values are the mean pixel value from ImageNet: [103.53, 116.28, 123.675] - pixel_std (Tuple[float]): - When using pre-trained models in Detectron1 or any MSRA models, - std has been absorbed into its conv1 weights, so the std needs to be set 1. - Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std) - vis_period (int): - The period (in terms of steps) for minibatch visualization at train time. - Set to 0 to disable. - input_format (str): Whether the model needs RGB, YUV, HSV etc. 
- """ - super().__init__() - - self.backbone = backbone - self.head = head - self.head_in_features = head_in_features - if len(self.backbone.output_shape()) != len(self.head_in_features): - logger.warning("[RetinaNet] Backbone produces unused features.") - - # Anchors - self.anchor_generator = anchor_generator - self.box2box_transform = box2box_transform - self.anchor_matcher = anchor_matcher - - self.num_classes = num_classes - # Loss parameters: - self.focal_loss_alpha = focal_loss_alpha - self.focal_loss_gamma = focal_loss_gamma - self.smooth_l1_beta = smooth_l1_beta - self.box_reg_loss_type = box_reg_loss_type - # Inference parameters: - self.test_score_thresh = test_score_thresh - self.test_topk_candidates = test_topk_candidates - self.test_nms_thresh = test_nms_thresh - self.max_detections_per_image = max_detections_per_image - # Vis parameters - self.vis_period = vis_period - self.input_format = input_format - - self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False) - self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False) - - """ - In Detectron1, loss is normalized by number of foreground samples in the batch. - When batch size is 1 per GPU, #foreground has a large variance and - using it lead to lower performance. Here we maintain an EMA of #foreground to - stabilize the normalizer. - """ - self.loss_normalizer = 100 # initialize with any reasonable #fg that's not too small - self.loss_normalizer_momentum = 0.9 - - @classmethod - def from_config(cls, cfg): - backbone = build_backbone(cfg) - backbone_shape = backbone.output_shape() - feature_shapes = [backbone_shape[f] for f in cfg.MODEL.RETINANET.IN_FEATURES] - head = RetinaNetHead(cfg, feature_shapes) - anchor_generator = build_anchor_generator(cfg, feature_shapes) - return { - "backbone": backbone, - "head": head, - "anchor_generator": anchor_generator, - "box2box_transform": Box2BoxTransform(weights=cfg.MODEL.RETINANET.BBOX_REG_WEIGHTS), - "anchor_matcher": Matcher( - cfg.MODEL.RETINANET.IOU_THRESHOLDS, - cfg.MODEL.RETINANET.IOU_LABELS, - allow_low_quality_matches=True, - ), - "pixel_mean": cfg.MODEL.PIXEL_MEAN, - "pixel_std": cfg.MODEL.PIXEL_STD, - "num_classes": cfg.MODEL.RETINANET.NUM_CLASSES, - "head_in_features": cfg.MODEL.RETINANET.IN_FEATURES, - # Loss parameters: - "focal_loss_alpha": cfg.MODEL.RETINANET.FOCAL_LOSS_ALPHA, - "focal_loss_gamma": cfg.MODEL.RETINANET.FOCAL_LOSS_GAMMA, - "smooth_l1_beta": cfg.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA, - "box_reg_loss_type": cfg.MODEL.RETINANET.BBOX_REG_LOSS_TYPE, - # Inference parameters: - "test_score_thresh": cfg.MODEL.RETINANET.SCORE_THRESH_TEST, - "test_topk_candidates": cfg.MODEL.RETINANET.TOPK_CANDIDATES_TEST, - "test_nms_thresh": cfg.MODEL.RETINANET.NMS_THRESH_TEST, - "max_detections_per_image": cfg.TEST.DETECTIONS_PER_IMAGE, - # Vis parameters - "vis_period": cfg.VIS_PERIOD, - "input_format": cfg.INPUT.FORMAT, - } - - @property - def device(self): - return self.pixel_mean.device - - def visualize_training(self, batched_inputs, results): - """ - A function used to visualize ground truth images and final network predictions. - It shows ground truth bounding boxes on the original image and up to 20 - predicted object bounding boxes on the original image. - - Args: - batched_inputs (list): a list that contains input to the model. - results (List[Instances]): a list of #images elements. 
- """ - from detectron2.utils.visualizer import Visualizer - - assert len(batched_inputs) == len( - results - ), "Cannot visualize inputs and results of different sizes" - storage = get_event_storage() - max_boxes = 20 - - image_index = 0 # only visualize a single image - img = batched_inputs[image_index]["image"] - img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format) - v_gt = Visualizer(img, None) - v_gt = v_gt.overlay_instances(boxes=batched_inputs[image_index]["instances"].gt_boxes) - anno_img = v_gt.get_image() - processed_results = detector_postprocess(results[image_index], img.shape[0], img.shape[1]) - predicted_boxes = processed_results.pred_boxes.tensor.detach().cpu().numpy() - - v_pred = Visualizer(img, None) - v_pred = v_pred.overlay_instances(boxes=predicted_boxes[0:max_boxes]) - prop_img = v_pred.get_image() - vis_img = np.vstack((anno_img, prop_img)) - vis_img = vis_img.transpose(2, 0, 1) - vis_name = f"Top: GT bounding boxes; Bottom: {max_boxes} Highest Scoring Results" - storage.put_image(vis_name, vis_img) - - def forward(self, batched_inputs: List[Dict[str, Tensor]]): - """ - Args: - batched_inputs: a list, batched outputs of :class:`DatasetMapper` . - Each item in the list contains the inputs for one image. - For now, each item in the list is a dict that contains: - - * image: Tensor, image in (C, H, W) format. - * instances: Instances - - Other information that's included in the original dicts, such as: - - * "height", "width" (int): the output resolution of the model, used in inference. - See :meth:`postprocess` for details. - Returns: - In training, dict[str, Tensor]: mapping from a named loss to a tensor storing the - loss. Used during training only. In inference, the standard output format, described - in :doc:`/tutorials/models`. - """ - images = self.preprocess_image(batched_inputs) - features = self.backbone(images.tensor) - features = [features[f] for f in self.head_in_features] - - anchors = self.anchor_generator(features) - pred_logits, pred_anchor_deltas = self.head(features) - # Transpose the Hi*Wi*A dimension to the middle: - pred_logits = [permute_to_N_HWA_K(x, self.num_classes) for x in pred_logits] - pred_anchor_deltas = [permute_to_N_HWA_K(x, 4) for x in pred_anchor_deltas] - - if self.training: - assert not torch.jit.is_scripting(), "Not supported" - assert "instances" in batched_inputs[0], "Instance annotations are missing in training!" 
- gt_instances = [x["instances"].to(self.device) for x in batched_inputs] - - gt_labels, gt_boxes = self.label_anchors(anchors, gt_instances) - losses = self.losses(anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes) - - if self.vis_period > 0: - storage = get_event_storage() - if storage.iter % self.vis_period == 0: - results = self.inference( - anchors, pred_logits, pred_anchor_deltas, images.image_sizes - ) - self.visualize_training(batched_inputs, results) - - return losses - else: - results = self.inference(anchors, pred_logits, pred_anchor_deltas, images.image_sizes) - if torch.jit.is_scripting(): - return results - processed_results = [] - for results_per_image, input_per_image, image_size in zip( - results, batched_inputs, images.image_sizes - ): - height = input_per_image.get("height", image_size[0]) - width = input_per_image.get("width", image_size[1]) - r = detector_postprocess(results_per_image, height, width) - processed_results.append({"instances": r}) - return processed_results - - def losses(self, anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes): - """ - Args: - anchors (list[Boxes]): a list of #feature level Boxes - gt_labels, gt_boxes: see output of :meth:`RetinaNet.label_anchors`. - Their shapes are (N, R) and (N, R, 4), respectively, where R is - the total number of anchors across levels, i.e. sum(Hi x Wi x Ai) - pred_logits, pred_anchor_deltas: both are list[Tensor]. Each element in the - list corresponds to one level and has shape (N, Hi * Wi * Ai, K or 4). - Where K is the number of classes used in `pred_logits`. - - Returns: - dict[str, Tensor]: - mapping from a named loss to a scalar tensor - storing the loss. Used during training only. The dict keys are: - "loss_cls" and "loss_box_reg" - """ - num_images = len(gt_labels) - gt_labels = torch.stack(gt_labels) # (N, R) - - valid_mask = gt_labels >= 0 - pos_mask = (gt_labels >= 0) & (gt_labels != self.num_classes) - num_pos_anchors = pos_mask.sum().item() - get_event_storage().put_scalar("num_pos_anchors", num_pos_anchors / num_images) - self.loss_normalizer = self.loss_normalizer_momentum * self.loss_normalizer + ( - 1 - self.loss_normalizer_momentum - ) * max(num_pos_anchors, 1) - - # classification and regression loss - gt_labels_target = F.one_hot(gt_labels[valid_mask], num_classes=self.num_classes + 1)[ - :, :-1 - ] # no loss for the last (background) class - loss_cls = sigmoid_focal_loss_jit( - cat(pred_logits, dim=1)[valid_mask], - gt_labels_target.to(pred_logits[0].dtype), - alpha=self.focal_loss_alpha, - gamma=self.focal_loss_gamma, - reduction="sum", - ) - - loss_box_reg = _dense_box_regression_loss( - anchors, - self.box2box_transform, - pred_anchor_deltas, - gt_boxes, - pos_mask, - box_reg_loss_type=self.box_reg_loss_type, - smooth_l1_beta=self.smooth_l1_beta, - ) - - return { - "loss_cls": loss_cls / self.loss_normalizer, - "loss_box_reg": loss_box_reg / self.loss_normalizer, - } - - @torch.no_grad() - def label_anchors(self, anchors, gt_instances): - """ - Args: - anchors (list[Boxes]): A list of #feature level Boxes. - The Boxes contains anchors of this image on the specific feature level. - gt_instances (list[Instances]): a list of N `Instances`s. The i-th - `Instances` contains the ground-truth per-instance annotations - for the i-th input image. - - Returns: - list[Tensor]: List of #img tensors. i-th element is a vector of labels whose length is - the total number of anchors across all feature maps (sum(Hi * Wi * A)). 
- Label values are in {-1, 0, ..., K}, with -1 means ignore, and K means background. - - list[Tensor]: i-th element is a Rx4 tensor, where R is the total number of anchors - across feature maps. The values are the matched gt boxes for each anchor. - Values are undefined for those anchors not labeled as foreground. - """ - anchors = Boxes.cat(anchors) # Rx4 - - gt_labels = [] - matched_gt_boxes = [] - for gt_per_image in gt_instances: - match_quality_matrix = pairwise_iou(gt_per_image.gt_boxes, anchors) - matched_idxs, anchor_labels = self.anchor_matcher(match_quality_matrix) - del match_quality_matrix - - if len(gt_per_image) > 0: - matched_gt_boxes_i = gt_per_image.gt_boxes.tensor[matched_idxs] - - gt_labels_i = gt_per_image.gt_classes[matched_idxs] - # Anchors with label 0 are treated as background. - gt_labels_i[anchor_labels == 0] = self.num_classes - # Anchors with label -1 are ignored. - gt_labels_i[anchor_labels == -1] = -1 - else: - matched_gt_boxes_i = torch.zeros_like(anchors.tensor) - gt_labels_i = torch.zeros_like(matched_idxs) + self.num_classes - - gt_labels.append(gt_labels_i) - matched_gt_boxes.append(matched_gt_boxes_i) - - return gt_labels, matched_gt_boxes - - def inference( - self, - anchors: List[Boxes], - pred_logits: List[Tensor], - pred_anchor_deltas: List[Tensor], - image_sizes: List[Tuple[int, int]], - ): - """ - Arguments: - anchors (list[Boxes]): A list of #feature level Boxes. - The Boxes contain anchors of this image on the specific feature level. - pred_logits, pred_anchor_deltas: list[Tensor], one per level. Each - has shape (N, Hi * Wi * Ai, K or 4) - image_sizes (List[(h, w)]): the input image sizes - - Returns: - results (List[Instances]): a list of #images elements. - """ - results: List[Instances] = [] - for img_idx, image_size in enumerate(image_sizes): - pred_logits_per_image = [x[img_idx] for x in pred_logits] - deltas_per_image = [x[img_idx] for x in pred_anchor_deltas] - results_per_image = self.inference_single_image( - anchors, pred_logits_per_image, deltas_per_image, image_size - ) - results.append(results_per_image) - return results - - def inference_single_image( - self, - anchors: List[Boxes], - box_cls: List[Tensor], - box_delta: List[Tensor], - image_size: Tuple[int, int], - ): - """ - Single-image inference. Return bounding-box detection results by thresholding - on scores and applying non-maximum suppression (NMS). - - Arguments: - anchors (list[Boxes]): list of #feature levels. Each entry contains - a Boxes object, which contains all the anchors in that feature level. - box_cls (list[Tensor]): list of #feature levels. Each entry contains - tensor of size (H x W x A, K) - box_delta (list[Tensor]): Same shape as 'box_cls' except that K becomes 4. - image_size (tuple(H, W)): a tuple of the image height and width. - - Returns: - Same as `inference`, but for only one image. - """ - boxes_all = [] - scores_all = [] - class_idxs_all = [] - - # Iterate over every feature level - for box_cls_i, box_reg_i, anchors_i in zip(box_cls, box_delta, anchors): - # (HxWxAxK,) - predicted_prob = box_cls_i.flatten().sigmoid_() - - # Apply two filtering below to make NMS faster. - # 1. Keep boxes with confidence score higher than threshold - keep_idxs = predicted_prob > self.test_score_thresh - predicted_prob = predicted_prob[keep_idxs] - topk_idxs = nonzero_tuple(keep_idxs)[0] - - # 2. 
Keep top k top scoring boxes only - num_topk = min(self.test_topk_candidates, topk_idxs.size(0)) - # torch.sort is actually faster than .topk (at least on GPUs) - predicted_prob, idxs = predicted_prob.sort(descending=True) - predicted_prob = predicted_prob[:num_topk] - topk_idxs = topk_idxs[idxs[:num_topk]] - - anchor_idxs = topk_idxs // self.num_classes - classes_idxs = topk_idxs % self.num_classes - - box_reg_i = box_reg_i[anchor_idxs] - anchors_i = anchors_i[anchor_idxs] - # predict boxes - predicted_boxes = self.box2box_transform.apply_deltas(box_reg_i, anchors_i.tensor) - - boxes_all.append(predicted_boxes) - scores_all.append(predicted_prob) - class_idxs_all.append(classes_idxs) - - boxes_all, scores_all, class_idxs_all = [ - cat(x) for x in [boxes_all, scores_all, class_idxs_all] - ] - keep = batched_nms(boxes_all, scores_all, class_idxs_all, self.test_nms_thresh) - keep = keep[: self.max_detections_per_image] - - result = Instances(image_size) - result.pred_boxes = Boxes(boxes_all[keep]) - result.scores = scores_all[keep] - result.pred_classes = class_idxs_all[keep] - return result - - def preprocess_image(self, batched_inputs: List[Dict[str, Tensor]]): - """ - Normalize, pad and batch the input images. - """ - images = [x["image"].to(self.device) for x in batched_inputs] - images = [(x - self.pixel_mean) / self.pixel_std for x in images] - images = ImageList.from_tensors(images, self.backbone.size_divisibility) - return images - - -class RetinaNetHead(nn.Module): - """ - The head used in RetinaNet for object classification and box regression. - It has two subnets for the two tasks, with a common structure but separate parameters. - """ - - @configurable - def __init__( - self, - *, - input_shape: List[ShapeSpec], - num_classes, - num_anchors, - conv_dims: List[int], - norm="", - prior_prob=0.01, - ): - """ - NOTE: this interface is experimental. - - Args: - input_shape (List[ShapeSpec]): input shape - num_classes (int): number of classes. Used to label background proposals. - num_anchors (int): number of generated anchors - conv_dims (List[int]): dimensions for each convolution layer - norm (str or callable): - Normalization for conv layers except for the two output layers. - See :func:`detectron2.layers.get_norm` for supported types. 
- prior_prob (float): Prior weight for computing bias - """ - super().__init__() - - if norm == "BN" or norm == "SyncBN": - logger.warning("Shared norm does not work well for BN, SyncBN, expect poor results") - - cls_subnet = [] - bbox_subnet = [] - for in_channels, out_channels in zip( - [input_shape[0].channels] + list(conv_dims), conv_dims - ): - cls_subnet.append( - nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) - ) - if norm: - cls_subnet.append(get_norm(norm, out_channels)) - cls_subnet.append(nn.ReLU()) - bbox_subnet.append( - nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) - ) - if norm: - bbox_subnet.append(get_norm(norm, out_channels)) - bbox_subnet.append(nn.ReLU()) - - self.cls_subnet = nn.Sequential(*cls_subnet) - self.bbox_subnet = nn.Sequential(*bbox_subnet) - self.cls_score = nn.Conv2d( - conv_dims[-1], num_anchors * num_classes, kernel_size=3, stride=1, padding=1 - ) - self.bbox_pred = nn.Conv2d( - conv_dims[-1], num_anchors * 4, kernel_size=3, stride=1, padding=1 - ) - - # Initialization - for modules in [self.cls_subnet, self.bbox_subnet, self.cls_score, self.bbox_pred]: - for layer in modules.modules(): - if isinstance(layer, nn.Conv2d): - torch.nn.init.normal_(layer.weight, mean=0, std=0.01) - torch.nn.init.constant_(layer.bias, 0) - - # Use prior in model initialization to improve stability - bias_value = -(math.log((1 - prior_prob) / prior_prob)) - torch.nn.init.constant_(self.cls_score.bias, bias_value) - - @classmethod - def from_config(cls, cfg, input_shape: List[ShapeSpec]): - num_anchors = build_anchor_generator(cfg, input_shape).num_cell_anchors - assert ( - len(set(num_anchors)) == 1 - ), "Using different number of anchors between levels is not currently supported!" - num_anchors = num_anchors[0] - - return { - "input_shape": input_shape, - "num_classes": cfg.MODEL.RETINANET.NUM_CLASSES, - "conv_dims": [input_shape[0].channels] * cfg.MODEL.RETINANET.NUM_CONVS, - "prior_prob": cfg.MODEL.RETINANET.PRIOR_PROB, - "norm": cfg.MODEL.RETINANET.NORM, - "num_anchors": num_anchors, - } - - def forward(self, features: List[Tensor]): - """ - Arguments: - features (list[Tensor]): FPN feature map tensors in high to low resolution. - Each tensor in the list correspond to different feature levels. - - Returns: - logits (list[Tensor]): #lvl tensors, each has shape (N, AxK, Hi, Wi). - The tensor predicts the classification probability - at each spatial position for each of the A anchors and K object - classes. - bbox_reg (list[Tensor]): #lvl tensors, each has shape (N, Ax4, Hi, Wi). - The tensor predicts 4-vector (dx,dy,dw,dh) box - regression values for every anchor. These values are the - relative offset between the anchor and the ground truth box. - """ - logits = [] - bbox_reg = [] - for feature in features: - logits.append(self.cls_score(self.cls_subnet(feature))) - bbox_reg.append(self.bbox_pred(self.bbox_subnet(feature))) - return logits, bbox_reg diff --git a/spaces/Chintan-Donda/KKMS-KSSW-HF/src/ner_detection.py b/spaces/Chintan-Donda/KKMS-KSSW-HF/src/ner_detection.py deleted file mode 100644 index 067a69719185a6b0c61d84e0478392141110462e..0000000000000000000000000000000000000000 --- a/spaces/Chintan-Donda/KKMS-KSSW-HF/src/ner_detection.py +++ /dev/null @@ -1,58 +0,0 @@ -import gradio as gr -import openai -import os -import re -import ast - -openai.api_key = "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" -SYSTEM_PROMPT = "You are a smart and intelligent Named Entity Recognition (NER) system. 
I will provide you with the definition of the entities you need to extract, the sentence from which you extract the entities, and the output format with examples." -USER_PROMPT_1 = "Are you clear about your role?" -ASSISTANT_PROMPT_1 = "Sure, I'm ready to help you with your NER task. Please provide me with the necessary information to get started." -GUIDELINES_PROMPT = ( - """Entity Definition:\n" - "1. PEST NAME: Name of the pest which has attacked a particular crop which may lead to crop damage.\n" - "2. CROP DISEASE: Any kind of crop disease which occurs in agricultural land in India and nearby regions.\n" - "3. WEATHER CONDITION: Severe climate conditions, like heavy rainfall or hailstorms, which have destroyed crops.\n" - "\n" - "Output Format:\n" - "{{'PEST NAME': [list of entities present], 'CROP DISEASE': [list of entities present], 'WEATHER CONDITION': [list of entities present]}}\n" - "If no entities are present in a category, keep it None\n" - "\n" - "Examples:\n" - "\n" - "1. Sentence: Pest attack on maize crop in lower Kangra : The Tribune India. Farmers in lower Kangra are a harried lot as the fall armyworm pest has attacked their maize crop. 'Kolshi' continues to affect Vidarbha's Orange crop cultivation (Citrus Black Fly) | Krishak Jagat. A total of 1,50,000 hectares of land in the Vidarbha region is planted with oranges, and of them, 25% are seriously damaged by Kolshi, a citrus black fly disease. India's June tea output drops 17% as floods hit plucking | Mint. India's June tea production fell 17.4% from a year earlier to 141.31 million kilograms, the state-run Tea Board said, as floods and pest attack dented output in the main producing region\n" - "Output: {{'PEST NAME': ['fall armyworm'], 'CROP DISEASE': ['citrus black fly disease'], 'WEATHER CONDITION': ['floods']}}\n" - "\n" - "2. Sentence: ICAR issues pest alert in Leparada, W/Siang | The Arunachal Times. 70 percent prevalence of fall army worm in maize fields in Pagi, Gori and Bam villages in Leparada district and Darka, Kombo and Jirdin villages in West Siang district was observed. After maize, Kangra vegetable crops under white fly attack : The Tribune India. Vegetable crops are under attack by white fly in the lower hills of Kangra district. The pest attack comes after the recent damage caused by fall armyworm to the maize crop in the area. Pest attacks on paddy crop worry farmers in the integrated Karimnagar district | Hindudayashankar. Crops withering due to stem borer, leaf folder and rice blast; farmers have to incur huge expenditures to control menace. Cyclone Amphan damages crop, vegetable prices shoot up | Cities News,The Indian Express. Cyclone Amphan has damaged vegetables across South Bengal. Farmers lost 80 to 90 per cent of crop as fields were flooded.\n" - "Output: {{'PEST NAME': ['fall army worm', 'white fly attack', 'stem borer', 'leaf folder'], 'CROP DISEASE': ['rice blast'], 'WEATHER CONDITION': ['Cyclone Amphan']}}\n" - "\n" - "3. 
Sentence: {}\n" - "Output: """ -) - -def openai_chat_completion_response(news_article_text): - final_prompt = GUIDELINES_PROMPT.format(news_article_text) - response = openai.ChatCompletion.create( - model="gpt-3.5-turbo", - messages=[ - {"role": "system", "content": SYSTEM_PROMPT}, - {"role": "user", "content": USER_PROMPT_1}, - {"role": "assistant", "content": ASSISTANT_PROMPT_1}, - {"role": "user", "content": final_prompt} - ] - ) - return response['choices'][0]['message']['content'].strip(" \n") - -# def preprocess(prompt): -# return GUIDELINES_PROMPT.format(prompt) -# def main(): -# my_sentence = "Hundreds of hectares of land under the cotton crop, once referred to as white gold, has come under attack of a wide range of insects like whitefly, pink bollworm and mealybug. This is likely to hit the cotton production this year." -# GUIDELINES_PROMPT = GUIDELINES_PROMPT.format(my_sentence) -# # print(GUIDELINES_PROMPT) -# ners = openai_chat_completion_response(GUIDELINES_PROMPT) -# print(ners) - -import gradio as gra -#define gradio interface and other parameters -app = gra.Interface(fn = openai_chat_completion_response, inputs="text", outputs="text") -app.launch(share=True) diff --git a/spaces/Chukwuka/Dog_Breed_ImageWoof/app.py b/spaces/Chukwuka/Dog_Breed_ImageWoof/app.py deleted file mode 100644 index 17cd82b5df0ec44db46a72103a9e2999af47cc0e..0000000000000000000000000000000000000000 --- a/spaces/Chukwuka/Dog_Breed_ImageWoof/app.py +++ /dev/null @@ -1,98 +0,0 @@ - -### 1. Imports and class names setup ### -import gradio as gr -import os -import numpy as np -import torch -import torchvision.transforms as T - -from model import Efficient_b2_model -from timeit import default_timer as timer -from typing import Tuple, Dict -from data_setup import classes, model_tsfm - -# Setup class names -#class_names = ['pizza', 'steak', 'sushi'] - -### 2. Model and transforms preparation ### -#test_tsfm = T.Compose([T.Resize((224,224)), -# T.ToTensor(), -# T.Normalize(mean=[0.485, 0.456, 0.406], # 3. A mean of [0.485, 0.456, 0.406] (across each colour channel) -# std=[0.229, 0.224, 0.225]) # 4. A standard deviation of [0.229, 0.224, 0.225] (across each colour channel), -# ]) - -# Create EffNetB2 Model -effnet_b2 = Efficient_b2_model(num_classes=len(classes), pretrained=True) -#effnet_b2 -#effnetb2, test_transform = create_effnet_b2(num_of_class=len(class_names), - #transform=test_tsfm, - #seed=42) - -# saved_path = 'demos\foodvision_mini\09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth' -saved_path = 'efficient_b2_checkpoint_model_2023_02_04.pth' - -print('Loading Model State Dictionary') -# Load saved weights -effnet_b2.load_state_dict( - torch.load(f=saved_path, - map_location=torch.device('cpu'), # load to CPU - ) - ) - -print('Model Loaded ...') -### 3. Predict function ### - -# Create predict function -from typing import Tuple, Dict - -def predict(img) -> Tuple[Dict, float]: - """Transforms and performs a prediction on img and returns prediction and time taken. 
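    Returns:
        Tuple[Dict, float]: a dict mapping each class name to its predicted
        probability, and the time taken for the prediction in seconds.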
- """ - # Start the timer - start_time = timer() - - # Transform the target image and add a batch dimension - #img = get_image(img_path, model_tsfm).unsqueeze(0) - img = model_tsfm(image=np.array(img))["image"] - img = img.unsqueeze(0) - - # Put model into evaluation mode and turn on inference mode - effnet_b2.eval() - with torch.inference_mode(): - # Pass the transformed image through the model and turn the prediction logits into prediction probabilities - pred_probs = torch.softmax(effnet_b2(img), dim=1) - - # Create a prediction label and prediction probability dictionary for each prediction class (this is the required format for Gradio's output parameter) - pred_labels_and_probs = {classes[i]: float(pred_probs[0][i]) for i in range(len(classes))} - - # Calculate the prediction time - pred_time = round(timer() - start_time, 5) - - # Return the prediction dictionary and prediction time - return pred_labels_and_probs, pred_time - -### 4. Gradio App ### - -# Create title, description and article strings -title= 'DogBreed Mini 🐩🐶🦮🐕🦺' -description = "An EfficientNetB2 feature extractor computer vision model to classify images of Dog breeds." -article = " ImageWoof Created by Chukwuka
" - - -# Create examples list from "examples/" directory -example_list = [["examples/" + example] for example in os.listdir("examples")] - -# Create the Gradio demo -demo = gr.Interface(fn=predict, # mapping function from input to output - inputs=gr.Image(type='pil'), # What are the inputs? - outputs=[gr.Label(num_top_classes=10, label="Predictions"), # what are the outputs? - gr.Number(label='Prediction time (s)')], # Our fn has two outputs, therefore we have two outputs - examples=example_list, - title=title, - description=description, - article=article - ) -# Launch the demo -print('Gradio Demo Launched') -demo.launch() - diff --git a/spaces/Cicooo/vits-uma-genshin-honkai/commons.py b/spaces/Cicooo/vits-uma-genshin-honkai/commons.py deleted file mode 100644 index 40fcc05364d4815971f5c6f9dbb8dcef8e3ec1e9..0000000000000000000000000000000000000000 --- a/spaces/Cicooo/vits-uma-genshin-honkai/commons.py +++ /dev/null @@ -1,172 +0,0 @@ -import math -import torch -from torch.nn import functional as F -import torch.jit - - -def script_method(fn, _rcb=None): - return fn - - -def script(obj, optimize=True, _frames_up=0, _rcb=None): - return obj - - -torch.jit.script_method = script_method -torch.jit.script = script - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. 
* logs_q) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d( - length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (num_timescales - 1)) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): 
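        # a single bare tensor is wrapped in a list so the rest of the function can iterate
        # over `parameters` uniformly (the same convention as torch.nn.utils.clip_grad_value_)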
- parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1. / norm_type) - return total_norm diff --git a/spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/components/Config.js b/spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/components/Config.js deleted file mode 100644 index 471b247378f214c409c20d2e636f42134e124e02..0000000000000000000000000000000000000000 --- a/spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/components/Config.js +++ /dev/null @@ -1,375 +0,0 @@ - -import YAML from 'yaml' -import chokidar from 'chokidar' -import fs from 'node:fs' -import YamlReader from './YamlReader.js' -import cfg from '../../../lib/config/config.js' -import _ from 'lodash' -import { modifyWebSocket } from './WebSocket.js' -import { cfgSchema } from '../config/system/cfg_system.js' - -const Path = process.cwd() -const Plugin_Name = 'ws-plugin' -const Plugin_Path = `${Path}/plugins/${Plugin_Name}` -class Config { - constructor() { - this.config = {} - this.oldConfig = {} - /** 监听文件 */ - this.watcher = { config: {}, defSet: {} } - - this.initCfg() - } - - /** 初始化配置 */ - initCfg() { - let path = `${Plugin_Path}/config/config/` - if (!fs.existsSync(path)) fs.mkdirSync(path) - let pathDef = `${Plugin_Path}/config/default_config/` - const files = fs.readdirSync(pathDef).filter(file => file.endsWith('.yaml')) - for (let file of files) { - if (!fs.existsSync(`${path}${file}`)) { - fs.copyFileSync(`${pathDef}${file}`, `${path}${file}`) - } - this.watch(`${path}${file}`, file.replace('.yaml', ''), 'config') - } - } - - /** 主人QQ */ - get masterQQ() { - return cfg.masterQQ - } - - /** Bot账号:[主人帐号] */ - get master() { - return cfg.master - } - - /** 云崽黑名单群 */ - get blackGroup() { - return cfg.getOther().blackGroup - } - - /** 云崽白名单群 */ - get whiteGroup() { - return cfg.getOther().whiteGroup - } - - /** 心跳 */ - get heartbeatInterval() { - return this.getDefOrConfig('ws-config').heartbeatInterval - } - - /** 数据上报类型 */ - get messagePostFormat() { - return this.getDefOrConfig('ws-config').messagePostFormat - } - - /** 连接列表 */ - get servers() { - return this.getDefOrConfig('ws-config').servers - } - - get noMsgStart() { - return this.getDefOrConfig('msg-config').noMsgStart - } - - get noMsgInclude() { - return this.getDefOrConfig('msg-config').noMsgInclude - } - - get howToMaster() { - return this.getDefOrConfig('msg-config').howToMaster - } - - /**掉线时否通知主人 */ - get disconnectToMaster() { - return this.getDefOrConfig('msg-config').disconnectToMaster - } - - /**重连成功时是否通知主人 */ - get reconnectToMaster() { - return this.getDefOrConfig('msg-config').reconnectToMaster - } - - /**首次连接成功时是否通知主人 */ - get firstconnectToMaster() { - return this.getDefOrConfig('msg-config').firstconnectToMaster - } - - /**消息存储时间 */ - get msgStoreTime() { - return this.getDefOrConfig('msg-config').msgStoreTime - } - - /**禁用群聊列表 */ - get noGroup() { - return this.getDefOrConfig('msg-config').noGroup - } - - /** 白名单群聊 */ - get yesGroup() { - return this.getDefOrConfig('msg-config').yesGroup - } - - /** 禁言拦截 */ - get muteStop() { - return this.getDefOrConfig('msg-config').muteStop - } - - /** red 发送伪造转发消息方式 */ - get redSendForwardMsgType(){ - return 
this.getDefOrConfig('msg-config').redSendForwardMsgType - } - - /**群管理员变动是否上报 */ - get groupAdmin() { - return this.getDefOrConfig('notice-config').groupAdmin - } - - /**群成员减少是否上报 */ - get groupDecrease() { - return this.getDefOrConfig('notice-config').groupDecrease - } - - /**群成员增加是否上报 */ - get groupIncrease() { - return this.getDefOrConfig('notice-config').groupIncrease - } - - /**群禁言是否上报 */ - get groupBan() { - return this.getDefOrConfig('notice-config').groupBan - } - - /**好友添加是否上报 */ - get friendIncrease() { - return this.getDefOrConfig('notice-config').friendIncrease - } - - /**群消息撤回是否上报 */ - get groupRecall() { - return this.getDefOrConfig('notice-config').groupRecall - } - - /**好友消息撤回是否上报 */ - get friendRecall() { - return this.getDefOrConfig('notice-config').friendRecall - } - - /**群内戳一戳是否上报 */ - get groupPoke() { - return this.getDefOrConfig('notice-config').groupPoke - } - - /** 好友申请是否上报 */ - get friendAdd() { - return this.getDefOrConfig('request-config').friendAdd - } - - /** 群聊邀请是否上报 (邀请机器人入群) */ - get groupInvite() { - return this.getDefOrConfig('request-config').groupInvite - } - - /** 群聊申请是否上报 (申请加入群聊) */ - get groupAdd() { - return this.getDefOrConfig('request-config').groupAdd - } - - /** 默认配置和用户配置 */ - getDefOrConfig(name) { - let def = this.getdefSet(name) - let config = this.getConfig(name) - return { ...def, ...config } - } - - /** 默认配置 */ - getdefSet(name) { - return this.getYaml('default_config', name) - } - - /** 用户配置 */ - getConfig(name) { - return this.getYaml('config', name) - } - - /** - * 获取配置yaml - * @param type 默认跑配置-defSet,用户配置-config - * @param name 名称 - */ - getYaml(type, name) { - let file = `${Plugin_Path}/config/${type}/${name}.yaml` - let key = `${type}.${name}` - - if (this.config[key]) return this.config[key] - - this.config[key] = YAML.parse( - fs.readFileSync(file, 'utf8') - ) - - this.watch(file, name, type) - - return this.config[key] - } - - /** 监听配置文件 */ - watch(file, name, type = 'default_config') { - let key = `${type}.${name}` - if (!this.oldConfig[key]) this.oldConfig[key] = _.cloneDeep(this.config[key]) - if (this.watcher[key]) return - - const watcher = chokidar.watch(file) - watcher.on('change', path => { - delete this.config[key] - if (typeof Bot == 'undefined') return - logger.mark(`[ws-plugin][修改配置文件][${type}][${name}]`) - - if (name == 'ws-config') { - const oldConfig = this.oldConfig[key] - delete this.oldConfig[key] - const newConfig = this.getYaml(type, name) - const object = this.findDifference(oldConfig, newConfig) - // console.log(object); - for (const key in object) { - if (Object.hasOwnProperty.call(object, key)) { - const value = object[key]; - const arr = key.split('.') - if (arr[0] !== 'servers') continue - let data = newConfig.servers[arr[1]] - if (typeof data === 'undefined') data = oldConfig.servers[arr[1]] - const target = { - type: null, - data - } - if (typeof value['newValue'] === 'object' && typeof value['oldValue'] === 'undefined') { - target.type = 'add' - } - else if (typeof value['newValue'] === 'undefined' && typeof value['oldValue'] === 'object') { - target.type = 'del' - } - else if (value['newValue'] === true && (value['oldValue'] === false || typeof value['oldValue'] === 'undefined')) { - target.type = 'close' - } - else if (value['newValue'] === false && (value['oldValue'] === true || typeof value['oldValue'] === 'undefined')) { - target.type = 'open' - } - modifyWebSocket(target) - } - } - - } - }) - - this.watcher[key] = watcher - } - - getCfgSchemaMap() { - let ret = {} - _.forEach(cfgSchema, 
(cfgGroup) => { - _.forEach(cfgGroup.cfg, (cfgItem, cfgKey) => { - ret[cfgItem.key] = cfgItem - cfgItem.cfgKey = cfgKey - }) - }) - return ret - } - - getCfgSchema() { - return cfgSchema - } - - getCfg() { - let wsconfig = this.getDefOrConfig('ws-config') - let msgconfig = this.getDefOrConfig('msg-config') - let noticeconfig = this.getDefOrConfig('notice-config') - let requestconfig = this.getDefOrConfig('request-config') - return { - ...wsconfig, - ...msgconfig, - ...noticeconfig, - ...requestconfig - } - } - - /** - * @description: 修改设置 - * @param {String} name 文件名 - * @param {String} key 修改的key值 - * @param {String|Number} value 修改的value值 - * @param {'config'|'default_config'} type 配置文件或默认 - */ - modify(name, key, value, type = 'config') { - let path = `${Plugin_Path}/config/${type}/${name}.yaml` - new YamlReader(path).set(key, value) - this.oldConfig[key] = _.cloneDeep(this.config[key]) - delete this.config[`${type}.${name}`] - } - - /** - * @description: 修改配置数组 - * @param {String} name 文件名 - * @param {String|Number} key key值 - * @param {String|Number} value value - * @param {'add'|'del'} category 类别 add or del - * @param {'config'|'default_config'} type 配置文件或默认 - */ - modifyarr(name, key, value, category = 'add', type = 'config') { - let path = `${Plugin_Path}/config/${type}/${name}.yaml` - let yaml = new YamlReader(path) - if (category == 'add') { - yaml.addIn(key, value) - } else { - let index = yaml.jsonData[key].indexOf(value) - yaml.delete(`${key}.${index}`) - } - } - - setArr(name, key, item, value, type = 'config') { - let path = `${Plugin_Path}/config/${type}/${name}.yaml` - let yaml = new YamlReader(path) - let arr = yaml.get(key).slice(); - arr[item] = value - yaml.set(key, arr) - } - - delServersArr(value, name = 'ws-config', type = 'config') { - let path = `${Plugin_Path}/config/${type}/${name}.yaml` - let yaml = new YamlReader(path) - let key = 'servers' - // let index = yaml.jsonData[key].indexOf(value) - let index = yaml.jsonData[key].findIndex(item => item.name === value); - yaml.delete(`${key}.${index}`) - } - - /** - * @description 对比两个对象不同的值 - * @param {*} oldObj - * @param {*} newObj - * @param {*} parentKey - * @returns - */ - findDifference(obj1, obj2, parentKey = '') { - const result = {}; - for (const key in obj1) { - const fullKey = parentKey ? `${parentKey}.${key}` : key; - if (_.isObject(obj1[key]) && _.isObject(obj2[key])) { - const diff = this.findDifference(obj1[key], obj2[key], fullKey); - if (!_.isEmpty(diff)) { - Object.assign(result, diff); - } - } else if (!_.isEqual(obj1[key], obj2[key])) { - result[fullKey] = { oldValue: obj1[key], newValue: obj2[key] }; - } - } - for (const key in obj2) { - if (!obj1.hasOwnProperty(key)) { - const fullKey = parentKey ? 
`${parentKey}.${key}` : key; - result[fullKey] = { oldValue: undefined, newValue: obj2[key] }; - } - } - return result; - } -} -export default new Config() \ No newline at end of file diff --git a/spaces/CikeyQI/meme-api/meme_generator/memes/little_angel/__init__.py b/spaces/CikeyQI/meme-api/meme_generator/memes/little_angel/__init__.py deleted file mode 100644 index bbfe9c60b425be26ec9b1560f20f26fcbc948ede..0000000000000000000000000000000000000000 --- a/spaces/CikeyQI/meme-api/meme_generator/memes/little_angel/__init__.py +++ /dev/null @@ -1,55 +0,0 @@ -from typing import List - -from pil_utils import BuildImage - -from meme_generator import MemeArgsModel, add_meme -from meme_generator.exception import TextOverLength -from meme_generator.utils import make_jpg_or_gif - - -def little_angel(images: List[BuildImage], texts: List[str], args: MemeArgsModel): - img_w, img_h = images[0].convert("RGBA").resize_width(500).size - frame = BuildImage.new("RGBA", (600, img_h + 230), "white") - text = "非常可爱!简直就是小天使" - frame.draw_text( - (10, img_h + 120, 590, img_h + 185), text, max_fontsize=48, weight="bold" - ) - - ta = "她" - name = ta - if texts: - name = texts[0] - elif args.user_infos: - info = args.user_infos[0] - ta = "他" if info.gender == "male" else "她" - name = info.name or ta - - text = f"{ta}没失踪也没怎么样 我只是觉得你们都该看一下" - frame.draw_text( - (20, img_h + 180, 580, img_h + 215), text, max_fontsize=26, weight="bold" - ) - - text = f"请问你们看到{name}了吗?" - try: - frame.draw_text( - (20, 0, 580, 110), text, max_fontsize=70, min_fontsize=25, weight="bold" - ) - except ValueError: - raise TextOverLength(name) - - def make(img: BuildImage) -> BuildImage: - img = img.convert("RGBA").resize_width(500) - return frame.copy().paste(img, (int(300 - img_w / 2), 110), alpha=True) - - return make_jpg_or_gif(images[0], make) - - -add_meme( - "little_angel", - little_angel, - min_images=1, - max_images=1, - min_texts=0, - max_texts=1, - keywords=["小天使"], -) diff --git a/spaces/Cpp4App/Cpp4App/CDM/detect_compo/deprecated/ip_detection_utils.py b/spaces/Cpp4App/Cpp4App/CDM/detect_compo/deprecated/ip_detection_utils.py deleted file mode 100644 index 17e2140fcbb4c09ef25a53184dd9048113b0d3de..0000000000000000000000000000000000000000 --- a/spaces/Cpp4App/Cpp4App/CDM/detect_compo/deprecated/ip_detection_utils.py +++ /dev/null @@ -1,461 +0,0 @@ -import numpy as np -import cv2 -from collections import Counter - -import lib_ip.ip_draw as draw -from CDM.config.CONFIG_UIED import Config -C = Config() - - -# detect object(connected region) -# def boundary_bfs_connected_area(img, x, y, mark): -# def neighbor(img, x, y, mark, stack): -# for i in range(x - 1, x + 2): -# if i < 0 or i >= img.shape[0]: continue -# for j in range(y - 1, y + 2): -# if j < 0 or j >= img.shape[1]: continue -# if img[i, j] == 255 and mark[i, j] == 0: -# stack.append([i, j]) -# mark[i, j] = 255 -# -# stack = [[x, y]] # points waiting for inspection -# area = [[x, y]] # points of this area -# mark[x, y] = 255 # drawing broad -# -# while len(stack) > 0: -# point = stack.pop() -# area.append(point) -# neighbor(img, point[0], point[1], mark, stack) -# return area - - -# def line_check_perpendicular(lines_h, lines_v, max_thickness): -# """ -# lines: [line_h, line_v] -# -> line_h: horizontal {'head':(column_min, row), 'end':(column_max, row), 'thickness':int) -# -> line_v: vertical {'head':(column, row_min), 'end':(column, row_max), 'thickness':int} -# """ -# is_per_h = np.full(len(lines_h), False) -# is_per_v = np.full(len(lines_v), False) -# for i in 
range(len(lines_h)): -# # save the intersection point of h -# lines_h[i]['inter_point'] = set() -# h = lines_h[i] -# -# for j in range(len(lines_v)): -# # save the intersection point of v -# if 'inter_point' not in lines_v[j]: lines_v[j]['inter_point'] = set() -# v = lines_v[j] -# -# # if h is perpendicular to v in head of v -# if abs(h['head'][1]-v['head'][1]) <= max_thickness: -# if abs(h['head'][0] - v['head'][0]) <= max_thickness: -# lines_h[i]['inter_point'].add('head') -# lines_v[j]['inter_point'].add('head') -# is_per_h[i] = True -# is_per_v[j] = True -# elif abs(h['end'][0] - v['head'][0]) <= max_thickness: -# lines_h[i]['inter_point'].add('end') -# lines_v[j]['inter_point'].add('head') -# is_per_h[i] = True -# is_per_v[j] = True -# -# # if h is perpendicular to v in end of v -# elif abs(h['head'][1]-v['end'][1]) <= max_thickness: -# if abs(h['head'][0] - v['head'][0]) <= max_thickness: -# lines_h[i]['inter_point'].add('head') -# lines_v[j]['inter_point'].add('end') -# is_per_h[i] = True -# is_per_v[j] = True -# elif abs(h['end'][0] - v['head'][0]) <= max_thickness: -# lines_h[i]['inter_point'].add('end') -# lines_v[j]['inter_point'].add('end') -# is_per_h[i] = True -# is_per_v[j] = True -# per_h = [] -# per_v = [] -# for i in range(len(is_per_h)): -# if is_per_h[i]: -# lines_h[i]['inter_point'] = list(lines_h[i]['inter_point']) -# per_h.append(lines_h[i]) -# for i in range(len(is_per_v)): -# if is_per_v[i]: -# lines_v[i]['inter_point'] = list(lines_v[i]['inter_point']) -# per_v.append(lines_v[i]) -# return per_h, per_v - - -# def line_shrink_corners(corner, lines_h, lines_v): -# """ -# shrink the corner according to lines: -# col_min_shrink: shrink right (increase) -# col_max_shrink: shrink left (decrease) -# row_min_shrink: shrink down (increase) -# row_max_shrink: shrink up (decrease) -# :param lines_h: horizontal {'head':(column_min, row), 'end':(column_max, row), 'thickness':int) -# :param lines_v: vertical {'head':(column, row_min), 'end':(column, row_max), 'thickness':int} -# :return: shrunken corner: (top_left, bottom_right) -# """ -# (col_min, row_min), (col_max, row_max) = corner -# col_min_shrink, row_min_shrink = col_min, row_min -# col_max_shrink, row_max_shrink = col_max, row_max -# valid_frame = False -# -# for h in lines_h: -# # ignore outer border -# if len(h['inter_point']) == 2: -# valid_frame = True -# continue -# # shrink right -> col_min move to end -# if h['inter_point'][0] == 'head': -# col_min_shrink = max(h['end'][0], col_min_shrink) -# # shrink left -> col_max move to head -# elif h['inter_point'][0] == 'end': -# col_max_shrink = min(h['head'][0], col_max_shrink) -# -# for v in lines_v: -# # ignore outer border -# if len(v['inter_point']) == 2: -# valid_frame = True -# continue -# # shrink down -> row_min move to end -# if v['inter_point'][0] == 'head': -# row_min_shrink = max(v['end'][1], row_min_shrink) -# # shrink up -> row_max move to head -# elif v['inter_point'][0] == 'end': -# row_max_shrink = min(v['head'][1], row_max_shrink) -# -# # return the shrunken corner if only there is line intersecting with two other lines -# if valid_frame: -# return (col_min_shrink, row_min_shrink), (col_max_shrink, row_max_shrink) -# return corner - - -# def line_cvt_relative_position(col_min, row_min, lines_h, lines_v): -# """ -# convert the relative position of lines in the entire image -# :param col_min: based column the img lines belong to -# :param row_min: based row the img lines belong to -# :param lines_h: horizontal {'head':(column_min, row), 
'end':(column_max, row), 'thickness':int) -# :param lines_v: vertical {'head':(column, row_min), 'end':(column, row_max), 'thickness':int} -# :return: lines_h_cvt, lines_v_cvt -# """ -# for h in lines_h: -# h['head'][0] += col_min -# h['head'][1] += row_min -# h['end'][0] += col_min -# h['end'][1] += row_min -# for v in lines_v: -# v['head'][0] += col_min -# v['head'][1] += row_min -# v['end'][0] += col_min -# v['end'][1] += row_min -# -# return lines_h, lines_v - - -# check if an object is so slim -# @boundary: [border_up, border_bottom, border_left, border_right] -# -> up, bottom: (column_index, min/max row border) -# -> left, right: (row_index, min/max column border) detect range of each row -def clipping_by_line(boundary, boundary_rec, lines): - boundary = boundary.copy() - for orient in lines: - # horizontal - if orient == 'h': - # column range of sub area - r1, r2 = 0, 0 - for line in lines[orient]: - if line[0] == 0: - r1 = line[1] - continue - r2 = line[0] - b_top = [] - b_bottom = [] - for i in range(len(boundary[0])): - if r2 > boundary[0][i][0] >= r1: - b_top.append(boundary[0][i]) - for i in range(len(boundary[1])): - if r2 > boundary[1][i][0] >= r1: - b_bottom.append(boundary[1][i]) - - b_left = [x for x in boundary[2]] # (row_index, min column border) - for i in range(len(b_left)): - if b_left[i][1] < r1: - b_left[i][1] = r1 - b_right = [x for x in boundary[3]] # (row_index, max column border) - for i in range(len(b_right)): - if b_right[i][1] > r2: - b_right[i][1] = r2 - - boundary_rec.append([b_top, b_bottom, b_left, b_right]) - r1 = line[1] - - -# remove imgs that contain text -# def rm_text(org, corners, compo_class, -# max_text_height=C.THRESHOLD_TEXT_MAX_HEIGHT, max_text_width=C.THRESHOLD_TEXT_MAX_WIDTH, -# ocr_padding=C.OCR_PADDING, ocr_min_word_area=C.OCR_MIN_WORD_AREA, show=False): -# """ -# Remove area that full of text -# :param org: original image -# :param corners: [(top_left, bottom_right)] -# -> top_left: (column_min, row_min) -# -> bottom_right: (column_max, row_max) -# :param compo_class: classes of corners -# :param max_text_height: Too large to be text -# :param max_text_width: Too large to be text -# :param ocr_padding: Padding for clipping -# :param ocr_min_word_area: If too text area ratio is too large -# :param show: Show or not -# :return: corners without text objects -# """ -# new_corners = [] -# new_class = [] -# for i in range(len(corners)): -# corner = corners[i] -# (top_left, bottom_right) = corner -# (col_min, row_min) = top_left -# (col_max, row_max) = bottom_right -# height = row_max - row_min -# width = col_max - col_min -# # highly likely to be block or img if too large -# if height > max_text_height and width > max_text_width: -# new_corners.append(corner) -# new_class.append(compo_class[i]) -# else: -# row_min = row_min - ocr_padding if row_min - ocr_padding >= 0 else 0 -# row_max = row_max + ocr_padding if row_max + ocr_padding < org.shape[0] else org.shape[0] -# col_min = col_min - ocr_padding if col_min - ocr_padding >= 0 else 0 -# col_max = col_max + ocr_padding if col_max + ocr_padding < org.shape[1] else org.shape[1] -# # check if this area is text -# clip = org[row_min: row_max, col_min: col_max] -# if not ocr.is_text(clip, ocr_min_word_area, show=show): -# new_corners.append(corner) -# new_class.append(compo_class[i]) -# return new_corners, new_class - - -# def rm_img_in_compo(corners_img, corners_compo): -# """ -# Remove imgs in component -# """ -# corners_img_new = [] -# for img in corners_img: -# is_nested = False -# for compo in 
corners_compo: -# if util.corner_relation(img, compo) == -1: -# is_nested = True -# break -# if not is_nested: -# corners_img_new.append(img) -# return corners_img_new - - -# def block_or_compo(org, binary, corners, -# max_thickness=C.THRESHOLD_BLOCK_MAX_BORDER_THICKNESS, max_block_cross_points=C.THRESHOLD_BLOCK_MAX_CROSS_POINT, -# min_compo_w_h_ratio=C.THRESHOLD_UICOMPO_MIN_W_H_RATIO, max_compo_w_h_ratio=C.THRESHOLD_UICOMPO_MAX_W_H_RATIO, -# min_block_edge=C.THRESHOLD_BLOCK_MIN_EDGE_LENGTH): -# """ -# Check if the objects are img components or just block -# :param org: Original image -# :param binary: Binary image from pre-processing -# :param corners: [(top_left, bottom_right)] -# -> top_left: (column_min, row_min) -# -> bottom_right: (column_max, row_max) -# :param max_thickness: The max thickness of border of blocks -# :param max_block_cross_points: Ratio of point of interaction -# :return: corners of blocks and imgs -# """ -# blocks = [] -# imgs = [] -# compos = [] -# for corner in corners: -# (top_left, bottom_right) = corner -# (col_min, row_min) = top_left -# (col_max, row_max) = bottom_right -# height = row_max - row_min -# width = col_max - col_min -# -# block = False -# vacancy = [0, 0, 0, 0] -# for i in range(1, max_thickness): -# try: -# # top to bottom -# if vacancy[0] == 0 and (col_max - col_min - 2 * i) is not 0 and ( -# np.sum(binary[row_min + i, col_min + i: col_max - i]) / 255) / (col_max - col_min - 2 * i) <= max_block_cross_points: -# vacancy[0] = 1 -# # bottom to top -# if vacancy[1] == 0 and (col_max - col_min - 2 * i) is not 0 and ( -# np.sum(binary[row_max - i, col_min + i: col_max - i]) / 255) / (col_max - col_min - 2 * i) <= max_block_cross_points: -# vacancy[1] = 1 -# # left to right -# if vacancy[2] == 0 and (row_max - row_min - 2 * i) is not 0 and ( -# np.sum(binary[row_min + i: row_max - i, col_min + i]) / 255) / (row_max - row_min - 2 * i) <= max_block_cross_points: -# vacancy[2] = 1 -# # right to left -# if vacancy[3] == 0 and (row_max - row_min - 2 * i) is not 0 and ( -# np.sum(binary[row_min + i: row_max - i, col_max - i]) / 255) / (row_max - row_min - 2 * i) <= max_block_cross_points: -# vacancy[3] = 1 -# if np.sum(vacancy) == 4: -# block = True -# except: -# pass -# -# # too big to be UI components -# if block: -# if height > min_block_edge and width > min_block_edge: -# blocks.append(corner) -# else: -# if min_compo_w_h_ratio < width / height < max_compo_w_h_ratio: -# compos.append(corner) -# # filter out small objects -# else: -# if height > min_block_edge: -# imgs.append(corner) -# else: -# if min_compo_w_h_ratio < width / height < max_compo_w_h_ratio: -# compos.append(corner) -# return blocks, imgs, compos - - -# def compo_on_img(processing, org, binary, clf, -# compos_corner, compos_class): -# """ -# Detect potential UI components inner img; -# Only leave non-img -# """ -# pad = 2 -# for i in range(len(compos_corner)): -# if compos_class[i] != 'img': -# continue -# ((col_min, row_min), (col_max, row_max)) = compos_corner[i] -# col_min = max(col_min - pad, 0) -# col_max = min(col_max + pad, org.shape[1]) -# row_min = max(row_min - pad, 0) -# row_max = min(row_max + pad, org.shape[0]) -# area = (col_max - col_min) * (row_max - row_min) -# if area < 600: -# continue -# -# clip_org = org[row_min:row_max, col_min:col_max] -# clip_bin_inv = pre.reverse_binary(binary[row_min:row_max, col_min:col_max]) -# -# compos_boundary_new, compos_corner_new, compos_class_new = processing(clip_org, clip_bin_inv, clf) -# compos_corner_new = 
util.corner_cvt_relative_position(compos_corner_new, col_min, row_min) -# -# assert len(compos_corner_new) == len(compos_class_new) -# -# # only leave non-img elements -# for i in range(len(compos_corner_new)): -# ((col_min_new, row_min_new), (col_max_new, row_max_new)) = compos_corner_new[i] -# area_new = (col_max_new - col_min_new) * (row_max_new - row_min_new) -# if compos_class_new[i] != 'img' and area_new / area < 0.8: -# compos_corner.append(compos_corner_new[i]) -# compos_class.append(compos_class_new[i]) -# -# return compos_corner, compos_class - - -# def strip_img(corners_compo, compos_class, corners_img): -# """ -# Separate img from other compos -# :return: compos without img -# """ -# corners_compo_withuot_img = [] -# compo_class_withuot_img = [] -# for i in range(len(compos_class)): -# if compos_class[i] == 'img': -# corners_img.append(corners_compo[i]) -# else: -# corners_compo_withuot_img.append(corners_compo[i]) -# compo_class_withuot_img.append(compos_class[i]) -# return corners_compo_withuot_img, compo_class_withuot_img - - -# def merge_corner(corners, compos_class, min_selected_IoU=C.THRESHOLD_MIN_IOU, is_merge_nested_same=True): -# """ -# Calculate the Intersection over Overlap (IoU) and merge corners according to the value of IoU -# :param is_merge_nested_same: if true, merge the nested corners with same class whatever the IoU is -# :param corners: corners: [(top_left, bottom_right)] -# -> top_left: (column_min, row_min) -# -> bottom_right: (column_max, row_max) -# :return: new corners -# """ -# new_corners = [] -# new_class = [] -# for i in range(len(corners)): -# is_intersected = False -# for j in range(len(new_corners)): -# r = util.corner_relation_nms(corners[i], new_corners[j], min_selected_IoU) -# # r = util.corner_relation(corners[i], new_corners[j]) -# if is_merge_nested_same: -# if compos_class[i] == new_class[j]: -# # if corners[i] is in new_corners[j], ignore corners[i] -# if r == -1: -# is_intersected = True -# break -# # if new_corners[j] is in corners[i], replace new_corners[j] with corners[i] -# elif r == 1: -# is_intersected = True -# new_corners[j] = corners[i] -# -# # if above IoU threshold, and corners[i] is in new_corners[j], ignore corners[i] -# if r == -2: -# is_intersected = True -# break -# # if above IoU threshold, and new_corners[j] is in corners[i], replace new_corners[j] with corners[i] -# elif r == 2: -# is_intersected = True -# new_corners[j] = corners[i] -# new_class[j] = compos_class[i] -# -# # containing and too small -# elif r == -3: -# is_intersected = True -# break -# elif r == 3: -# is_intersected = True -# new_corners[j] = corners[i] -# -# # if [i] and [j] are overlapped but no containing relation, merge corners when same class -# elif r == 4: -# is_intersected = True -# if compos_class[i] == new_class[j]: -# new_corners[j] = util.corner_merge_two_corners(corners[i], new_corners[j]) -# -# if not is_intersected: -# new_corners.append(corners[i]) -# new_class.append(compos_class[i]) -# return new_corners, new_class - - -# def select_corner(corners, compos_class, class_name): -# """ -# Select corners in given compo type -# """ -# corners_wanted = [] -# for i in range(len(compos_class)): -# if compos_class[i] == class_name: -# corners_wanted.append(corners[i]) -# return corners_wanted - - -# def flood_fill_bfs(img, x_start, y_start, mark, grad_thresh): -# def neighbor(x, y): -# for i in range(x - 1, x + 2): -# if i < 0 or i >= img.shape[0]: continue -# for j in range(y - 1, y + 2): -# if j < 0 or j >= img.shape[1]: continue -# if 
mark[i, j] == 0 and abs(img[i, j] - img[x, y]) < grad_thresh: -# stack.append([i, j]) -# mark[i, j] = 255 -# -# stack = [[x_start, y_start]] # points waiting for inspection -# region = [[x_start, y_start]] # points of this connected region -# mark[x_start, y_start] = 255 # drawing broad -# while len(stack) > 0: -# point = stack.pop() -# region.append(point) -# neighbor(point[0], point[1]) -# return region \ No newline at end of file diff --git a/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/datasets/datasets/laion_dataset.py b/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/datasets/datasets/laion_dataset.py deleted file mode 100644 index 1be30abb188e1afad6fe678ccbb367931a2b3d26..0000000000000000000000000000000000000000 --- a/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/datasets/datasets/laion_dataset.py +++ /dev/null @@ -1,31 +0,0 @@ -""" - Copyright (c) 2022, salesforce.com, inc. - All rights reserved. - SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause -""" - -import webdataset as wds -from video_llama.datasets.datasets.base_dataset import BaseDataset - - -class LaionDataset(BaseDataset): - def __init__(self, vis_processor, text_processor, location): - super().__init__(vis_processor=vis_processor, text_processor=text_processor) - - self.inner_dataset = wds.DataPipeline( - wds.ResampledShards(location), - wds.tarfile_to_samples(handler=wds.warn_and_continue), - wds.shuffle(1000, handler=wds.warn_and_continue), - wds.decode("pilrgb", handler=wds.warn_and_continue), - wds.to_tuple("jpg", "json", handler=wds.warn_and_continue), - wds.map_tuple(self.vis_processor, handler=wds.warn_and_continue), - wds.map(self.to_dict, handler=wds.warn_and_continue), - ) - - def to_dict(self, sample): - return { - "image": sample[0], - "text_input": self.text_processor(sample[1]["caption"]), - } - diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/__main__.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/__main__.py deleted file mode 100644 index a05323f93b6850c2f86aedb3b1a5dee16358027f..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/__main__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .features import pilinfo - -pilinfo() diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/pens/reportLabPen.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/pens/reportLabPen.py deleted file mode 100644 index 2cb89c8bf4c772b7a987edb0593c40c83cc2201b..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/pens/reportLabPen.py +++ /dev/null @@ -1,80 +0,0 @@ -from fontTools.pens.basePen import BasePen -from reportlab.graphics.shapes import Path - - -__all__ = ["ReportLabPen"] - - -class ReportLabPen(BasePen): - - """A pen for drawing onto a ``reportlab.graphics.shapes.Path`` object.""" - - def __init__(self, glyphSet, path=None): - BasePen.__init__(self, glyphSet) - if path is None: - path = Path() - self.path = path - - def _moveTo(self, p): - (x, y) = p - self.path.moveTo(x, y) - - def _lineTo(self, p): - (x, y) = p - self.path.lineTo(x, y) - - def _curveToOne(self, p1, p2, p3): - (x1, y1) = p1 - (x2, y2) = p2 - (x3, y3) = p3 - self.path.curveTo(x1, y1, x2, y2, x3, y3) - - def _closePath(self): - self.path.closePath() - - -if __name__ == "__main__": - import sys - - if len(sys.argv) < 3: - print( - "Usage: reportLabPen.py[ ]" - ) - 
print( - " If no image file name is created, by default .png is created." - ) - print(" example: reportLabPen.py Arial.TTF R test.png") - print( - " (The file format will be PNG, regardless of the image file name supplied)" - ) - sys.exit(0) - - from fontTools.ttLib import TTFont - from reportlab.lib import colors - - path = sys.argv[1] - glyphName = sys.argv[2] - if len(sys.argv) > 3: - imageFile = sys.argv[3] - else: - imageFile = "%s.png" % glyphName - - font = TTFont(path) # it would work just as well with fontTools.t1Lib.T1Font - gs = font.getGlyphSet() - pen = ReportLabPen(gs, Path(fillColor=colors.red, strokeWidth=5)) - g = gs[glyphName] - g.draw(pen) - - w, h = g.width, 1000 - from reportlab.graphics import renderPM - from reportlab.graphics.shapes import Group, Drawing, scale - - # Everything is wrapped in a group to allow transformations. - g = Group(pen.path) - g.translate(0, 200) - g.scale(0.3, 0.3) - - d = Drawing(w, h) - d.add(g) - - renderPM.drawToFile(d, imageFile, fmt="PNG") diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/DefaultTable.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/DefaultTable.py deleted file mode 100644 index 32a4b1f258f54d78ad39eb764867a6c354939743..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/DefaultTable.py +++ /dev/null @@ -1,50 +0,0 @@ -from fontTools.misc.textTools import Tag -from fontTools.ttLib import getClassTag - - -class DefaultTable(object): - - dependencies = [] - - def __init__(self, tag=None): - if tag is None: - tag = getClassTag(self.__class__) - self.tableTag = Tag(tag) - - def decompile(self, data, ttFont): - self.data = data - - def compile(self, ttFont): - return self.data - - def toXML(self, writer, ttFont, **kwargs): - if hasattr(self, "ERROR"): - writer.comment("An error occurred during the decompilation of this table") - writer.newline() - writer.comment(self.ERROR) - writer.newline() - writer.begintag("hexdata") - writer.newline() - writer.dumphex(self.compile(ttFont)) - writer.endtag("hexdata") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - from fontTools.misc.textTools import readHex - from fontTools import ttLib - - if name != "hexdata": - raise ttLib.TTLibError("can't handle '%s' element" % name) - self.decompile(readHex(content), ttFont) - - def __repr__(self): - return "<'%s' table at %x>" % (self.tableTag, id(self)) - - def __eq__(self, other): - if type(self) != type(other): - return NotImplemented - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - result = self.__eq__(other) - return result if result is NotImplemented else not result diff --git a/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/dataset/build.py b/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/dataset/build.py deleted file mode 100644 index 6460ad7debbc459b72815b1199d8381c281daf52..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/dataset/build.py +++ /dev/null @@ -1,115 +0,0 @@ -""" -@Date: 2021/07/18 -@description: -""" -import numpy as np -import torch.utils.data -from dataset.mp3d_dataset import MP3DDataset -from dataset.pano_s2d3d_dataset import PanoS2D3DDataset -from dataset.pano_s2d3d_mix_dataset import PanoS2D3DMixDataset -from dataset.zind_dataset import ZindDataset - - -def build_loader(config, logger): - name = config.DATA.DATASET - ddp = config.WORLD_SIZE > 1 - 
train_dataset = None - train_data_loader = None - if config.MODE == 'train': - train_dataset = build_dataset(mode='train', config=config, logger=logger) - - val_dataset = build_dataset(mode=config.VAL_NAME if config.MODE != 'test' else 'test', config=config, logger=logger) - - train_sampler = None - val_sampler = None - if ddp: - if train_dataset: - train_sampler = torch.utils.data.DistributedSampler(train_dataset, shuffle=True) - val_sampler = torch.utils.data.DistributedSampler(val_dataset, shuffle=False) - - batch_size = config.DATA.BATCH_SIZE - num_workers = 0 if config.DEBUG else config.DATA.NUM_WORKERS - pin_memory = config.DATA.PIN_MEMORY - if train_dataset: - logger.info(f'Train data loader batch size: {batch_size}') - train_data_loader = torch.utils.data.DataLoader( - train_dataset, sampler=train_sampler, - batch_size=batch_size, - shuffle=True, - num_workers=num_workers, - pin_memory=pin_memory, - drop_last=True, - ) - batch_size = batch_size - (len(val_dataset) % np.arange(batch_size, 0, -1)).tolist().index(0) - logger.info(f'Val data loader batch size: {batch_size}') - val_data_loader = torch.utils.data.DataLoader( - val_dataset, sampler=val_sampler, - batch_size=batch_size, - shuffle=False, - num_workers=num_workers, - pin_memory=pin_memory, - drop_last=False - ) - logger.info(f'Build data loader: num_workers:{num_workers} pin_memory:{pin_memory}') - return train_data_loader, val_data_loader - - -def build_dataset(mode, config, logger): - name = config.DATA.DATASET - if name == 'mp3d': - dataset = MP3DDataset( - root_dir=config.DATA.DIR, - mode=mode, - shape=config.DATA.SHAPE, - max_wall_num=config.DATA.WALL_NUM, - aug=config.DATA.AUG if mode == 'train' else None, - camera_height=config.DATA.CAMERA_HEIGHT, - logger=logger, - for_test_index=config.DATA.FOR_TEST_INDEX, - keys=config.DATA.KEYS - ) - elif name == 'pano_s2d3d': - dataset = PanoS2D3DDataset( - root_dir=config.DATA.DIR, - mode=mode, - shape=config.DATA.SHAPE, - max_wall_num=config.DATA.WALL_NUM, - aug=config.DATA.AUG if mode == 'train' else None, - camera_height=config.DATA.CAMERA_HEIGHT, - logger=logger, - for_test_index=config.DATA.FOR_TEST_INDEX, - subset=config.DATA.SUBSET, - keys=config.DATA.KEYS - ) - elif name == 'pano_s2d3d_mix': - dataset = PanoS2D3DMixDataset( - root_dir=config.DATA.DIR, - mode=mode, - shape=config.DATA.SHAPE, - max_wall_num=config.DATA.WALL_NUM, - aug=config.DATA.AUG if mode == 'train' else None, - camera_height=config.DATA.CAMERA_HEIGHT, - logger=logger, - for_test_index=config.DATA.FOR_TEST_INDEX, - subset=config.DATA.SUBSET, - keys=config.DATA.KEYS - ) - elif name == 'zind': - dataset = ZindDataset( - root_dir=config.DATA.DIR, - mode=mode, - shape=config.DATA.SHAPE, - max_wall_num=config.DATA.WALL_NUM, - aug=config.DATA.AUG if mode == 'train' else None, - camera_height=config.DATA.CAMERA_HEIGHT, - logger=logger, - for_test_index=config.DATA.FOR_TEST_INDEX, - is_simple=True, - is_ceiling_flat=False, - keys=config.DATA.KEYS, - vp_align=config.EVAL.POST_PROCESSING is not None and 'manhattan' in config.EVAL.POST_PROCESSING - ) - else: - raise NotImplementedError(f"Unknown dataset: {name}") - - return dataset diff --git a/spaces/Detomo/ai-comic-generation/src/app/interface/page/index.tsx b/spaces/Detomo/ai-comic-generation/src/app/interface/page/index.tsx deleted file mode 100644 index 9a4c4fbf9ee68d2e95234c4b33fee0b0b34fa4c1..0000000000000000000000000000000000000000 --- a/spaces/Detomo/ai-comic-generation/src/app/interface/page/index.tsx +++ /dev/null @@ -1,59 +0,0 @@ -import { allLayouts } 
from "@/app/layouts" -import { useStore } from "@/app/store" -import { cn } from "@/lib/utils" -import { useEffect, useRef } from "react" - -export function Page({ page }: { page: number }) { - const zoomLevel = useStore(state => state.zoomLevel) - const layouts = useStore(state => state.layouts) - // const prompt = useStore(state => state.prompt) - - const LayoutElement = (allLayouts as any)[layouts[page]] - - /* - const [canLoad, setCanLoad] = useState(false) - useEffect(() => { - if (prompt?.length) { - setCanLoad(false) - setTimeout(() => { - setCanLoad(true) - }, page * 4000) - } - }, [prompt]) - */ - - const setPage = useStore(state => state.setPage) - const pageRef = useRef (null) - - useEffect(() => { - const element = pageRef.current - if (!element) { return } - setPage(element) - }, [pageRef.current]) - - return ( - 100 ? `100`}` - }} - > -- ) -} \ No newline at end of file diff --git a/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/dnnlib/submission/run_context.py b/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/dnnlib/submission/run_context.py deleted file mode 100644 index 932320e4735bde1b547ac6062b175601b7959547..0000000000000000000000000000000000000000 --- a/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/dnnlib/submission/run_context.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. -# -# This work is licensed under the Creative Commons Attribution-NonCommercial -# 4.0 International License. To view a copy of this license, visit -# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to -# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. - -"""Helpers for managing the run/training loop.""" - -import datetime -import json -import os -import pprint -import time -import types - -from typing import Any - -from . import submit - - -class RunContext(object): - """Helper class for managing the run/training loop. - - The context will hide the implementation details of a basic run/training loop. - It will set things up properly, tell if run should be stopped, and then cleans up. - User should call update periodically and use should_stop to determine if run should be stopped. - - Args: - submit_config: The SubmitConfig that is used for the current run. - config_module: The whole config module that is used for the current run. - max_epoch: Optional cached value for the max_epoch variable used in update. 
- """ - - def __init__(self, submit_config: submit.SubmitConfig, config_module: types.ModuleType = None, max_epoch: Any = None): - self.submit_config = submit_config - self.should_stop_flag = False - self.has_closed = False - self.start_time = time.time() - self.last_update_time = time.time() - self.last_update_interval = 0.0 - self.max_epoch = max_epoch - - # pretty print the all the relevant content of the config module to a text file - if config_module is not None: - with open(os.path.join(submit_config.run_dir, "config.txt"), "w") as f: - filtered_dict = {k: v for k, v in config_module.__dict__.items() if not k.startswith("_") and not isinstance(v, (types.ModuleType, types.FunctionType, types.LambdaType, submit.SubmitConfig, type))} - pprint.pprint(filtered_dict, stream=f, indent=4, width=200, compact=False) - - # write out details about the run to a text file - self.run_txt_data = {"task_name": submit_config.task_name, "host_name": submit_config.host_name, "start_time": datetime.datetime.now().isoformat(sep=" ")} - with open(os.path.join(submit_config.run_dir, "run.txt"), "w") as f: - pprint.pprint(self.run_txt_data, stream=f, indent=4, width=200, compact=False) - - def __enter__(self) -> "RunContext": - return self - - def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: - self.close() - - def update(self, loss: Any = 0, cur_epoch: Any = 0, max_epoch: Any = None) -> None: - """Do general housekeeping and keep the state of the context up-to-date. - Should be called often enough but not in a tight loop.""" - assert not self.has_closed - - self.last_update_interval = time.time() - self.last_update_time - self.last_update_time = time.time() - - if os.path.exists(os.path.join(self.submit_config.run_dir, "abort.txt")): - self.should_stop_flag = True - - max_epoch_val = self.max_epoch if max_epoch is None else max_epoch - - def should_stop(self) -> bool: - """Tell whether a stopping condition has been triggered one way or another.""" - return self.should_stop_flag - - def get_time_since_start(self) -> float: - """How much time has passed since the creation of the context.""" - return time.time() - self.start_time - - def get_time_since_last_update(self) -> float: - """How much time has passed since the last call to update.""" - return time.time() - self.last_update_time - - def get_last_update_interval(self) -> float: - """How much time passed between the previous two calls to update.""" - return self.last_update_interval - - def close(self) -> None: - """Close the context and clean up. - Should only be called once.""" - if not self.has_closed: - # update the run.txt with stopping time - self.run_txt_data["stop_time"] = datetime.datetime.now().isoformat(sep=" ") - with open(os.path.join(self.submit_config.run_dir, "run.txt"), "w") as f: - pprint.pprint(self.run_txt_data, stream=f, indent=4, width=200, compact=False) - - self.has_closed = True diff --git a/spaces/DragGan/DragGan-Inversion/gui_utils/text_utils.py b/spaces/DragGan/DragGan-Inversion/gui_utils/text_utils.py deleted file mode 100644 index d1d971d9defa9a223d5b4b19def17f351a262833..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan-Inversion/gui_utils/text_utils.py +++ /dev/null @@ -1,141 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. 
Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -import functools -from typing import Optional - -import dnnlib -import numpy as np -import PIL.Image -import PIL.ImageFont -import scipy.ndimage - -from . import gl_utils - -# ---------------------------------------------------------------------------- - - -def get_default_font(): - # Open Sans regular - url = 'http://fonts.gstatic.com/s/opensans/v17/mem8YaGs126MiZpBA-U1UpcaXcl0Aw.ttf' - return dnnlib.util.open_url(url, return_filename=True) - -# ---------------------------------------------------------------------------- - - -@functools.lru_cache(maxsize=None) -def get_pil_font(font=None, size=32): - if font is None: - font = get_default_font() - return PIL.ImageFont.truetype(font=font, size=size) - -# ---------------------------------------------------------------------------- - - -def get_array(string, *, dropshadow_radius: int = None, **kwargs): - if dropshadow_radius is not None: - offset_x = int(np.ceil(dropshadow_radius*2/3)) - offset_y = int(np.ceil(dropshadow_radius*2/3)) - return _get_array_priv(string, dropshadow_radius=dropshadow_radius, offset_x=offset_x, offset_y=offset_y, **kwargs) - else: - return _get_array_priv(string, **kwargs) - - -@functools.lru_cache(maxsize=10000) -def _get_array_priv( - string: str, *, - size: int = 32, - max_width: Optional[int] = None, - max_height: Optional[int] = None, - min_size=10, - shrink_coef=0.8, - dropshadow_radius: int = None, - offset_x: int = None, - offset_y: int = None, - **kwargs -): - cur_size = size - array = None - while True: - if dropshadow_radius is not None: - # separate implementation for dropshadow text rendering - array = _get_array_impl_dropshadow( - string, size=cur_size, radius=dropshadow_radius, offset_x=offset_x, offset_y=offset_y, **kwargs) - else: - array = _get_array_impl(string, size=cur_size, **kwargs) - height, width, _ = array.shape - if (max_width is None or width <= max_width) and (max_height is None or height <= max_height) or (cur_size <= min_size): - break - cur_size = max(int(cur_size * shrink_coef), min_size) - return array - -# ---------------------------------------------------------------------------- - - -@functools.lru_cache(maxsize=10000) -def _get_array_impl(string, *, font=None, size=32, outline=0, outline_pad=3, outline_coef=3, outline_exp=2, line_pad: int = None): - pil_font = get_pil_font(font=font, size=size) - lines = [pil_font.getmask(line, 'L') for line in string.split('\n')] - lines = [np.array(line, dtype=np.uint8).reshape( - [line.size[1], line.size[0]]) for line in lines] - width = max(line.shape[1] for line in lines) - lines = [np.pad(line, ((0, 0), (0, width - line.shape[1])), - mode='constant') for line in lines] - line_spacing = line_pad if line_pad is not None else size // 2 - lines = [np.pad(line, ((0, line_spacing), (0, 0)), mode='constant') - for line in lines[:-1]] + lines[-1:] - mask = np.concatenate(lines, axis=0) - alpha = mask - if outline > 0: - mask = np.pad(mask, int(np.ceil(outline * outline_pad)), - mode='constant', constant_values=0) - alpha = mask.astype(np.float32) / 255 - alpha = scipy.ndimage.gaussian_filter(alpha, outline) - alpha = 1 - np.maximum(1 - alpha * outline_coef, 0) ** outline_exp - alpha = (alpha * 255 + 0.5).clip(0, 255).astype(np.uint8) - alpha = np.maximum(alpha, mask) - return np.stack([mask, alpha], axis=-1) - -# 
---------------------------------------------------------------------------- - - -@functools.lru_cache(maxsize=10000) -def _get_array_impl_dropshadow(string, *, font=None, size=32, radius: int, offset_x: int, offset_y: int, line_pad: int = None, **kwargs): - assert (offset_x > 0) and (offset_y > 0) - pil_font = get_pil_font(font=font, size=size) - lines = [pil_font.getmask(line, 'L') for line in string.split('\n')] - lines = [np.array(line, dtype=np.uint8).reshape( - [line.size[1], line.size[0]]) for line in lines] - width = max(line.shape[1] for line in lines) - lines = [np.pad(line, ((0, 0), (0, width - line.shape[1])), - mode='constant') for line in lines] - line_spacing = line_pad if line_pad is not None else size // 2 - lines = [np.pad(line, ((0, line_spacing), (0, 0)), mode='constant') - for line in lines[:-1]] + lines[-1:] - mask = np.concatenate(lines, axis=0) - alpha = mask - - mask = np.pad(mask, 2*radius + max(abs(offset_x), abs(offset_y)), - mode='constant', constant_values=0) - alpha = mask.astype(np.float32) / 255 - alpha = scipy.ndimage.gaussian_filter(alpha, radius) - alpha = 1 - np.maximum(1 - alpha * 1.5, 0) ** 1.4 - alpha = (alpha * 255 + 0.5).clip(0, 255).astype(np.uint8) - alpha = np.pad(alpha, [(offset_y, 0), (offset_x, 0)], - mode='constant')[:-offset_y, :-offset_x] - alpha = np.maximum(alpha, mask) - return np.stack([mask, alpha], axis=-1) - -# ---------------------------------------------------------------------------- - - -@functools.lru_cache(maxsize=10000) -def get_texture(string, bilinear=True, mipmap=True, **kwargs): - return gl_utils.Texture(image=get_array(string, **kwargs), bilinear=bilinear, mipmap=mipmap) - -# ---------------------------------------------------------------------------- diff --git a/spaces/ECCV2022/bytetrack/yolox/motdt_tracker/kalman_filter.py b/spaces/ECCV2022/bytetrack/yolox/motdt_tracker/kalman_filter.py deleted file mode 100644 index deda8a26292b81bc6512a8f6145afabde6c16d7a..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/bytetrack/yolox/motdt_tracker/kalman_filter.py +++ /dev/null @@ -1,270 +0,0 @@ -# vim: expandtab:ts=4:sw=4 -import numpy as np -import scipy.linalg - - -""" -Table for the 0.95 quantile of the chi-square distribution with N degrees of -freedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv -function and used as Mahalanobis gating threshold. -""" -chi2inv95 = { - 1: 3.8415, - 2: 5.9915, - 3: 7.8147, - 4: 9.4877, - 5: 11.070, - 6: 12.592, - 7: 14.067, - 8: 15.507, - 9: 16.919} - - -class KalmanFilter(object): - """ - A simple Kalman filter for tracking bounding boxes in image space. - - The 8-dimensional state space - - x, y, a, h, vx, vy, va, vh - - contains the bounding box center position (x, y), aspect ratio a, height h, - and their respective velocities. - - Object motion follows a constant velocity model. The bounding box location - (x, y, a, h) is taken as direct observation of the state space (linear - observation model). - - """ - - def __init__(self): - ndim, dt = 4, 1. - - # Create Kalman filter model matrices. - self._motion_mat = np.eye(2 * ndim, 2 * ndim) - for i in range(ndim): - self._motion_mat[i, ndim + i] = dt - self._update_mat = np.eye(ndim, 2 * ndim) - - # Motion and observation uncertainty are chosen relative to the current - # state estimate. These weights control the amount of uncertainty in - # the model. This is a bit hacky. - self._std_weight_position = 1. / 20 - self._std_weight_velocity = 1. 
/ 160 - - def initiate(self, measurement): - """Create track from unassociated measurement. - - Parameters - ---------- - measurement : ndarray - Bounding box coordinates (x, y, a, h) with center position (x, y), - aspect ratio a, and height h. - - Returns - ------- - (ndarray, ndarray) - Returns the mean vector (8 dimensional) and covariance matrix (8x8 - dimensional) of the new track. Unobserved velocities are initialized - to 0 mean. - - """ - mean_pos = measurement - mean_vel = np.zeros_like(mean_pos) - mean = np.r_[mean_pos, mean_vel] - - std = [ - 2 * self._std_weight_position * measurement[3], - 2 * self._std_weight_position * measurement[3], - 1e-2, - 2 * self._std_weight_position * measurement[3], - 10 * self._std_weight_velocity * measurement[3], - 10 * self._std_weight_velocity * measurement[3], - 1e-5, - 10 * self._std_weight_velocity * measurement[3]] - covariance = np.diag(np.square(std)) - return mean, covariance - - def predict(self, mean, covariance): - """Run Kalman filter prediction step. - - Parameters - ---------- - mean : ndarray - The 8 dimensional mean vector of the object state at the previous - time step. - covariance : ndarray - The 8x8 dimensional covariance matrix of the object state at the - previous time step. - - Returns - ------- - (ndarray, ndarray) - Returns the mean vector and covariance matrix of the predicted - state. Unobserved velocities are initialized to 0 mean. - - """ - std_pos = [ - self._std_weight_position * mean[3], - self._std_weight_position * mean[3], - 1e-2, - self._std_weight_position * mean[3]] - std_vel = [ - self._std_weight_velocity * mean[3], - self._std_weight_velocity * mean[3], - 1e-5, - self._std_weight_velocity * mean[3]] - motion_cov = np.diag(np.square(np.r_[std_pos, std_vel])) - - #mean = np.dot(self._motion_mat, mean) - mean = np.dot(mean, self._motion_mat.T) - covariance = np.linalg.multi_dot(( - self._motion_mat, covariance, self._motion_mat.T)) + motion_cov - - return mean, covariance - - def project(self, mean, covariance): - """Project state distribution to measurement space. - - Parameters - ---------- - mean : ndarray - The state's mean vector (8 dimensional array). - covariance : ndarray - The state's covariance matrix (8x8 dimensional). - - Returns - ------- - (ndarray, ndarray) - Returns the projected mean and covariance matrix of the given state - estimate. - - """ - std = [ - self._std_weight_position * mean[3], - self._std_weight_position * mean[3], - 1e-1, - self._std_weight_position * mean[3]] - innovation_cov = np.diag(np.square(std)) - - mean = np.dot(self._update_mat, mean) - covariance = np.linalg.multi_dot(( - self._update_mat, covariance, self._update_mat.T)) - return mean, covariance + innovation_cov - - def multi_predict(self, mean, covariance): - """Run Kalman filter prediction step (Vectorized version). - Parameters - ---------- - mean : ndarray - The Nx8 dimensional mean matrix of the object states at the previous - time step. - covariance : ndarray - The Nx8x8 dimensional covariance matrics of the object states at the - previous time step. - Returns - ------- - (ndarray, ndarray) - Returns the mean vector and covariance matrix of the predicted - state. Unobserved velocities are initialized to 0 mean. 
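-
-        Illustrative usage (a minimal sketch, not part of the original
-        docstring; `kf` is a KalmanFilter instance and `tracks` is a
-        hypothetical list of (mean, covariance) pairs returned by `initiate`):
-
-            means = np.asarray([m for m, c in tracks])        # (N, 8)
-            covariances = np.asarray([c for m, c in tracks])  # (N, 8, 8)
-            means, covariances = kf.multi_predict(means, covariances)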
- """ - std_pos = [ - self._std_weight_position * mean[:, 3], - self._std_weight_position * mean[:, 3], - 1e-2 * np.ones_like(mean[:, 3]), - self._std_weight_position * mean[:, 3]] - std_vel = [ - self._std_weight_velocity * mean[:, 3], - self._std_weight_velocity * mean[:, 3], - 1e-5 * np.ones_like(mean[:, 3]), - self._std_weight_velocity * mean[:, 3]] - sqr = np.square(np.r_[std_pos, std_vel]).T - - motion_cov = [] - for i in range(len(mean)): - motion_cov.append(np.diag(sqr[i])) - motion_cov = np.asarray(motion_cov) - - mean = np.dot(mean, self._motion_mat.T) - left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2)) - covariance = np.dot(left, self._motion_mat.T) + motion_cov - - return mean, covariance - - def update(self, mean, covariance, measurement): - """Run Kalman filter correction step. - - Parameters - ---------- - mean : ndarray - The predicted state's mean vector (8 dimensional). - covariance : ndarray - The state's covariance matrix (8x8 dimensional). - measurement : ndarray - The 4 dimensional measurement vector (x, y, a, h), where (x, y) - is the center position, a the aspect ratio, and h the height of the - bounding box. - - Returns - ------- - (ndarray, ndarray) - Returns the measurement-corrected state distribution. - - """ - projected_mean, projected_cov = self.project(mean, covariance) - - chol_factor, lower = scipy.linalg.cho_factor( - projected_cov, lower=True, check_finite=False) - kalman_gain = scipy.linalg.cho_solve( - (chol_factor, lower), np.dot(covariance, self._update_mat.T).T, - check_finite=False).T - innovation = measurement - projected_mean - - new_mean = mean + np.dot(innovation, kalman_gain.T) - new_covariance = covariance - np.linalg.multi_dot(( - kalman_gain, projected_cov, kalman_gain.T)) - return new_mean, new_covariance - - def gating_distance(self, mean, covariance, measurements, - only_position=False, metric='maha'): - """Compute gating distance between state distribution and measurements. - A suitable distance threshold can be obtained from `chi2inv95`. If - `only_position` is False, the chi-square distribution has 4 degrees of - freedom, otherwise 2. - Parameters - ---------- - mean : ndarray - Mean vector over the state distribution (8 dimensional). - covariance : ndarray - Covariance of the state distribution (8x8 dimensional). - measurements : ndarray - An Nx4 dimensional matrix of N measurements, each in - format (x, y, a, h) where (x, y) is the bounding box center - position, a the aspect ratio, and h the height. - only_position : Optional[bool] - If True, distance computation is done with respect to the bounding - box center position only. - Returns - ------- - ndarray - Returns an array of length N, where the i-th element contains the - squared Mahalanobis distance between (mean, covariance) and - `measurements[i]`. 
- """ - mean, covariance = self.project(mean, covariance) - if only_position: - mean, covariance = mean[:2], covariance[:2, :2] - measurements = measurements[:, :2] - - d = measurements - mean - if metric == 'gaussian': - return np.sum(d * d, axis=1) - elif metric == 'maha': - cholesky_factor = np.linalg.cholesky(covariance) - z = scipy.linalg.solve_triangular( - cholesky_factor, d.T, lower=True, check_finite=False, - overwrite_b=True) - squared_maha = np.sum(z * z, axis=0) - return squared_maha - else: - raise ValueError('invalid distance metric') \ No newline at end of file diff --git a/spaces/EleutherAI/magma/magma/image_encoders.py b/spaces/EleutherAI/magma/magma/image_encoders.py deleted file mode 100644 index 69e5ca11cef483032e40ae5c5b5ddbb86711927d..0000000000000000000000000000000000000000 --- a/spaces/EleutherAI/magma/magma/image_encoders.py +++ /dev/null @@ -1,91 +0,0 @@ -import torch -import torch.nn as nn -from typing import Callable, Union -from torchtyping import patch_typeguard -from einops import rearrange -import timm -import clip -from functools import partial - -# ----------------------------- Utils -------------------------------------- - -clip.model.LayerNorm = ( - nn.LayerNorm -) # we need to patch this for clip to work with deepspeed -patch_typeguard() # needed for torchtyping typechecks to work - - -class Lambda(torch.nn.Module): - def __init__(self, fn: Callable): - super().__init__() - assert hasattr(fn, "__call__") - self.fn = fn - - def forward(self, x): - return self.fn(x) - - -# ------------------------- Image encoders ---------------------------------- - - -def nfresnet50( - device: Union[torch.device, str] = None, pretrained: bool = True -) -> nn.Module: - """ - Loads nfresnet50 model, removing the pooling layer and replacing it with - an adaptive pooling layer. - """ - encoder = torch.nn.Sequential( - *list(timm.create_model("nf_resnet50", pretrained=pretrained).children())[:-1] - ) - pooling = torch.nn.AdaptiveAvgPool2d((1, 1)) - encoder = torch.nn.Sequential(encoder, pooling) - if device is not None: - encoder = encoder.to(device) - return encoder - - -def clip_encoder( - device: Union[torch.device, str] = None, name: str = "clip", -) -> nn.Module: - """ - Loads clip's image encoder module, discarding the lm component. - - If the variant is a resnet model, we also remove the attention pooling. 
- """ - if name in ["clip", "ViT-B/32"]: - name = "ViT-B/32" - elif name in ["clip_resnet", "RN50x4"]: - name = "RN50x4" - elif name in ["clip_resnet_large", "RN50x16"]: - name = "RN50x16" - else: - raise ValueError(f"encoder {name} not recognized") - - encoder = clip.load(name, device=device)[0].visual - - if device is not None: - encoder = encoder.to(device) - - if "RN" in name: - # remove attention pooling - encoder.attnpool = Lambda( - partial(rearrange, pattern="b d h w -> b (h w) d") - ) # remove attn pooling, just use reshaped features - - return encoder - - -def get_image_encoder( - name: str, device: Union[torch.device, str] = None, pretrained: bool = False -) -> torch.nn.Module: - """ - Loads image encoder module - """ - if name == "nfresnet50": - encoder = nfresnet50(device=device, pretrained=pretrained) - elif "clip" in name: - encoder = clip_encoder(device=device, name=name) - else: - raise ValueError(f"image encoder {name} not recognized") - return encoder diff --git a/spaces/EuroPython2022/Model-Recommendation/README.md b/spaces/EuroPython2022/Model-Recommendation/README.md deleted file mode 100644 index ae7f2eabe12f5148411284e54eede4b2312b3c40..0000000000000000000000000000000000000000 --- a/spaces/EuroPython2022/Model-Recommendation/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Model Recommendation -emoji: 🏃 -colorFrom: yellow -colorTo: indigo -sdk: gradio -sdk_version: 3.0.26 -app_file: App.py -pinned: false -license: afl-3.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/FaceOnLive/Face-Liveness-Detection-SDK/facewrapper/facewrapper.py b/spaces/FaceOnLive/Face-Liveness-Detection-SDK/facewrapper/facewrapper.py deleted file mode 100644 index 4b30d971e234ad1f49f829f83872d37f6ccd7535..0000000000000000000000000000000000000000 --- a/spaces/FaceOnLive/Face-Liveness-Detection-SDK/facewrapper/facewrapper.py +++ /dev/null @@ -1,31 +0,0 @@ -import ctypes, ctypes.util -from ctypes import * -from numpy.ctypeslib import ndpointer -import sys -import os -sys.path.append('/opt/intel/openvino_2022/runtime/lib/intel64') - -lib_path = os.path.abspath(os.path.dirname(__file__)) + '/libs/libttvfaceengine7.so' -liveness_engine = cdll.LoadLibrary(lib_path) - -ttv_version = liveness_engine.ttv_version -ttv_version.argtypes = [] -ttv_version.restype = ctypes.c_char_p - -ttv_get_hwid = liveness_engine.ttv_get_hwid -ttv_get_hwid.argtypes = [] -ttv_get_hwid.restype = ctypes.c_char_p - -ttv_init = liveness_engine.ttv_init -ttv_init.argtypes = [ctypes.c_char_p, ctypes.c_char_p] -ttv_init.restype = ctypes.c_int32 - -ttv_init_offline = liveness_engine.ttv_init_offline -ttv_init_offline.argtypes = [ctypes.c_char_p, ctypes.c_char_p] -ttv_init_offline.restype = ctypes.c_int32 - - -ttv_detect_face = liveness_engine.ttv_detect_face -ttv_detect_face.argtypes = [ndpointer(ctypes.c_ubyte, flags='C_CONTIGUOUS'), ctypes.c_int32, ctypes.c_int32, ndpointer(ctypes.c_int32, flags='C_CONTIGUOUS'), ndpointer(ctypes.c_double, flags='C_CONTIGUOUS'), ndpointer(ctypes.c_double, flags='C_CONTIGUOUS')] -ttv_detect_face.restype = ctypes.c_int32 - diff --git a/spaces/Fengbinbin/gpt-academic/docs/waifu_plugin/jquery-ui.min.js b/spaces/Fengbinbin/gpt-academic/docs/waifu_plugin/jquery-ui.min.js deleted file mode 100644 index 25398a167415050ae8bfb0bfebac6aa3ab790909..0000000000000000000000000000000000000000 --- a/spaces/Fengbinbin/gpt-academic/docs/waifu_plugin/jquery-ui.min.js +++ /dev/null @@ -1,13 +0,0 @@ -/*! 
jQuery UI - v1.12.1 - 2016-09-14 -* http://jqueryui.com -* Includes: widget.js, position.js, data.js, disable-selection.js, effect.js, effects/effect-blind.js, effects/effect-bounce.js, effects/effect-clip.js, effects/effect-drop.js, effects/effect-explode.js, effects/effect-fade.js, effects/effect-fold.js, effects/effect-highlight.js, effects/effect-puff.js, effects/effect-pulsate.js, effects/effect-scale.js, effects/effect-shake.js, effects/effect-size.js, effects/effect-slide.js, effects/effect-transfer.js, focusable.js, form-reset-mixin.js, jquery-1-7.js, keycode.js, labels.js, scroll-parent.js, tabbable.js, unique-id.js, widgets/accordion.js, widgets/autocomplete.js, widgets/button.js, widgets/checkboxradio.js, widgets/controlgroup.js, widgets/datepicker.js, widgets/dialog.js, widgets/draggable.js, widgets/droppable.js, widgets/menu.js, widgets/mouse.js, widgets/progressbar.js, widgets/resizable.js, widgets/selectable.js, widgets/selectmenu.js, widgets/slider.js, widgets/sortable.js, widgets/spinner.js, widgets/tabs.js, widgets/tooltip.js -* Copyright jQuery Foundation and other contributors; Licensed MIT */ - -(function(t){"function"==typeof define&&define.amd?define(["jquery"],t):t(jQuery)})(function(t){function e(t){for(var e=t.css("visibility");"inherit"===e;)t=t.parent(),e=t.css("visibility");return"hidden"!==e}function i(t){for(var e,i;t.length&&t[0]!==document;){if(e=t.css("position"),("absolute"===e||"relative"===e||"fixed"===e)&&(i=parseInt(t.css("zIndex"),10),!isNaN(i)&&0!==i))return i;t=t.parent()}return 0}function s(){this._curInst=null,this._keyEvent=!1,this._disabledInputs=[],this._datepickerShowing=!1,this._inDialog=!1,this._mainDivId="ui-datepicker-div",this._inlineClass="ui-datepicker-inline",this._appendClass="ui-datepicker-append",this._triggerClass="ui-datepicker-trigger",this._dialogClass="ui-datepicker-dialog",this._disableClass="ui-datepicker-disabled",this._unselectableClass="ui-datepicker-unselectable",this._currentClass="ui-datepicker-current-day",this._dayOverClass="ui-datepicker-days-cell-over",this.regional=[],this.regional[""]={closeText:"Done",prevText:"Prev",nextText:"Next",currentText:"Today",monthNames:["January","February","March","April","May","June","July","August","September","October","November","December"],monthNamesShort:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],dayNames:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],dayNamesShort:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],dayNamesMin:["Su","Mo","Tu","We","Th","Fr","Sa"],weekHeader:"Wk",dateFormat:"mm/dd/yy",firstDay:0,isRTL:!1,showMonthAfterYear:!1,yearSuffix:""},this._defaults={showOn:"focus",showAnim:"fadeIn",showOptions:{},defaultDate:null,appendText:"",buttonText:"...",buttonImage:"",buttonImageOnly:!1,hideIfNoPrevNext:!1,navigationAsDateFormat:!1,gotoCurrent:!1,changeMonth:!1,changeYear:!1,yearRange:"c-10:c+10",showOtherMonths:!1,selectOtherMonths:!1,showWeek:!1,calculateWeek:this.iso8601Week,shortYearCutoff:"+10",minDate:null,maxDate:null,duration:"fast",beforeShowDay:null,beforeShow:null,onSelect:null,onChangeMonthYear:null,onClose:null,numberOfMonths:1,showCurrentAtPos:0,stepMonths:1,stepBigMonths:12,altField:"",altFormat:"",constrainInput:!0,showButtonPanel:!1,autoSize:!1,disabled:!1},t.extend(this._defaults,this.regional[""]),this.regional.en=t.extend(!0,{},this.regional[""]),this.regional["en-US"]=t.extend(!0,{},this.regional.en),this.dpDiv=n(t(""))}function n(e){var i="button, .ui-datepicker-prev, .ui-datepicker-next, 
.ui-datepicker-calendar td a";return e.on("mouseout",i,function(){t(this).removeClass("ui-state-hover"),-1!==this.className.indexOf("ui-datepicker-prev")&&t(this).removeClass("ui-datepicker-prev-hover"),-1!==this.className.indexOf("ui-datepicker-next")&&t(this).removeClass("ui-datepicker-next-hover")}).on("mouseover",i,o)}function o(){t.datepicker._isDisabledDatepicker(m.inline?m.dpDiv.parent()[0]:m.input[0])||(t(this).parents(".ui-datepicker-calendar").find("a").removeClass("ui-state-hover"),t(this).addClass("ui-state-hover"),-1!==this.className.indexOf("ui-datepicker-prev")&&t(this).addClass("ui-datepicker-prev-hover"),-1!==this.className.indexOf("ui-datepicker-next")&&t(this).addClass("ui-datepicker-next-hover"))}function a(e,i){t.extend(e,i);for(var s in i)null==i[s]&&(e[s]=i[s]);return e}function r(t){return function(){var e=this.element.val();t.apply(this,arguments),this._refresh(),e!==this.element.val()&&this._trigger("change")}}t.ui=t.ui||{},t.ui.version="1.12.1";var h=0,l=Array.prototype.slice;t.cleanData=function(e){return function(i){var s,n,o;for(o=0;null!=(n=i[o]);o++)try{s=t._data(n,"events"),s&&s.remove&&t(n).triggerHandler("remove")}catch(a){}e(i)}}(t.cleanData),t.widget=function(e,i,s){var n,o,a,r={},h=e.split(".")[0];e=e.split(".")[1];var l=h+"-"+e;return s||(s=i,i=t.Widget),t.isArray(s)&&(s=t.extend.apply(null,[{}].concat(s))),t.expr[":"][l.toLowerCase()]=function(e){return!!t.data(e,l)},t[h]=t[h]||{},n=t[h][e],o=t[h][e]=function(t,e){return this._createWidget?(arguments.length&&this._createWidget(t,e),void 0):new o(t,e)},t.extend(o,n,{version:s.version,_proto:t.extend({},s),_childConstructors:[]}),a=new i,a.options=t.widget.extend({},a.options),t.each(s,function(e,s){return t.isFunction(s)?(r[e]=function(){function t(){return i.prototype[e].apply(this,arguments)}function n(t){return i.prototype[e].apply(this,t)}return function(){var e,i=this._super,o=this._superApply;return this._super=t,this._superApply=n,e=s.apply(this,arguments),this._super=i,this._superApply=o,e}}(),void 0):(r[e]=s,void 0)}),o.prototype=t.widget.extend(a,{widgetEventPrefix:n?a.widgetEventPrefix||e:e},r,{constructor:o,namespace:h,widgetName:e,widgetFullName:l}),n?(t.each(n._childConstructors,function(e,i){var s=i.prototype;t.widget(s.namespace+"."+s.widgetName,o,i._proto)}),delete n._childConstructors):i._childConstructors.push(o),t.widget.bridge(e,o),o},t.widget.extend=function(e){for(var i,s,n=l.call(arguments,1),o=0,a=n.length;a>o;o++)for(i in n[o])s=n[o][i],n[o].hasOwnProperty(i)&&void 0!==s&&(e[i]=t.isPlainObject(s)?t.isPlainObject(e[i])?t.widget.extend({},e[i],s):t.widget.extend({},s):s);return e},t.widget.bridge=function(e,i){var s=i.prototype.widgetFullName||e;t.fn[e]=function(n){var o="string"==typeof n,a=l.call(arguments,1),r=this;return o?this.length||"instance"!==n?this.each(function(){var i,o=t.data(this,s);return"instance"===n?(r=o,!1):o?t.isFunction(o[n])&&"_"!==n.charAt(0)?(i=o[n].apply(o,a),i!==o&&void 0!==i?(r=i&&i.jquery?r.pushStack(i.get()):i,!1):void 0):t.error("no such method '"+n+"' for "+e+" widget instance"):t.error("cannot call methods on "+e+" prior to initialization; "+"attempted to call method '"+n+"'")}):r=void 0:(a.length&&(n=t.widget.extend.apply(null,[n].concat(a))),this.each(function(){var e=t.data(this,s);e?(e.option(n||{}),e._init&&e._init()):t.data(this,s,new i(n,this))})),r}},t.Widget=function(){},t.Widget._childConstructors=[],t.Widget.prototype={widgetName:"widget",widgetEventPrefix:"",defaultElement:"- 
",options:{classes:{},disabled:!1,create:null},_createWidget:function(e,i){i=t(i||this.defaultElement||this)[0],this.element=t(i),this.uuid=h++,this.eventNamespace="."+this.widgetName+this.uuid,this.bindings=t(),this.hoverable=t(),this.focusable=t(),this.classesElementLookup={},i!==this&&(t.data(i,this.widgetFullName,this),this._on(!0,this.element,{remove:function(t){t.target===i&&this.destroy()}}),this.document=t(i.style?i.ownerDocument:i.document||i),this.window=t(this.document[0].defaultView||this.document[0].parentWindow)),this.options=t.widget.extend({},this.options,this._getCreateOptions(),e),this._create(),this.options.disabled&&this._setOptionDisabled(this.options.disabled),this._trigger("create",null,this._getCreateEventData()),this._init()},_getCreateOptions:function(){return{}},_getCreateEventData:t.noop,_create:t.noop,_init:t.noop,destroy:function(){var e=this;this._destroy(),t.each(this.classesElementLookup,function(t,i){e._removeClass(i,t)}),this.element.off(this.eventNamespace).removeData(this.widgetFullName),this.widget().off(this.eventNamespace).removeAttr("aria-disabled"),this.bindings.off(this.eventNamespace)},_destroy:t.noop,widget:function(){return this.element},option:function(e,i){var s,n,o,a=e;if(0===arguments.length)return t.widget.extend({},this.options);if("string"==typeof e)if(a={},s=e.split("."),e=s.shift(),s.length){for(n=a[e]=t.widget.extend({},this.options[e]),o=0;s.length-1>o;o++)n[s[o]]=n[s[o]]||{},n=n[s[o]];if(e=s.pop(),1===arguments.length)return void 0===n[e]?null:n[e];n[e]=i}else{if(1===arguments.length)return void 0===this.options[e]?null:this.options[e];a[e]=i}return this._setOptions(a),this},_setOptions:function(t){var e;for(e in t)this._setOption(e,t[e]);return this},_setOption:function(t,e){return"classes"===t&&this._setOptionClasses(e),this.options[t]=e,"disabled"===t&&this._setOptionDisabled(e),this},_setOptionClasses:function(e){var i,s,n;for(i in e)n=this.classesElementLookup[i],e[i]!==this.options.classes[i]&&n&&n.length&&(s=t(n.get()),this._removeClass(n,i),s.addClass(this._classes({element:s,keys:i,classes:e,add:!0})))},_setOptionDisabled:function(t){this._toggleClass(this.widget(),this.widgetFullName+"-disabled",null,!!t),t&&(this._removeClass(this.hoverable,null,"ui-state-hover"),this._removeClass(this.focusable,null,"ui-state-focus"))},enable:function(){return this._setOptions({disabled:!1})},disable:function(){return this._setOptions({disabled:!0})},_classes:function(e){function i(i,o){var a,r;for(r=0;i.length>r;r++)a=n.classesElementLookup[i[r]]||t(),a=e.add?t(t.unique(a.get().concat(e.element.get()))):t(a.not(e.element).get()),n.classesElementLookup[i[r]]=a,s.push(i[r]),o&&e.classes[i[r]]&&s.push(e.classes[i[r]])}var s=[],n=this;return e=t.extend({element:this.element,classes:this.options.classes||{}},e),this._on(e.element,{remove:"_untrackClassesElement"}),e.keys&&i(e.keys.match(/\S+/g)||[],!0),e.extra&&i(e.extra.match(/\S+/g)||[]),s.join(" ")},_untrackClassesElement:function(e){var i=this;t.each(i.classesElementLookup,function(s,n){-1!==t.inArray(e.target,n)&&(i.classesElementLookup[s]=t(n.not(e.target).get()))})},_removeClass:function(t,e,i){return this._toggleClass(t,e,i,!1)},_addClass:function(t,e,i){return this._toggleClass(t,e,i,!0)},_toggleClass:function(t,e,i,s){s="boolean"==typeof s?s:i;var n="string"==typeof t||null===t,o={extra:n?e:i,keys:n?t:e,element:n?this.element:t,add:s};return o.element.toggleClass(this._classes(o),s),this},_on:function(e,i,s){var n,o=this;"boolean"!=typeof 
e&&(s=i,i=e,e=!1),s?(i=n=t(i),this.bindings=this.bindings.add(i)):(s=i,i=this.element,n=this.widget()),t.each(s,function(s,a){function r(){return e||o.options.disabled!==!0&&!t(this).hasClass("ui-state-disabled")?("string"==typeof a?o[a]:a).apply(o,arguments):void 0}"string"!=typeof a&&(r.guid=a.guid=a.guid||r.guid||t.guid++);var h=s.match(/^([\w:-]*)\s*(.*)$/),l=h[1]+o.eventNamespace,c=h[2];c?n.on(l,c,r):i.on(l,r)})},_off:function(e,i){i=(i||"").split(" ").join(this.eventNamespace+" ")+this.eventNamespace,e.off(i).off(i),this.bindings=t(this.bindings.not(e).get()),this.focusable=t(this.focusable.not(e).get()),this.hoverable=t(this.hoverable.not(e).get())},_delay:function(t,e){function i(){return("string"==typeof t?s[t]:t).apply(s,arguments)}var s=this;return setTimeout(i,e||0)},_hoverable:function(e){this.hoverable=this.hoverable.add(e),this._on(e,{mouseenter:function(e){this._addClass(t(e.currentTarget),null,"ui-state-hover")},mouseleave:function(e){this._removeClass(t(e.currentTarget),null,"ui-state-hover")}})},_focusable:function(e){this.focusable=this.focusable.add(e),this._on(e,{focusin:function(e){this._addClass(t(e.currentTarget),null,"ui-state-focus")},focusout:function(e){this._removeClass(t(e.currentTarget),null,"ui-state-focus")}})},_trigger:function(e,i,s){var n,o,a=this.options[e];if(s=s||{},i=t.Event(i),i.type=(e===this.widgetEventPrefix?e:this.widgetEventPrefix+e).toLowerCase(),i.target=this.element[0],o=i.originalEvent)for(n in o)n in i||(i[n]=o[n]);return this.element.trigger(i,s),!(t.isFunction(a)&&a.apply(this.element[0],[i].concat(s))===!1||i.isDefaultPrevented())}},t.each({show:"fadeIn",hide:"fadeOut"},function(e,i){t.Widget.prototype["_"+e]=function(s,n,o){"string"==typeof n&&(n={effect:n});var a,r=n?n===!0||"number"==typeof n?i:n.effect||i:e;n=n||{},"number"==typeof n&&(n={duration:n}),a=!t.isEmptyObject(n),n.complete=o,n.delay&&s.delay(n.delay),a&&t.effects&&t.effects.effect[r]?s[e](n):r!==e&&s[r]?s[r](n.duration,n.easing,o):s.queue(function(i){t(this)[e](),o&&o.call(s[0]),i()})}}),t.widget,function(){function e(t,e,i){return[parseFloat(t[0])*(u.test(t[0])?e/100:1),parseFloat(t[1])*(u.test(t[1])?i/100:1)]}function i(e,i){return parseInt(t.css(e,i),10)||0}function s(e){var i=e[0];return 9===i.nodeType?{width:e.width(),height:e.height(),offset:{top:0,left:0}}:t.isWindow(i)?{width:e.width(),height:e.height(),offset:{top:e.scrollTop(),left:e.scrollLeft()}}:i.preventDefault?{width:0,height:0,offset:{top:i.pageY,left:i.pageX}}:{width:e.outerWidth(),height:e.outerHeight(),offset:e.offset()}}var n,o=Math.max,a=Math.abs,r=/left|center|right/,h=/top|center|bottom/,l=/[\+\-]\d+(\.[\d]+)?%?/,c=/^\w+/,u=/%$/,d=t.fn.position;t.position={scrollbarWidth:function(){if(void 0!==n)return n;var e,i,s=t(""),o=s.children()[0];return t("body").append(s),e=o.offsetWidth,s.css("overflow","scroll"),i=o.offsetWidth,e===i&&(i=s[0].clientWidth),s.remove(),n=e-i},getScrollInfo:function(e){var i=e.isWindow||e.isDocument?"":e.element.css("overflow-x"),s=e.isWindow||e.isDocument?"":e.element.css("overflow-y"),n="scroll"===i||"auto"===i&&e.widthi?"left":e>0?"right":"center",vertical:0>r?"top":s>0?"bottom":"middle"};l>p&&p>a(e+i)&&(u.horizontal="center"),c>f&&f>a(s+r)&&(u.vertical="middle"),u.important=o(a(e),a(i))>o(a(s),a(r))?"horizontal":"vertical",n.using.call(this,t,u)}),h.offset(t.extend(D,{using:r}))})},t.ui.position={fit:{left:function(t,e){var 
i,s=e.within,n=s.isWindow?s.scrollLeft:s.offset.left,a=s.width,r=t.left-e.collisionPosition.marginLeft,h=n-r,l=r+e.collisionWidth-a-n;e.collisionWidth>a?h>0&&0>=l?(i=t.left+h+e.collisionWidth-a-n,t.left+=h-i):t.left=l>0&&0>=h?n:h>l?n+a-e.collisionWidth:n:h>0?t.left+=h:l>0?t.left-=l:t.left=o(t.left-r,t.left)},top:function(t,e){var i,s=e.within,n=s.isWindow?s.scrollTop:s.offset.top,a=e.within.height,r=t.top-e.collisionPosition.marginTop,h=n-r,l=r+e.collisionHeight-a-n;e.collisionHeight>a?h>0&&0>=l?(i=t.top+h+e.collisionHeight-a-n,t.top+=h-i):t.top=l>0&&0>=h?n:h>l?n+a-e.collisionHeight:n:h>0?t.top+=h:l>0?t.top-=l:t.top=o(t.top-r,t.top)}},flip:{left:function(t,e){var i,s,n=e.within,o=n.offset.left+n.scrollLeft,r=n.width,h=n.isWindow?n.scrollLeft:n.offset.left,l=t.left-e.collisionPosition.marginLeft,c=l-h,u=l+e.collisionWidth-r-h,d="left"===e.my[0]?-e.elemWidth:"right"===e.my[0]?e.elemWidth:0,p="left"===e.at[0]?e.targetWidth:"right"===e.at[0]?-e.targetWidth:0,f=-2*e.offset[0];0>c?(i=t.left+d+p+f+e.collisionWidth-r-o,(0>i||a(c)>i)&&(t.left+=d+p+f)):u>0&&(s=t.left-e.collisionPosition.marginLeft+d+p+f-h,(s>0||u>a(s))&&(t.left+=d+p+f))},top:function(t,e){var i,s,n=e.within,o=n.offset.top+n.scrollTop,r=n.height,h=n.isWindow?n.scrollTop:n.offset.top,l=t.top-e.collisionPosition.marginTop,c=l-h,u=l+e.collisionHeight-r-h,d="top"===e.my[1],p=d?-e.elemHeight:"bottom"===e.my[1]?e.elemHeight:0,f="top"===e.at[1]?e.targetHeight:"bottom"===e.at[1]?-e.targetHeight:0,g=-2*e.offset[1];0>c?(s=t.top+p+f+g+e.collisionHeight-r-o,(0>s||a(c)>s)&&(t.top+=p+f+g)):u>0&&(i=t.top-e.collisionPosition.marginTop+p+f+g-h,(i>0||u>a(i))&&(t.top+=p+f+g))}},flipfit:{left:function(){t.ui.position.flip.left.apply(this,arguments),t.ui.position.fit.left.apply(this,arguments)},top:function(){t.ui.position.flip.top.apply(this,arguments),t.ui.position.fit.top.apply(this,arguments)}}}}(),t.ui.position,t.extend(t.expr[":"],{data:t.expr.createPseudo?t.expr.createPseudo(function(e){return function(i){return!!t.data(i,e)}}):function(e,i,s){return!!t.data(e,s[3])}}),t.fn.extend({disableSelection:function(){var t="onselectstart"in document.createElement("div")?"selectstart":"mousedown";return function(){return this.on(t+".ui-disableSelection",function(t){t.preventDefault()})}}(),enableSelection:function(){return this.off(".ui-disableSelection")}});var c="ui-effects-",u="ui-effects-style",d="ui-effects-animated",p=t;t.effects={effect:{}},function(t,e){function i(t,e,i){var s=u[e.type]||{};return null==t?i||!e.def?null:e.def:(t=s.floor?~~t:parseFloat(t),isNaN(t)?e.def:s.mod?(t+s.mod)%s.mod:0>t?0:t>s.max?s.max:t)}function s(i){var s=l(),n=s._rgba=[];return i=i.toLowerCase(),f(h,function(t,o){var a,r=o.re.exec(i),h=r&&o.parse(r),l=o.space||"rgba";return h?(a=s[l](h),s[c[l].cache]=a[c[l].cache],n=s._rgba=a._rgba,!1):e}),n.length?("0,0,0,0"===n.join()&&t.extend(n,o.transparent),s):o[i]}function n(t,e,i){return i=(i+1)%1,1>6*i?t+6*(e-t)*i:1>2*i?e:2>3*i?t+6*(e-t)*(2/3-i):t}var o,a="backgroundColor borderBottomColor borderLeftColor borderRightColor borderTopColor color columnRuleColor outlineColor textDecorationColor 
textEmphasisColor",r=/^([\-+])=\s*(\d+\.?\d*)/,h=[{re:/rgba?\(\s*(\d{1,3})\s*,\s*(\d{1,3})\s*,\s*(\d{1,3})\s*(?:,\s*(\d?(?:\.\d+)?)\s*)?\)/,parse:function(t){return[t[1],t[2],t[3],t[4]]}},{re:/rgba?\(\s*(\d+(?:\.\d+)?)\%\s*,\s*(\d+(?:\.\d+)?)\%\s*,\s*(\d+(?:\.\d+)?)\%\s*(?:,\s*(\d?(?:\.\d+)?)\s*)?\)/,parse:function(t){return[2.55*t[1],2.55*t[2],2.55*t[3],t[4]]}},{re:/#([a-f0-9]{2})([a-f0-9]{2})([a-f0-9]{2})/,parse:function(t){return[parseInt(t[1],16),parseInt(t[2],16),parseInt(t[3],16)]}},{re:/#([a-f0-9])([a-f0-9])([a-f0-9])/,parse:function(t){return[parseInt(t[1]+t[1],16),parseInt(t[2]+t[2],16),parseInt(t[3]+t[3],16)]}},{re:/hsla?\(\s*(\d+(?:\.\d+)?)\s*,\s*(\d+(?:\.\d+)?)\%\s*,\s*(\d+(?:\.\d+)?)\%\s*(?:,\s*(\d?(?:\.\d+)?)\s*)?\)/,space:"hsla",parse:function(t){return[t[1],t[2]/100,t[3]/100,t[4]]}}],l=t.Color=function(e,i,s,n){return new t.Color.fn.parse(e,i,s,n)},c={rgba:{props:{red:{idx:0,type:"byte"},green:{idx:1,type:"byte"},blue:{idx:2,type:"byte"}}},hsla:{props:{hue:{idx:0,type:"degrees"},saturation:{idx:1,type:"percent"},lightness:{idx:2,type:"percent"}}}},u={"byte":{floor:!0,max:255},percent:{max:1},degrees:{mod:360,floor:!0}},d=l.support={},p=t(" ")[0],f=t.each;p.style.cssText="background-color:rgba(1,1,1,.5)",d.rgba=p.style.backgroundColor.indexOf("rgba")>-1,f(c,function(t,e){e.cache="_"+t,e.props.alpha={idx:3,type:"percent",def:1}}),l.fn=t.extend(l.prototype,{parse:function(n,a,r,h){if(n===e)return this._rgba=[null,null,null,null],this;(n.jquery||n.nodeType)&&(n=t(n).css(a),a=e);var u=this,d=t.type(n),p=this._rgba=[];return a!==e&&(n=[n,a,r,h],d="array"),"string"===d?this.parse(s(n)||o._default):"array"===d?(f(c.rgba.props,function(t,e){p[e.idx]=i(n[e.idx],e)}),this):"object"===d?(n instanceof l?f(c,function(t,e){n[e.cache]&&(u[e.cache]=n[e.cache].slice())}):f(c,function(e,s){var o=s.cache;f(s.props,function(t,e){if(!u[o]&&s.to){if("alpha"===t||null==n[t])return;u[o]=s.to(u._rgba)}u[o][e.idx]=i(n[t],e,!0)}),u[o]&&0>t.inArray(null,u[o].slice(0,3))&&(u[o][3]=1,s.from&&(u._rgba=s.from(u[o])))}),this):e},is:function(t){var i=l(t),s=!0,n=this;return f(c,function(t,o){var a,r=i[o.cache];return r&&(a=n[o.cache]||o.to&&o.to(n._rgba)||[],f(o.props,function(t,i){return null!=r[i.idx]?s=r[i.idx]===a[i.idx]:e})),s}),s},_space:function(){var t=[],e=this;return f(c,function(i,s){e[s.cache]&&t.push(i)}),t.pop()},transition:function(t,e){var s=l(t),n=s._space(),o=c[n],a=0===this.alpha()?l("transparent"):this,r=a[o.cache]||o.to(a._rgba),h=r.slice();return s=s[o.cache],f(o.props,function(t,n){var o=n.idx,a=r[o],l=s[o],c=u[n.type]||{};null!==l&&(null===a?h[o]=l:(c.mod&&(l-a>c.mod/2?a+=c.mod:a-l>c.mod/2&&(a-=c.mod)),h[o]=i((l-a)*e+a,n)))}),this[n](h)},blend:function(e){if(1===this._rgba[3])return this;var i=this._rgba.slice(),s=i.pop(),n=l(e)._rgba;return l(t.map(i,function(t,e){return(1-s)*n[e]+s*t}))},toRgbaString:function(){var e="rgba(",i=t.map(this._rgba,function(t,e){return null==t?e>2?1:0:t});return 1===i[3]&&(i.pop(),e="rgb("),e+i.join()+")"},toHslaString:function(){var e="hsla(",i=t.map(this.hsla(),function(t,e){return null==t&&(t=e>2?1:0),e&&3>e&&(t=Math.round(100*t)+"%"),t});return 1===i[3]&&(i.pop(),e="hsl("),e+i.join()+")"},toHexString:function(e){var i=this._rgba.slice(),s=i.pop();return e&&i.push(~~(255*s)),"#"+t.map(i,function(t){return t=(t||0).toString(16),1===t.length?"0"+t:t}).join("")},toString:function(){return 
0===this._rgba[3]?"transparent":this.toRgbaString()}}),l.fn.parse.prototype=l.fn,c.hsla.to=function(t){if(null==t[0]||null==t[1]||null==t[2])return[null,null,null,t[3]];var e,i,s=t[0]/255,n=t[1]/255,o=t[2]/255,a=t[3],r=Math.max(s,n,o),h=Math.min(s,n,o),l=r-h,c=r+h,u=.5*c;return e=h===r?0:s===r?60*(n-o)/l+360:n===r?60*(o-s)/l+120:60*(s-n)/l+240,i=0===l?0:.5>=u?l/c:l/(2-c),[Math.round(e)%360,i,u,null==a?1:a]},c.hsla.from=function(t){if(null==t[0]||null==t[1]||null==t[2])return[null,null,null,t[3]];var e=t[0]/360,i=t[1],s=t[2],o=t[3],a=.5>=s?s*(1+i):s+i-s*i,r=2*s-a;return[Math.round(255*n(r,a,e+1/3)),Math.round(255*n(r,a,e)),Math.round(255*n(r,a,e-1/3)),o]},f(c,function(s,n){var o=n.props,a=n.cache,h=n.to,c=n.from;l.fn[s]=function(s){if(h&&!this[a]&&(this[a]=h(this._rgba)),s===e)return this[a].slice();var n,r=t.type(s),u="array"===r||"object"===r?s:arguments,d=this[a].slice();return f(o,function(t,e){var s=u["object"===r?t:e.idx];null==s&&(s=d[e.idx]),d[e.idx]=i(s,e)}),c?(n=l(c(d)),n[a]=d,n):l(d)},f(o,function(e,i){l.fn[e]||(l.fn[e]=function(n){var o,a=t.type(n),h="alpha"===e?this._hsla?"hsla":"rgba":s,l=this[h](),c=l[i.idx];return"undefined"===a?c:("function"===a&&(n=n.call(this,c),a=t.type(n)),null==n&&i.empty?this:("string"===a&&(o=r.exec(n),o&&(n=c+parseFloat(o[2])*("+"===o[1]?1:-1))),l[i.idx]=n,this[h](l)))})})}),l.hook=function(e){var i=e.split(" ");f(i,function(e,i){t.cssHooks[i]={set:function(e,n){var o,a,r="";if("transparent"!==n&&("string"!==t.type(n)||(o=s(n)))){if(n=l(o||n),!d.rgba&&1!==n._rgba[3]){for(a="backgroundColor"===i?e.parentNode:e;(""===r||"transparent"===r)&&a&&a.style;)try{r=t.css(a,"backgroundColor"),a=a.parentNode}catch(h){}n=n.blend(r&&"transparent"!==r?r:"_default")}n=n.toRgbaString()}try{e.style[i]=n}catch(h){}}},t.fx.step[i]=function(e){e.colorInit||(e.start=l(e.elem,i),e.end=l(e.end),e.colorInit=!0),t.cssHooks[i].set(e.elem,e.start.transition(e.end,e.pos))}})},l.hook(a),t.cssHooks.borderColor={expand:function(t){var e={};return f(["Top","Right","Bottom","Left"],function(i,s){e["border"+s+"Color"]=t}),e}},o=t.Color.names={aqua:"#00ffff",black:"#000000",blue:"#0000ff",fuchsia:"#ff00ff",gray:"#808080",green:"#008000",lime:"#00ff00",maroon:"#800000",navy:"#000080",olive:"#808000",purple:"#800080",red:"#ff0000",silver:"#c0c0c0",teal:"#008080",white:"#ffffff",yellow:"#ffff00",transparent:[null,null,null,0],_default:"#ffffff"}}(p),function(){function e(e){var i,s,n=e.ownerDocument.defaultView?e.ownerDocument.defaultView.getComputedStyle(e,null):e.currentStyle,o={};if(n&&n.length&&n[0]&&n[n[0]])for(s=n.length;s--;)i=n[s],"string"==typeof n[i]&&(o[t.camelCase(i)]=n[i]);else for(i in n)"string"==typeof n[i]&&(o[i]=n[i]);return o}function i(e,i){var s,o,a={};for(s in i)o=i[s],e[s]!==o&&(n[s]||(t.fx.step[s]||!isNaN(parseFloat(o)))&&(a[s]=o));return a}var s=["add","remove","toggle"],n={border:1,borderBottom:1,borderColor:1,borderLeft:1,borderRight:1,borderTop:1,borderWidth:1,margin:1,padding:1};t.each(["borderLeftStyle","borderRightStyle","borderBottomStyle","borderTopStyle"],function(e,i){t.fx.step[i]=function(t){("none"!==t.end&&!t.setAttr||1===t.pos&&!t.setAttr)&&(p.style(t.elem,i,t.end),t.setAttr=!0)}}),t.fn.addBack||(t.fn.addBack=function(t){return this.add(null==t?this.prevObject:this.prevObject.filter(t))}),t.effects.animateClass=function(n,o,a,r){var h=t.speed(o,a,r);return this.queue(function(){var o,a=t(this),r=a.attr("class")||"",l=h.children?a.find("*").addBack():a;l=l.map(function(){var 
i=t(this);return{el:i,start:e(this)}}),o=function(){t.each(s,function(t,e){n[e]&&a[e+"Class"](n[e])})},o(),l=l.map(function(){return this.end=e(this.el[0]),this.diff=i(this.start,this.end),this}),a.attr("class",r),l=l.map(function(){var e=this,i=t.Deferred(),s=t.extend({},h,{queue:!1,complete:function(){i.resolve(e)}});return this.el.animate(this.diff,s),i.promise()}),t.when.apply(t,l.get()).done(function(){o(),t.each(arguments,function(){var e=this.el;t.each(this.diff,function(t){e.css(t,"")})}),h.complete.call(a[0])})})},t.fn.extend({addClass:function(e){return function(i,s,n,o){return s?t.effects.animateClass.call(this,{add:i},s,n,o):e.apply(this,arguments)}}(t.fn.addClass),removeClass:function(e){return function(i,s,n,o){return arguments.length>1?t.effects.animateClass.call(this,{remove:i},s,n,o):e.apply(this,arguments)}}(t.fn.removeClass),toggleClass:function(e){return function(i,s,n,o,a){return"boolean"==typeof s||void 0===s?n?t.effects.animateClass.call(this,s?{add:i}:{remove:i},n,o,a):e.apply(this,arguments):t.effects.animateClass.call(this,{toggle:i},s,n,o)}}(t.fn.toggleClass),switchClass:function(e,i,s,n,o){return t.effects.animateClass.call(this,{add:i,remove:e},s,n,o)}})}(),function(){function e(e,i,s,n){return t.isPlainObject(e)&&(i=e,e=e.effect),e={effect:e},null==i&&(i={}),t.isFunction(i)&&(n=i,s=null,i={}),("number"==typeof i||t.fx.speeds[i])&&(n=s,s=i,i={}),t.isFunction(s)&&(n=s,s=null),i&&t.extend(e,i),s=s||i.duration,e.duration=t.fx.off?0:"number"==typeof s?s:s in t.fx.speeds?t.fx.speeds[s]:t.fx.speeds._default,e.complete=n||i.complete,e}function i(e){return!e||"number"==typeof e||t.fx.speeds[e]?!0:"string"!=typeof e||t.effects.effect[e]?t.isFunction(e)?!0:"object"!=typeof e||e.effect?!1:!0:!0}function s(t,e){var i=e.outerWidth(),s=e.outerHeight(),n=/^rect\((-?\d*\.?\d*px|-?\d+%|auto),?\s*(-?\d*\.?\d*px|-?\d+%|auto),?\s*(-?\d*\.?\d*px|-?\d+%|auto),?\s*(-?\d*\.?\d*px|-?\d+%|auto)\)$/,o=n.exec(t)||["",0,i,s,0];return{top:parseFloat(o[1])||0,right:"auto"===o[2]?i:parseFloat(o[2]),bottom:"auto"===o[3]?s:parseFloat(o[3]),left:parseFloat(o[4])||0}}t.expr&&t.expr.filters&&t.expr.filters.animated&&(t.expr.filters.animated=function(e){return function(i){return!!t(i).data(d)||e(i)}}(t.expr.filters.animated)),t.uiBackCompat!==!1&&t.extend(t.effects,{save:function(t,e){for(var i=0,s=e.length;s>i;i++)null!==e[i]&&t.data(c+e[i],t[0].style[e[i]])},restore:function(t,e){for(var i,s=0,n=e.length;n>s;s++)null!==e[s]&&(i=t.data(c+e[s]),t.css(e[s],i))},setMode:function(t,e){return"toggle"===e&&(e=t.is(":hidden")?"show":"hide"),e},createWrapper:function(e){if(e.parent().is(".ui-effects-wrapper"))return e.parent();var i={width:e.outerWidth(!0),height:e.outerHeight(!0),"float":e.css("float")},s=t("
").addClass("ui-effects-wrapper").css({fontSize:"100%",background:"transparent",border:"none",margin:0,padding:0}),n={width:e.width(),height:e.height()},o=document.activeElement;try{o.id}catch(a){o=document.body}return e.wrap(s),(e[0]===o||t.contains(e[0],o))&&t(o).trigger("focus"),s=e.parent(),"static"===e.css("position")?(s.css({position:"relative"}),e.css({position:"relative"})):(t.extend(i,{position:e.css("position"),zIndex:e.css("z-index")}),t.each(["top","left","bottom","right"],function(t,s){i[s]=e.css(s),isNaN(parseInt(i[s],10))&&(i[s]="auto")}),e.css({position:"relative",top:0,left:0,right:"auto",bottom:"auto"})),e.css(n),s.css(i).show()},removeWrapper:function(e){var i=document.activeElement;return e.parent().is(".ui-effects-wrapper")&&(e.parent().replaceWith(e),(e[0]===i||t.contains(e[0],i))&&t(i).trigger("focus")),e}}),t.extend(t.effects,{version:"1.12.1",define:function(e,i,s){return s||(s=i,i="effect"),t.effects.effect[e]=s,t.effects.effect[e].mode=i,s},scaledDimensions:function(t,e,i){if(0===e)return{height:0,width:0,outerHeight:0,outerWidth:0};var s="horizontal"!==i?(e||100)/100:1,n="vertical"!==i?(e||100)/100:1;return{height:t.height()*n,width:t.width()*s,outerHeight:t.outerHeight()*n,outerWidth:t.outerWidth()*s}},clipToBox:function(t){return{width:t.clip.right-t.clip.left,height:t.clip.bottom-t.clip.top,left:t.clip.left,top:t.clip.top}},unshift:function(t,e,i){var s=t.queue();e>1&&s.splice.apply(s,[1,0].concat(s.splice(e,i))),t.dequeue()},saveStyle:function(t){t.data(u,t[0].style.cssText)},restoreStyle:function(t){t[0].style.cssText=t.data(u)||"",t.removeData(u)},mode:function(t,e){var i=t.is(":hidden");return"toggle"===e&&(e=i?"show":"hide"),(i?"hide"===e:"show"===e)&&(e="none"),e},getBaseline:function(t,e){var i,s;switch(t[0]){case"top":i=0;break;case"middle":i=.5;break;case"bottom":i=1;break;default:i=t[0]/e.height}switch(t[1]){case"left":s=0;break;case"center":s=.5;break;case"right":s=1;break;default:s=t[1]/e.width}return{x:s,y:i}},createPlaceholder:function(e){var i,s=e.css("position"),n=e.position();return e.css({marginTop:e.css("marginTop"),marginBottom:e.css("marginBottom"),marginLeft:e.css("marginLeft"),marginRight:e.css("marginRight")}).outerWidth(e.outerWidth()).outerHeight(e.outerHeight()),/^(static|relative)/.test(s)&&(s="absolute",i=t("<"+e[0].nodeName+">").insertAfter(e).css({display:/^(inline|ruby)/.test(e.css("display"))?"inline-block":"block",visibility:"hidden",marginTop:e.css("marginTop"),marginBottom:e.css("marginBottom"),marginLeft:e.css("marginLeft"),marginRight:e.css("marginRight"),"float":e.css("float")}).outerWidth(e.outerWidth()).outerHeight(e.outerHeight()).addClass("ui-effects-placeholder"),e.data(c+"placeholder",i)),e.css({position:s,left:n.left,top:n.top}),i},removePlaceholder:function(t){var e=c+"placeholder",i=t.data(e);i&&(i.remove(),t.removeData(e))},cleanUp:function(e){t.effects.restoreStyle(e),t.effects.removePlaceholder(e)},setTransition:function(e,i,s,n){return n=n||{},t.each(i,function(t,i){var o=e.cssUnit(i);o[0]>0&&(n[i]=o[0]*s+o[1])}),n}}),t.fn.extend({effect:function(){function i(e){function i(){r.removeData(d),t.effects.cleanUp(r),"hide"===s.mode&&r.hide(),a()}function a(){t.isFunction(h)&&h.call(r[0]),t.isFunction(e)&&e()}var r=t(this);s.mode=c.shift(),t.uiBackCompat===!1||o?"none"===s.mode?(r[l](),a()):n.call(r[0],s,i):(r.is(":hidden")?"hide"===l:"show"===l)?(r[l](),a()):n.call(r[0],s,a)}var s=e.apply(this,arguments),n=t.effects.effect[s.effect],o=n.mode,a=s.queue,r=a||"fx",h=s.complete,l=s.mode,c=[],u=function(e){var 
i=t(this),s=t.effects.mode(i,l)||o;i.data(d,!0),c.push(s),o&&("show"===s||s===o&&"hide"===s)&&i.show(),o&&"none"===s||t.effects.saveStyle(i),t.isFunction(e)&&e()};return t.fx.off||!n?l?this[l](s.duration,h):this.each(function(){h&&h.call(this)}):a===!1?this.each(u).each(i):this.queue(r,u).queue(r,i)},show:function(t){return function(s){if(i(s))return t.apply(this,arguments);var n=e.apply(this,arguments);return n.mode="show",this.effect.call(this,n) -}}(t.fn.show),hide:function(t){return function(s){if(i(s))return t.apply(this,arguments);var n=e.apply(this,arguments);return n.mode="hide",this.effect.call(this,n)}}(t.fn.hide),toggle:function(t){return function(s){if(i(s)||"boolean"==typeof s)return t.apply(this,arguments);var n=e.apply(this,arguments);return n.mode="toggle",this.effect.call(this,n)}}(t.fn.toggle),cssUnit:function(e){var i=this.css(e),s=[];return t.each(["em","px","%","pt"],function(t,e){i.indexOf(e)>0&&(s=[parseFloat(i),e])}),s},cssClip:function(t){return t?this.css("clip","rect("+t.top+"px "+t.right+"px "+t.bottom+"px "+t.left+"px)"):s(this.css("clip"),this)},transfer:function(e,i){var s=t(this),n=t(e.to),o="fixed"===n.css("position"),a=t("body"),r=o?a.scrollTop():0,h=o?a.scrollLeft():0,l=n.offset(),c={top:l.top-r,left:l.left-h,height:n.innerHeight(),width:n.innerWidth()},u=s.offset(),d=t("").appendTo("body").addClass(e.className).css({top:u.top-r,left:u.left-h,height:s.innerHeight(),width:s.innerWidth(),position:o?"fixed":"absolute"}).animate(c,e.duration,e.easing,function(){d.remove(),t.isFunction(i)&&i()})}}),t.fx.step.clip=function(e){e.clipInit||(e.start=t(e.elem).cssClip(),"string"==typeof e.end&&(e.end=s(e.end,e.elem)),e.clipInit=!0),t(e.elem).cssClip({top:e.pos*(e.end.top-e.start.top)+e.start.top,right:e.pos*(e.end.right-e.start.right)+e.start.right,bottom:e.pos*(e.end.bottom-e.start.bottom)+e.start.bottom,left:e.pos*(e.end.left-e.start.left)+e.start.left})}}(),function(){var e={};t.each(["Quad","Cubic","Quart","Quint","Expo"],function(t,i){e[i]=function(e){return Math.pow(e,t+2)}}),t.extend(e,{Sine:function(t){return 1-Math.cos(t*Math.PI/2)},Circ:function(t){return 1-Math.sqrt(1-t*t)},Elastic:function(t){return 0===t||1===t?t:-Math.pow(2,8*(t-1))*Math.sin((80*(t-1)-7.5)*Math.PI/15)},Back:function(t){return t*t*(3*t-2)},Bounce:function(t){for(var e,i=4;((e=Math.pow(2,--i))-1)/11>t;);return 1/Math.pow(4,3-i)-7.5625*Math.pow((3*e-2)/22-t,2)}}),t.each(e,function(e,i){t.easing["easeIn"+e]=i,t.easing["easeOut"+e]=function(t){return 1-i(1-t)},t.easing["easeInOut"+e]=function(t){return.5>t?i(2*t)/2:1-i(-2*t+2)/2}})}();var f=t.effects;t.effects.define("blind","hide",function(e,i){var s={up:["bottom","top"],vertical:["bottom","top"],down:["top","bottom"],left:["right","left"],horizontal:["right","left"],right:["left","right"]},n=t(this),o=e.direction||"up",a=n.cssClip(),r={clip:t.extend({},a)},h=t.effects.createPlaceholder(n);r.clip[s[o][0]]=r.clip[s[o][1]],"show"===e.mode&&(n.cssClip(r.clip),h&&h.css(t.effects.clipToBox(r)),r.clip=a),h&&h.animate(t.effects.clipToBox(r),e.duration,e.easing),n.animate(r,{queue:!1,duration:e.duration,easing:e.easing,complete:i})}),t.effects.define("bounce",function(e,i){var 
s,n,o,a=t(this),r=e.mode,h="hide"===r,l="show"===r,c=e.direction||"up",u=e.distance,d=e.times||5,p=2*d+(l||h?1:0),f=e.duration/p,g=e.easing,m="up"===c||"down"===c?"top":"left",_="up"===c||"left"===c,v=0,b=a.queue().length;for(t.effects.createPlaceholder(a),o=a.css(m),u||(u=a["top"===m?"outerHeight":"outerWidth"]()/3),l&&(n={opacity:1},n[m]=o,a.css("opacity",0).css(m,_?2*-u:2*u).animate(n,f,g)),h&&(u/=Math.pow(2,d-1)),n={},n[m]=o;d>v;v++)s={},s[m]=(_?"-=":"+=")+u,a.animate(s,f,g).animate(n,f,g),u=h?2*u:u/2;h&&(s={opacity:0},s[m]=(_?"-=":"+=")+u,a.animate(s,f,g)),a.queue(i),t.effects.unshift(a,b,p+1)}),t.effects.define("clip","hide",function(e,i){var s,n={},o=t(this),a=e.direction||"vertical",r="both"===a,h=r||"horizontal"===a,l=r||"vertical"===a;s=o.cssClip(),n.clip={top:l?(s.bottom-s.top)/2:s.top,right:h?(s.right-s.left)/2:s.right,bottom:l?(s.bottom-s.top)/2:s.bottom,left:h?(s.right-s.left)/2:s.left},t.effects.createPlaceholder(o),"show"===e.mode&&(o.cssClip(n.clip),n.clip=s),o.animate(n,{queue:!1,duration:e.duration,easing:e.easing,complete:i})}),t.effects.define("drop","hide",function(e,i){var s,n=t(this),o=e.mode,a="show"===o,r=e.direction||"left",h="up"===r||"down"===r?"top":"left",l="up"===r||"left"===r?"-=":"+=",c="+="===l?"-=":"+=",u={opacity:0};t.effects.createPlaceholder(n),s=e.distance||n["top"===h?"outerHeight":"outerWidth"](!0)/2,u[h]=l+s,a&&(n.css(u),u[h]=c+s,u.opacity=1),n.animate(u,{queue:!1,duration:e.duration,easing:e.easing,complete:i})}),t.effects.define("explode","hide",function(e,i){function s(){b.push(this),b.length===u*d&&n()}function n(){p.css({visibility:"visible"}),t(b).remove(),i()}var o,a,r,h,l,c,u=e.pieces?Math.round(Math.sqrt(e.pieces)):3,d=u,p=t(this),f=e.mode,g="show"===f,m=p.show().css("visibility","hidden").offset(),_=Math.ceil(p.outerWidth()/d),v=Math.ceil(p.outerHeight()/u),b=[];for(o=0;u>o;o++)for(h=m.top+o*v,c=o-(u-1)/2,a=0;d>a;a++)r=m.left+a*_,l=a-(d-1)/2,p.clone().appendTo("body").wrap("").css({position:"absolute",visibility:"visible",left:-a*_,top:-o*v}).parent().addClass("ui-effects-explode").css({position:"absolute",overflow:"hidden",width:_,height:v,left:r+(g?l*_:0),top:h+(g?c*v:0),opacity:g?0:1}).animate({left:r+(g?0:l*_),top:h+(g?0:c*v),opacity:g?1:0},e.duration||500,e.easing,s)}),t.effects.define("fade","toggle",function(e,i){var s="show"===e.mode;t(this).css("opacity",s?0:1).animate({opacity:s?1:0},{queue:!1,duration:e.duration,easing:e.easing,complete:i})}),t.effects.define("fold","hide",function(e,i){var s=t(this),n=e.mode,o="show"===n,a="hide"===n,r=e.size||15,h=/([0-9]+)%/.exec(r),l=!!e.horizFirst,c=l?["right","bottom"]:["bottom","right"],u=e.duration/2,d=t.effects.createPlaceholder(s),p=s.cssClip(),f={clip:t.extend({},p)},g={clip:t.extend({},p)},m=[p[c[0]],p[c[1]]],_=s.queue().length;h&&(r=parseInt(h[1],10)/100*m[a?0:1]),f.clip[c[0]]=r,g.clip[c[0]]=r,g.clip[c[1]]=0,o&&(s.cssClip(g.clip),d&&d.css(t.effects.clipToBox(g)),g.clip=p),s.queue(function(i){d&&d.animate(t.effects.clipToBox(f),u,e.easing).animate(t.effects.clipToBox(g),u,e.easing),i()}).animate(f,u,e.easing).animate(g,u,e.easing).queue(i),t.effects.unshift(s,_,4)}),t.effects.define("highlight","show",function(e,i){var s=t(this),n={backgroundColor:s.css("backgroundColor")};"hide"===e.mode&&(n.opacity=0),t.effects.saveStyle(s),s.css({backgroundImage:"none",backgroundColor:e.color||"#ffff99"}).animate(n,{queue:!1,duration:e.duration,easing:e.easing,complete:i})}),t.effects.define("size",function(e,i){var 
s,n,o,a=t(this),r=["fontSize"],h=["borderTopWidth","borderBottomWidth","paddingTop","paddingBottom"],l=["borderLeftWidth","borderRightWidth","paddingLeft","paddingRight"],c=e.mode,u="effect"!==c,d=e.scale||"both",p=e.origin||["middle","center"],f=a.css("position"),g=a.position(),m=t.effects.scaledDimensions(a),_=e.from||m,v=e.to||t.effects.scaledDimensions(a,0);t.effects.createPlaceholder(a),"show"===c&&(o=_,_=v,v=o),n={from:{y:_.height/m.height,x:_.width/m.width},to:{y:v.height/m.height,x:v.width/m.width}},("box"===d||"both"===d)&&(n.from.y!==n.to.y&&(_=t.effects.setTransition(a,h,n.from.y,_),v=t.effects.setTransition(a,h,n.to.y,v)),n.from.x!==n.to.x&&(_=t.effects.setTransition(a,l,n.from.x,_),v=t.effects.setTransition(a,l,n.to.x,v))),("content"===d||"both"===d)&&n.from.y!==n.to.y&&(_=t.effects.setTransition(a,r,n.from.y,_),v=t.effects.setTransition(a,r,n.to.y,v)),p&&(s=t.effects.getBaseline(p,m),_.top=(m.outerHeight-_.outerHeight)*s.y+g.top,_.left=(m.outerWidth-_.outerWidth)*s.x+g.left,v.top=(m.outerHeight-v.outerHeight)*s.y+g.top,v.left=(m.outerWidth-v.outerWidth)*s.x+g.left),a.css(_),("content"===d||"both"===d)&&(h=h.concat(["marginTop","marginBottom"]).concat(r),l=l.concat(["marginLeft","marginRight"]),a.find("*[width]").each(function(){var i=t(this),s=t.effects.scaledDimensions(i),o={height:s.height*n.from.y,width:s.width*n.from.x,outerHeight:s.outerHeight*n.from.y,outerWidth:s.outerWidth*n.from.x},a={height:s.height*n.to.y,width:s.width*n.to.x,outerHeight:s.height*n.to.y,outerWidth:s.width*n.to.x};n.from.y!==n.to.y&&(o=t.effects.setTransition(i,h,n.from.y,o),a=t.effects.setTransition(i,h,n.to.y,a)),n.from.x!==n.to.x&&(o=t.effects.setTransition(i,l,n.from.x,o),a=t.effects.setTransition(i,l,n.to.x,a)),u&&t.effects.saveStyle(i),i.css(o),i.animate(a,e.duration,e.easing,function(){u&&t.effects.restoreStyle(i)})})),a.animate(v,{queue:!1,duration:e.duration,easing:e.easing,complete:function(){var e=a.offset();0===v.opacity&&a.css("opacity",_.opacity),u||(a.css("position","static"===f?"relative":f).offset(e),t.effects.saveStyle(a)),i()}})}),t.effects.define("scale",function(e,i){var s=t(this),n=e.mode,o=parseInt(e.percent,10)||(0===parseInt(e.percent,10)?0:"effect"!==n?0:100),a=t.extend(!0,{from:t.effects.scaledDimensions(s),to:t.effects.scaledDimensions(s,o,e.direction||"both"),origin:e.origin||["middle","center"]},e);e.fade&&(a.from.opacity=1,a.to.opacity=0),t.effects.effect.size.call(this,a,i)}),t.effects.define("puff","hide",function(e,i){var s=t.extend(!0,{},e,{fade:!0,percent:parseInt(e.percent,10)||150});t.effects.effect.scale.call(this,s,i)}),t.effects.define("pulsate","show",function(e,i){var s=t(this),n=e.mode,o="show"===n,a="hide"===n,r=o||a,h=2*(e.times||5)+(r?1:0),l=e.duration/h,c=0,u=1,d=s.queue().length;for((o||!s.is(":visible"))&&(s.css("opacity",0).show(),c=1);h>u;u++)s.animate({opacity:c},l,e.easing),c=1-c;s.animate({opacity:c},l,e.easing),s.queue(i),t.effects.unshift(s,d,h+1)}),t.effects.define("shake",function(e,i){var s=1,n=t(this),o=e.direction||"left",a=e.distance||20,r=e.times||3,h=2*r+1,l=Math.round(e.duration/h),c="up"===o||"down"===o?"top":"left",u="up"===o||"left"===o,d={},p={},f={},g=n.queue().length;for(t.effects.createPlaceholder(n),d[c]=(u?"-=":"+=")+a,p[c]=(u?"+=":"-=")+2*a,f[c]=(u?"-=":"+=")+2*a,n.animate(d,l,e.easing);r>s;s++)n.animate(p,l,e.easing).animate(f,l,e.easing);n.animate(p,l,e.easing).animate(d,l/2,e.easing).queue(i),t.effects.unshift(n,g,h+1)}),t.effects.define("slide","show",function(e,i){var 
s,n,o=t(this),a={up:["bottom","top"],down:["top","bottom"],left:["right","left"],right:["left","right"]},r=e.mode,h=e.direction||"left",l="up"===h||"down"===h?"top":"left",c="up"===h||"left"===h,u=e.distance||o["top"===l?"outerHeight":"outerWidth"](!0),d={};t.effects.createPlaceholder(o),s=o.cssClip(),n=o.position()[l],d[l]=(c?-1:1)*u+n,d.clip=o.cssClip(),d.clip[a[h][1]]=d.clip[a[h][0]],"show"===r&&(o.cssClip(d.clip),o.css(l,d[l]),d.clip=s,d[l]=n),o.animate(d,{queue:!1,duration:e.duration,easing:e.easing,complete:i})});var f;t.uiBackCompat!==!1&&(f=t.effects.define("transfer",function(e,i){t(this).transfer(e,i)})),t.ui.focusable=function(i,s){var n,o,a,r,h,l=i.nodeName.toLowerCase();return"area"===l?(n=i.parentNode,o=n.name,i.href&&o&&"map"===n.nodeName.toLowerCase()?(a=t("img[usemap='#"+o+"']"),a.length>0&&a.is(":visible")):!1):(/^(input|select|textarea|button|object)$/.test(l)?(r=!i.disabled,r&&(h=t(i).closest("fieldset")[0],h&&(r=!h.disabled))):r="a"===l?i.href||s:s,r&&t(i).is(":visible")&&e(t(i)))},t.extend(t.expr[":"],{focusable:function(e){return t.ui.focusable(e,null!=t.attr(e,"tabindex"))}}),t.ui.focusable,t.fn.form=function(){return"string"==typeof this[0].form?this.closest("form"):t(this[0].form)},t.ui.formResetMixin={_formResetHandler:function(){var e=t(this);setTimeout(function(){var i=e.data("ui-form-reset-instances");t.each(i,function(){this.refresh()})})},_bindFormResetHandler:function(){if(this.form=this.element.form(),this.form.length){var t=this.form.data("ui-form-reset-instances")||[];t.length||this.form.on("reset.ui-form-reset",this._formResetHandler),t.push(this),this.form.data("ui-form-reset-instances",t)}},_unbindFormResetHandler:function(){if(this.form.length){var e=this.form.data("ui-form-reset-instances");e.splice(t.inArray(this,e),1),e.length?this.form.data("ui-form-reset-instances",e):this.form.removeData("ui-form-reset-instances").off("reset.ui-form-reset")}}},"1.7"===t.fn.jquery.substring(0,3)&&(t.each(["Width","Height"],function(e,i){function s(e,i,s,o){return t.each(n,function(){i-=parseFloat(t.css(e,"padding"+this))||0,s&&(i-=parseFloat(t.css(e,"border"+this+"Width"))||0),o&&(i-=parseFloat(t.css(e,"margin"+this))||0)}),i}var n="Width"===i?["Left","Right"]:["Top","Bottom"],o=i.toLowerCase(),a={innerWidth:t.fn.innerWidth,innerHeight:t.fn.innerHeight,outerWidth:t.fn.outerWidth,outerHeight:t.fn.outerHeight};t.fn["inner"+i]=function(e){return void 0===e?a["inner"+i].call(this):this.each(function(){t(this).css(o,s(this,e)+"px")})},t.fn["outer"+i]=function(e,n){return"number"!=typeof e?a["outer"+i].call(this,e):this.each(function(){t(this).css(o,s(this,e,!0,n)+"px")})}}),t.fn.addBack=function(t){return this.add(null==t?this.prevObject:this.prevObject.filter(t))}),t.ui.keyCode={BACKSPACE:8,COMMA:188,DELETE:46,DOWN:40,END:35,ENTER:13,ESCAPE:27,HOME:36,LEFT:37,PAGE_DOWN:34,PAGE_UP:33,PERIOD:190,RIGHT:39,SPACE:32,TAB:9,UP:38},t.ui.escapeSelector=function(){var t=/([!"#$%&'()*+,.\/:;<=>?@[\]^`{|}~])/g;return function(e){return e.replace(t,"\\$1")}}(),t.fn.labels=function(){var e,i,s,n,o;return this[0].labels&&this[0].labels.length?this.pushStack(this[0].labels):(n=this.eq(0).parents("label"),s=this.attr("id"),s&&(e=this.eq(0).parents().last(),o=e.add(e.length?e.siblings():this.siblings()),i="label[for='"+t.ui.escapeSelector(s)+"']",n=n.add(o.find(i).addBack(i))),this.pushStack(n))},t.fn.scrollParent=function(e){var i=this.css("position"),s="absolute"===i,n=e?/(auto|scroll|hidden)/:/(auto|scroll)/,o=this.parents().filter(function(){var e=t(this);return 
s&&"static"===e.css("position")?!1:n.test(e.css("overflow")+e.css("overflow-y")+e.css("overflow-x"))}).eq(0);return"fixed"!==i&&o.length?o:t(this[0].ownerDocument||document)},t.extend(t.expr[":"],{tabbable:function(e){var i=t.attr(e,"tabindex"),s=null!=i;return(!s||i>=0)&&t.ui.focusable(e,s)}}),t.fn.extend({uniqueId:function(){var t=0;return function(){return this.each(function(){this.id||(this.id="ui-id-"+ ++t)})}}(),removeUniqueId:function(){return this.each(function(){/^ui-id-\d+$/.test(this.id)&&t(this).removeAttr("id")})}}),t.widget("ui.accordion",{version:"1.12.1",options:{active:0,animate:{},classes:{"ui-accordion-header":"ui-corner-top","ui-accordion-header-collapsed":"ui-corner-all","ui-accordion-content":"ui-corner-bottom"},collapsible:!1,event:"click",header:"> li > :first-child, > :not(li):even",heightStyle:"auto",icons:{activeHeader:"ui-icon-triangle-1-s",header:"ui-icon-triangle-1-e"},activate:null,beforeActivate:null},hideProps:{borderTopWidth:"hide",borderBottomWidth:"hide",paddingTop:"hide",paddingBottom:"hide",height:"hide"},showProps:{borderTopWidth:"show",borderBottomWidth:"show",paddingTop:"show",paddingBottom:"show",height:"show"},_create:function(){var e=this.options;this.prevShow=this.prevHide=t(),this._addClass("ui-accordion","ui-widget ui-helper-reset"),this.element.attr("role","tablist"),e.collapsible||e.active!==!1&&null!=e.active||(e.active=0),this._processPanels(),0>e.active&&(e.active+=this.headers.length),this._refresh()},_getCreateEventData:function(){return{header:this.active,panel:this.active.length?this.active.next():t()}},_createIcons:function(){var e,i,s=this.options.icons;s&&(e=t(""),this._addClass(e,"ui-accordion-header-icon","ui-icon "+s.header),e.prependTo(this.headers),i=this.active.children(".ui-accordion-header-icon"),this._removeClass(i,s.header)._addClass(i,null,s.activeHeader)._addClass(this.headers,"ui-accordion-icons"))},_destroyIcons:function(){this._removeClass(this.headers,"ui-accordion-icons"),this.headers.children(".ui-accordion-header-icon").remove()},_destroy:function(){var t;this.element.removeAttr("role"),this.headers.removeAttr("role aria-expanded aria-selected aria-controls tabIndex").removeUniqueId(),this._destroyIcons(),t=this.headers.next().css("display","").removeAttr("role aria-hidden aria-labelledby").removeUniqueId(),"content"!==this.options.heightStyle&&t.css("height","")},_setOption:function(t,e){return"active"===t?(this._activate(e),void 0):("event"===t&&(this.options.event&&this._off(this.headers,this.options.event),this._setupEvents(e)),this._super(t,e),"collapsible"!==t||e||this.options.active!==!1||this._activate(0),"icons"===t&&(this._destroyIcons(),e&&this._createIcons()),void 0)},_setOptionDisabled:function(t){this._super(t),this.element.attr("aria-disabled",t),this._toggleClass(null,"ui-state-disabled",!!t),this._toggleClass(this.headers.add(this.headers.next()),null,"ui-state-disabled",!!t)},_keydown:function(e){if(!e.altKey&&!e.ctrlKey){var i=t.ui.keyCode,s=this.headers.length,n=this.headers.index(e.target),o=!1;switch(e.keyCode){case i.RIGHT:case i.DOWN:o=this.headers[(n+1)%s];break;case i.LEFT:case i.UP:o=this.headers[(n-1+s)%s];break;case i.SPACE:case i.ENTER:this._eventHandler(e);break;case i.HOME:o=this.headers[0];break;case i.END:o=this.headers[s-1]}o&&(t(e.target).attr("tabIndex",-1),t(o).attr("tabIndex",0),t(o).trigger("focus"),e.preventDefault())}},_panelKeyDown:function(e){e.keyCode===t.ui.keyCode.UP&&e.ctrlKey&&t(e.currentTarget).prev().trigger("focus")},refresh:function(){var 
e=this.options;this._processPanels(),e.active===!1&&e.collapsible===!0||!this.headers.length?(e.active=!1,this.active=t()):e.active===!1?this._activate(0):this.active.length&&!t.contains(this.element[0],this.active[0])?this.headers.length===this.headers.find(".ui-state-disabled").length?(e.active=!1,this.active=t()):this._activate(Math.max(0,e.active-1)):e.active=this.headers.index(this.active),this._destroyIcons(),this._refresh()},_processPanels:function(){var t=this.headers,e=this.panels;this.headers=this.element.find(this.options.header),this._addClass(this.headers,"ui-accordion-header ui-accordion-header-collapsed","ui-state-default"),this.panels=this.headers.next().filter(":not(.ui-accordion-content-active)").hide(),this._addClass(this.panels,"ui-accordion-content","ui-helper-reset ui-widget-content"),e&&(this._off(t.not(this.headers)),this._off(e.not(this.panels)))},_refresh:function(){var e,i=this.options,s=i.heightStyle,n=this.element.parent();this.active=this._findActive(i.active),this._addClass(this.active,"ui-accordion-header-active","ui-state-active")._removeClass(this.active,"ui-accordion-header-collapsed"),this._addClass(this.active.next(),"ui-accordion-content-active"),this.active.next().show(),this.headers.attr("role","tab").each(function(){var e=t(this),i=e.uniqueId().attr("id"),s=e.next(),n=s.uniqueId().attr("id");e.attr("aria-controls",n),s.attr("aria-labelledby",i)}).next().attr("role","tabpanel"),this.headers.not(this.active).attr({"aria-selected":"false","aria-expanded":"false",tabIndex:-1}).next().attr({"aria-hidden":"true"}).hide(),this.active.length?this.active.attr({"aria-selected":"true","aria-expanded":"true",tabIndex:0}).next().attr({"aria-hidden":"false"}):this.headers.eq(0).attr("tabIndex",0),this._createIcons(),this._setupEvents(i.event),"fill"===s?(e=n.height(),this.element.siblings(":visible").each(function(){var i=t(this),s=i.css("position");"absolute"!==s&&"fixed"!==s&&(e-=i.outerHeight(!0))}),this.headers.each(function(){e-=t(this).outerHeight(!0)}),this.headers.next().each(function(){t(this).height(Math.max(0,e-t(this).innerHeight()+t(this).height()))}).css("overflow","auto")):"auto"===s&&(e=0,this.headers.next().each(function(){var i=t(this).is(":visible");i||t(this).show(),e=Math.max(e,t(this).css("height","").height()),i||t(this).hide()}).height(e))},_activate:function(e){var i=this._findActive(e)[0];i!==this.active[0]&&(i=i||this.active[0],this._eventHandler({target:i,currentTarget:i,preventDefault:t.noop}))},_findActive:function(e){return"number"==typeof e?this.headers.eq(e):t()},_setupEvents:function(e){var i={keydown:"_keydown"};e&&t.each(e.split(" "),function(t,e){i[e]="_eventHandler"}),this._off(this.headers.add(this.headers.next())),this._on(this.headers,i),this._on(this.headers.next(),{keydown:"_panelKeyDown"}),this._hoverable(this.headers),this._focusable(this.headers)},_eventHandler:function(e){var 
i,s,n=this.options,o=this.active,a=t(e.currentTarget),r=a[0]===o[0],h=r&&n.collapsible,l=h?t():a.next(),c=o.next(),u={oldHeader:o,oldPanel:c,newHeader:h?t():a,newPanel:l};e.preventDefault(),r&&!n.collapsible||this._trigger("beforeActivate",e,u)===!1||(n.active=h?!1:this.headers.index(a),this.active=r?t():a,this._toggle(u),this._removeClass(o,"ui-accordion-header-active","ui-state-active"),n.icons&&(i=o.children(".ui-accordion-header-icon"),this._removeClass(i,null,n.icons.activeHeader)._addClass(i,null,n.icons.header)),r||(this._removeClass(a,"ui-accordion-header-collapsed")._addClass(a,"ui-accordion-header-active","ui-state-active"),n.icons&&(s=a.children(".ui-accordion-header-icon"),this._removeClass(s,null,n.icons.header)._addClass(s,null,n.icons.activeHeader)),this._addClass(a.next(),"ui-accordion-content-active")))},_toggle:function(e){var i=e.newPanel,s=this.prevShow.length?this.prevShow:e.oldPanel;this.prevShow.add(this.prevHide).stop(!0,!0),this.prevShow=i,this.prevHide=s,this.options.animate?this._animate(i,s,e):(s.hide(),i.show(),this._toggleComplete(e)),s.attr({"aria-hidden":"true"}),s.prev().attr({"aria-selected":"false","aria-expanded":"false"}),i.length&&s.length?s.prev().attr({tabIndex:-1,"aria-expanded":"false"}):i.length&&this.headers.filter(function(){return 0===parseInt(t(this).attr("tabIndex"),10)}).attr("tabIndex",-1),i.attr("aria-hidden","false").prev().attr({"aria-selected":"true","aria-expanded":"true",tabIndex:0})},_animate:function(t,e,i){var s,n,o,a=this,r=0,h=t.css("box-sizing"),l=t.length&&(!e.length||t.index()",delay:300,options:{icons:{submenu:"ui-icon-caret-1-e"},items:"> *",menus:"ul",position:{my:"left top",at:"right top"},role:"menu",blur:null,focus:null,select:null},_create:function(){this.activeMenu=this.element,this.mouseHandled=!1,this.element.uniqueId().attr({role:this.options.role,tabIndex:0}),this._addClass("ui-menu","ui-widget ui-widget-content"),this._on({"mousedown .ui-menu-item":function(t){t.preventDefault()},"click .ui-menu-item":function(e){var i=t(e.target),s=t(t.ui.safeActiveElement(this.document[0]));!this.mouseHandled&&i.not(".ui-state-disabled").length&&(this.select(e),e.isPropagationStopped()||(this.mouseHandled=!0),i.has(".ui-menu").length?this.expand(e):!this.element.is(":focus")&&s.closest(".ui-menu").length&&(this.element.trigger("focus",[!0]),this.active&&1===this.active.parents(".ui-menu").length&&clearTimeout(this.timer)))},"mouseenter .ui-menu-item":function(e){if(!this.previousFilter){var i=t(e.target).closest(".ui-menu-item"),s=t(e.currentTarget);i[0]===s[0]&&(this._removeClass(s.siblings().children(".ui-state-active"),null,"ui-state-active"),this.focus(e,s))}},mouseleave:"collapseAll","mouseleave .ui-menu":"collapseAll",focus:function(t,e){var i=this.active||this.element.find(this.options.items).eq(0);e||this.focus(t,i)},blur:function(e){this._delay(function(){var i=!t.contains(this.element[0],t.ui.safeActiveElement(this.document[0]));i&&this.collapseAll(e)})},keydown:"_keydown"}),this.refresh(),this._on(this.document,{click:function(t){this._closeOnDocumentClick(t)&&this.collapseAll(t),this.mouseHandled=!1}})},_destroy:function(){var e=this.element.find(".ui-menu-item").removeAttr("role aria-disabled"),i=e.children(".ui-menu-item-wrapper").removeUniqueId().removeAttr("tabIndex role aria-haspopup");this.element.removeAttr("aria-activedescendant").find(".ui-menu").addBack().removeAttr("role aria-labelledby aria-expanded aria-hidden aria-disabled tabIndex").removeUniqueId().show(),i.children().each(function(){var 
e=t(this);e.data("ui-menu-submenu-caret")&&e.remove()})},_keydown:function(e){var i,s,n,o,a=!0;switch(e.keyCode){case t.ui.keyCode.PAGE_UP:this.previousPage(e);break;case t.ui.keyCode.PAGE_DOWN:this.nextPage(e);break;case t.ui.keyCode.HOME:this._move("first","first",e);break;case t.ui.keyCode.END:this._move("last","last",e);break;case t.ui.keyCode.UP:this.previous(e);break;case t.ui.keyCode.DOWN:this.next(e);break;case t.ui.keyCode.LEFT:this.collapse(e);break;case t.ui.keyCode.RIGHT:this.active&&!this.active.is(".ui-state-disabled")&&this.expand(e);break;case t.ui.keyCode.ENTER:case t.ui.keyCode.SPACE:this._activate(e);break;case t.ui.keyCode.ESCAPE:this.collapse(e);break;default:a=!1,s=this.previousFilter||"",o=!1,n=e.keyCode>=96&&105>=e.keyCode?""+(e.keyCode-96):String.fromCharCode(e.keyCode),clearTimeout(this.filterTimer),n===s?o=!0:n=s+n,i=this._filterMenuItems(n),i=o&&-1!==i.index(this.active.next())?this.active.nextAll(".ui-menu-item"):i,i.length||(n=String.fromCharCode(e.keyCode),i=this._filterMenuItems(n)),i.length?(this.focus(e,i),this.previousFilter=n,this.filterTimer=this._delay(function(){delete this.previousFilter},1e3)):delete this.previousFilter}a&&e.preventDefault()},_activate:function(t){this.active&&!this.active.is(".ui-state-disabled")&&(this.active.children("[aria-haspopup='true']").length?this.expand(t):this.select(t))},refresh:function(){var e,i,s,n,o,a=this,r=this.options.icons.submenu,h=this.element.find(this.options.menus);this._toggleClass("ui-menu-icons",null,!!this.element.find(".ui-icon").length),s=h.filter(":not(.ui-menu)").hide().attr({role:this.options.role,"aria-hidden":"true","aria-expanded":"false"}).each(function(){var e=t(this),i=e.prev(),s=t("").data("ui-menu-submenu-caret",!0);a._addClass(s,"ui-menu-icon","ui-icon "+r),i.attr("aria-haspopup","true").prepend(s),e.attr("aria-labelledby",i.attr("id"))}),this._addClass(s,"ui-menu","ui-widget ui-widget-content ui-front"),e=h.add(this.element),i=e.find(this.options.items),i.not(".ui-menu-item").each(function(){var e=t(this);a._isDivider(e)&&a._addClass(e,"ui-menu-divider","ui-widget-content")}),n=i.not(".ui-menu-item, .ui-menu-divider"),o=n.children().not(".ui-menu").uniqueId().attr({tabIndex:-1,role:this._itemRole()}),this._addClass(n,"ui-menu-item")._addClass(o,"ui-menu-item-wrapper"),i.filter(".ui-state-disabled").attr("aria-disabled","true"),this.active&&!t.contains(this.element[0],this.active[0])&&this.blur()},_itemRole:function(){return{menu:"menuitem",listbox:"option"}[this.options.role]},_setOption:function(t,e){if("icons"===t){var i=this.element.find(".ui-menu-icon");this._removeClass(i,null,this.options.icons.submenu)._addClass(i,null,e.submenu)}this._super(t,e)},_setOptionDisabled:function(t){this._super(t),this.element.attr("aria-disabled",t+""),this._toggleClass(null,"ui-state-disabled",!!t)},focus:function(t,e){var i,s,n;this.blur(t,t&&"focus"===t.type),this._scrollIntoView(e),this.active=e.first(),s=this.active.children(".ui-menu-item-wrapper"),this._addClass(s,null,"ui-state-active"),this.options.role&&this.element.attr("aria-activedescendant",s.attr("id")),n=this.active.parent().closest(".ui-menu-item").children(".ui-menu-item-wrapper"),this._addClass(n,null,"ui-state-active"),t&&"keydown"===t.type?this._close():this.timer=this._delay(function(){this._close()},this.delay),i=e.children(".ui-menu"),i.length&&t&&/^mouse/.test(t.type)&&this._startOpening(i),this.activeMenu=e.parent(),this._trigger("focus",t,{item:e})},_scrollIntoView:function(e){var 
i,s,n,o,a,r;this._hasScroll()&&(i=parseFloat(t.css(this.activeMenu[0],"borderTopWidth"))||0,s=parseFloat(t.css(this.activeMenu[0],"paddingTop"))||0,n=e.offset().top-this.activeMenu.offset().top-i-s,o=this.activeMenu.scrollTop(),a=this.activeMenu.height(),r=e.outerHeight(),0>n?this.activeMenu.scrollTop(o+n):n+r>a&&this.activeMenu.scrollTop(o+n-a+r))},blur:function(t,e){e||clearTimeout(this.timer),this.active&&(this._removeClass(this.active.children(".ui-menu-item-wrapper"),null,"ui-state-active"),this._trigger("blur",t,{item:this.active}),this.active=null)},_startOpening:function(t){clearTimeout(this.timer),"true"===t.attr("aria-hidden")&&(this.timer=this._delay(function(){this._close(),this._open(t)},this.delay))},_open:function(e){var i=t.extend({of:this.active},this.options.position);clearTimeout(this.timer),this.element.find(".ui-menu").not(e.parents(".ui-menu")).hide().attr("aria-hidden","true"),e.show().removeAttr("aria-hidden").attr("aria-expanded","true").position(i)},collapseAll:function(e,i){clearTimeout(this.timer),this.timer=this._delay(function(){var s=i?this.element:t(e&&e.target).closest(this.element.find(".ui-menu"));s.length||(s=this.element),this._close(s),this.blur(e),this._removeClass(s.find(".ui-state-active"),null,"ui-state-active"),this.activeMenu=s},this.delay)},_close:function(t){t||(t=this.active?this.active.parent():this.element),t.find(".ui-menu").hide().attr("aria-hidden","true").attr("aria-expanded","false")},_closeOnDocumentClick:function(e){return!t(e.target).closest(".ui-menu").length},_isDivider:function(t){return!/[^\-\u2014\u2013\s]/.test(t.text())},collapse:function(t){var e=this.active&&this.active.parent().closest(".ui-menu-item",this.element);e&&e.length&&(this._close(),this.focus(t,e))},expand:function(t){var e=this.active&&this.active.children(".ui-menu ").find(this.options.items).first();e&&e.length&&(this._open(e.parent()),this._delay(function(){this.focus(t,e)}))},next:function(t){this._move("next","first",t)},previous:function(t){this._move("prev","last",t)},isFirstItem:function(){return this.active&&!this.active.prevAll(".ui-menu-item").length},isLastItem:function(){return this.active&&!this.active.nextAll(".ui-menu-item").length},_move:function(t,e,i){var s;this.active&&(s="first"===t||"last"===t?this.active["first"===t?"prevAll":"nextAll"](".ui-menu-item").eq(-1):this.active[t+"All"](".ui-menu-item").eq(0)),s&&s.length&&this.active||(s=this.activeMenu.find(this.options.items)[e]()),this.focus(i,s)},nextPage:function(e){var i,s,n;return this.active?(this.isLastItem()||(this._hasScroll()?(s=this.active.offset().top,n=this.element.height(),this.active.nextAll(".ui-menu-item").each(function(){return i=t(this),0>i.offset().top-s-n}),this.focus(e,i)):this.focus(e,this.activeMenu.find(this.options.items)[this.active?"last":"first"]())),void 0):(this.next(e),void 0)},previousPage:function(e){var i,s,n;return this.active?(this.isFirstItem()||(this._hasScroll()?(s=this.active.offset().top,n=this.element.height(),this.active.prevAll(".ui-menu-item").each(function(){return i=t(this),i.offset().top-s+n>0}),this.focus(e,i)):this.focus(e,this.activeMenu.find(this.options.items).first())),void 0):(this.next(e),void 0)},_hasScroll:function(){return this.element.outerHeight() ",options:{appendTo:null,autoFocus:!1,delay:300,minLength:1,position:{my:"left top",at:"left bottom",collision:"none"},source:null,change:null,close:null,focus:null,open:null,response:null,search:null,select:null},requestIndex:0,pending:0,_create:function(){var 
e,i,s,n=this.element[0].nodeName.toLowerCase(),o="textarea"===n,a="input"===n; -this.isMultiLine=o||!a&&this._isContentEditable(this.element),this.valueMethod=this.element[o||a?"val":"text"],this.isNewMenu=!0,this._addClass("ui-autocomplete-input"),this.element.attr("autocomplete","off"),this._on(this.element,{keydown:function(n){if(this.element.prop("readOnly"))return e=!0,s=!0,i=!0,void 0;e=!1,s=!1,i=!1;var o=t.ui.keyCode;switch(n.keyCode){case o.PAGE_UP:e=!0,this._move("previousPage",n);break;case o.PAGE_DOWN:e=!0,this._move("nextPage",n);break;case o.UP:e=!0,this._keyEvent("previous",n);break;case o.DOWN:e=!0,this._keyEvent("next",n);break;case o.ENTER:this.menu.active&&(e=!0,n.preventDefault(),this.menu.select(n));break;case o.TAB:this.menu.active&&this.menu.select(n);break;case o.ESCAPE:this.menu.element.is(":visible")&&(this.isMultiLine||this._value(this.term),this.close(n),n.preventDefault());break;default:i=!0,this._searchTimeout(n)}},keypress:function(s){if(e)return e=!1,(!this.isMultiLine||this.menu.element.is(":visible"))&&s.preventDefault(),void 0;if(!i){var n=t.ui.keyCode;switch(s.keyCode){case n.PAGE_UP:this._move("previousPage",s);break;case n.PAGE_DOWN:this._move("nextPage",s);break;case n.UP:this._keyEvent("previous",s);break;case n.DOWN:this._keyEvent("next",s)}}},input:function(t){return s?(s=!1,t.preventDefault(),void 0):(this._searchTimeout(t),void 0)},focus:function(){this.selectedItem=null,this.previous=this._value()},blur:function(t){return this.cancelBlur?(delete this.cancelBlur,void 0):(clearTimeout(this.searching),this.close(t),this._change(t),void 0)}}),this._initSource(),this.menu=t(" ").appendTo(this._appendTo()).menu({role:null}).hide().menu("instance"),this._addClass(this.menu.element,"ui-autocomplete","ui-front"),this._on(this.menu.element,{mousedown:function(e){e.preventDefault(),this.cancelBlur=!0,this._delay(function(){delete this.cancelBlur,this.element[0]!==t.ui.safeActiveElement(this.document[0])&&this.element.trigger("focus")})},menufocus:function(e,i){var s,n;return this.isNewMenu&&(this.isNewMenu=!1,e.originalEvent&&/^mouse/.test(e.originalEvent.type))?(this.menu.blur(),this.document.one("mousemove",function(){t(e.target).trigger(e.originalEvent)}),void 0):(n=i.item.data("ui-autocomplete-item"),!1!==this._trigger("focus",e,{item:n})&&e.originalEvent&&/^key/.test(e.originalEvent.type)&&this._value(n.value),s=i.item.attr("aria-label")||n.value,s&&t.trim(s).length&&(this.liveRegion.children().hide(),t("
").text(s).appendTo(this.liveRegion)),void 0)},menuselect:function(e,i){var s=i.item.data("ui-autocomplete-item"),n=this.previous;this.element[0]!==t.ui.safeActiveElement(this.document[0])&&(this.element.trigger("focus"),this.previous=n,this._delay(function(){this.previous=n,this.selectedItem=s})),!1!==this._trigger("select",e,{item:s})&&this._value(s.value),this.term=this._value(),this.close(e),this.selectedItem=s}}),this.liveRegion=t("",{role:"status","aria-live":"assertive","aria-relevant":"additions"}).appendTo(this.document[0].body),this._addClass(this.liveRegion,null,"ui-helper-hidden-accessible"),this._on(this.window,{beforeunload:function(){this.element.removeAttr("autocomplete")}})},_destroy:function(){clearTimeout(this.searching),this.element.removeAttr("autocomplete"),this.menu.element.remove(),this.liveRegion.remove()},_setOption:function(t,e){this._super(t,e),"source"===t&&this._initSource(),"appendTo"===t&&this.menu.element.appendTo(this._appendTo()),"disabled"===t&&e&&this.xhr&&this.xhr.abort()},_isEventTargetInWidget:function(e){var i=this.menu.element[0];return e.target===this.element[0]||e.target===i||t.contains(i,e.target)},_closeOnClickOutside:function(t){this._isEventTargetInWidget(t)||this.close()},_appendTo:function(){var e=this.options.appendTo;return e&&(e=e.jquery||e.nodeType?t(e):this.document.find(e).eq(0)),e&&e[0]||(e=this.element.closest(".ui-front, dialog")),e.length||(e=this.document[0].body),e},_initSource:function(){var e,i,s=this;t.isArray(this.options.source)?(e=this.options.source,this.source=function(i,s){s(t.ui.autocomplete.filter(e,i.term))}):"string"==typeof this.options.source?(i=this.options.source,this.source=function(e,n){s.xhr&&s.xhr.abort(),s.xhr=t.ajax({url:i,data:e,dataType:"json",success:function(t){n(t)},error:function(){n([])}})}):this.source=this.options.source},_searchTimeout:function(t){clearTimeout(this.searching),this.searching=this._delay(function(){var e=this.term===this._value(),i=this.menu.element.is(":visible"),s=t.altKey||t.ctrlKey||t.metaKey||t.shiftKey;(!e||e&&!i&&!s)&&(this.selectedItem=null,this.search(null,t))},this.options.delay)},search:function(t,e){return t=null!=t?t:this._value(),this.term=this._value(),t.length").append(t(" ").text(i.label)).appendTo(e)},_move:function(t,e){return this.menu.element.is(":visible")?this.menu.isFirstItem()&&/^previous/.test(t)||this.menu.isLastItem()&&/^next/.test(t)?(this.isMultiLine||this._value(this.term),this.menu.blur(),void 0):(this.menu[t](e),void 0):(this.search(null,e),void 0)},widget:function(){return this.menu.element},_value:function(){return this.valueMethod.apply(this.element,arguments)},_keyEvent:function(t,e){(!this.isMultiLine||this.menu.element.is(":visible"))&&(this._move(t,e),e.preventDefault())},_isContentEditable:function(t){if(!t.length)return!1;var e=t.prop("contentEditable");return"inherit"===e?this._isContentEditable(t.parent()):"true"===e}}),t.extend(t.ui.autocomplete,{escapeRegex:function(t){return t.replace(/[\-\[\]{}()*+?.,\\\^$|#\s]/g,"\\$&")},filter:function(e,i){var s=RegExp(t.ui.autocomplete.escapeRegex(i),"i");return t.grep(e,function(t){return s.test(t.label||t.value||t)})}}),t.widget("ui.autocomplete",t.ui.autocomplete,{options:{messages:{noResults:"No search results.",results:function(t){return t+(t>1?" 
results are":" result is")+" available, use up and down arrow keys to navigate."}}},__response:function(e){var i;this._superApply(arguments),this.options.disabled||this.cancelSearch||(i=e&&e.length?this.options.messages.results(e.length):this.options.messages.noResults,this.liveRegion.children().hide(),t("").text(i).appendTo(this.liveRegion))}}),t.ui.autocomplete;var g=/ui-corner-([a-z]){2,6}/g;t.widget("ui.controlgroup",{version:"1.12.1",defaultElement:"",options:{direction:"horizontal",disabled:null,onlyVisible:!0,items:{button:"input[type=button], input[type=submit], input[type=reset], button, a",controlgroupLabel:".ui-controlgroup-label",checkboxradio:"input[type='checkbox'], input[type='radio']",selectmenu:"select",spinner:".ui-spinner-input"}},_create:function(){this._enhance()},_enhance:function(){this.element.attr("role","toolbar"),this.refresh()},_destroy:function(){this._callChildMethod("destroy"),this.childWidgets.removeData("ui-controlgroup-data"),this.element.removeAttr("role"),this.options.items.controlgroupLabel&&this.element.find(this.options.items.controlgroupLabel).find(".ui-controlgroup-label-contents").contents().unwrap()},_initWidgets:function(){var e=this,i=[];t.each(this.options.items,function(s,n){var o,a={};return n?"controlgroupLabel"===s?(o=e.element.find(n),o.each(function(){var e=t(this);e.children(".ui-controlgroup-label-contents").length||e.contents().wrapAll("")}),e._addClass(o,null,"ui-widget ui-widget-content ui-state-default"),i=i.concat(o.get()),void 0):(t.fn[s]&&(a=e["_"+s+"Options"]?e["_"+s+"Options"]("middle"):{classes:{}},e.element.find(n).each(function(){var n=t(this),o=n[s]("instance"),r=t.widget.extend({},a);if("button"!==s||!n.parent(".ui-spinner").length){o||(o=n[s]()[s]("instance")),o&&(r.classes=e._resolveClassesValues(r.classes,o)),n[s](r);var h=n[s]("widget");t.data(h[0],"ui-controlgroup-data",o?o:n[s]("instance")),i.push(h[0])}})),void 0):void 0}),this.childWidgets=t(t.unique(i)),this._addClass(this.childWidgets,"ui-controlgroup-item")},_callChildMethod:function(e){this.childWidgets.each(function(){var i=t(this),s=i.data("ui-controlgroup-data");s&&s[e]&&s[e]()})},_updateCornerClass:function(t,e){var i="ui-corner-top ui-corner-bottom ui-corner-left ui-corner-right ui-corner-all",s=this._buildSimpleOptions(e,"label").classes.label;this._removeClass(t,null,i),this._addClass(t,null,s)},_buildSimpleOptions:function(t,e){var i="vertical"===this.options.direction,s={classes:{}};return s.classes[e]={middle:"",first:"ui-corner-"+(i?"top":"left"),last:"ui-corner-"+(i?"bottom":"right"),only:"ui-corner-all"}[t],s},_spinnerOptions:function(t){var e=this._buildSimpleOptions(t,"ui-spinner");return e.classes["ui-spinner-up"]="",e.classes["ui-spinner-down"]="",e},_buttonOptions:function(t){return this._buildSimpleOptions(t,"ui-button")},_checkboxradioOptions:function(t){return this._buildSimpleOptions(t,"ui-checkboxradio-label")},_selectmenuOptions:function(t){var e="vertical"===this.options.direction;return{width:e?"auto":!1,classes:{middle:{"ui-selectmenu-button-open":"","ui-selectmenu-button-closed":""},first:{"ui-selectmenu-button-open":"ui-corner-"+(e?"top":"tl"),"ui-selectmenu-button-closed":"ui-corner-"+(e?"top":"left")},last:{"ui-selectmenu-button-open":e?"":"ui-corner-tr","ui-selectmenu-button-closed":"ui-corner-"+(e?"bottom":"right")},only:{"ui-selectmenu-button-open":"ui-corner-top","ui-selectmenu-button-closed":"ui-corner-all"}}[t]}},_resolveClassesValues:function(e,i){var s={};return t.each(e,function(n){var 
o=i.options.classes[n]||"";o=t.trim(o.replace(g,"")),s[n]=(o+" "+e[n]).replace(/\s+/g," ")}),s},_setOption:function(t,e){return"direction"===t&&this._removeClass("ui-controlgroup-"+this.options.direction),this._super(t,e),"disabled"===t?(this._callChildMethod(e?"disable":"enable"),void 0):(this.refresh(),void 0)},refresh:function(){var e,i=this;this._addClass("ui-controlgroup ui-controlgroup-"+this.options.direction),"horizontal"===this.options.direction&&this._addClass(null,"ui-helper-clearfix"),this._initWidgets(),e=this.childWidgets,this.options.onlyVisible&&(e=e.filter(":visible")),e.length&&(t.each(["first","last"],function(t,s){var n=e[s]().data("ui-controlgroup-data");if(n&&i["_"+n.widgetName+"Options"]){var o=i["_"+n.widgetName+"Options"](1===e.length?"only":s);o.classes=i._resolveClassesValues(o.classes,n),n.element[n.widgetName](o)}else i._updateCornerClass(e[s](),s)}),this._callChildMethod("refresh"))}}),t.widget("ui.checkboxradio",[t.ui.formResetMixin,{version:"1.12.1",options:{disabled:null,label:null,icon:!0,classes:{"ui-checkboxradio-label":"ui-corner-all","ui-checkboxradio-icon":"ui-corner-all"}},_getCreateOptions:function(){var e,i,s=this,n=this._super()||{};return this._readType(),i=this.element.labels(),this.label=t(i[i.length-1]),this.label.length||t.error("No label found for checkboxradio widget"),this.originalLabel="",this.label.contents().not(this.element[0]).each(function(){s.originalLabel+=3===this.nodeType?t(this).text():this.outerHTML}),this.originalLabel&&(n.label=this.originalLabel),e=this.element[0].disabled,null!=e&&(n.disabled=e),n},_create:function(){var t=this.element[0].checked;this._bindFormResetHandler(),null==this.options.disabled&&(this.options.disabled=this.element[0].disabled),this._setOption("disabled",this.options.disabled),this._addClass("ui-checkboxradio","ui-helper-hidden-accessible"),this._addClass(this.label,"ui-checkboxradio-label","ui-button ui-widget"),"radio"===this.type&&this._addClass(this.label,"ui-checkboxradio-radio-label"),this.options.label&&this.options.label!==this.originalLabel?this._updateLabel():this.originalLabel&&(this.options.label=this.originalLabel),this._enhance(),t&&(this._addClass(this.label,"ui-checkboxradio-checked","ui-state-active"),this.icon&&this._addClass(this.icon,null,"ui-state-hover")),this._on({change:"_toggleClasses",focus:function(){this._addClass(this.label,null,"ui-state-focus ui-visual-focus")},blur:function(){this._removeClass(this.label,null,"ui-state-focus ui-visual-focus")}})},_readType:function(){var e=this.element[0].nodeName.toLowerCase();this.type=this.element[0].type,"input"===e&&/radio|checkbox/.test(this.type)||t.error("Can't create checkboxradio on element.nodeName="+e+" and element.type="+this.type)},_enhance:function(){this._updateIcon(this.element[0].checked)},widget:function(){return this.label},_getRadioGroup:function(){var e,i=this.element[0].name,s="input[name='"+t.ui.escapeSelector(i)+"']";return i?(e=this.form.length?t(this.form[0].elements).filter(s):t(s).filter(function(){return 0===t(this).form().length}),e.not(this.element)):t([])},_toggleClasses:function(){var e=this.element[0].checked;this._toggleClass(this.label,"ui-checkboxradio-checked","ui-state-active",e),this.options.icon&&"checkbox"===this.type&&this._toggleClass(this.icon,null,"ui-icon-check ui-state-checked",e)._toggleClass(this.icon,null,"ui-icon-blank",!e),"radio"===this.type&&this._getRadioGroup().each(function(){var 
e=t(this).checkboxradio("instance");e&&e._removeClass(e.label,"ui-checkboxradio-checked","ui-state-active")})},_destroy:function(){this._unbindFormResetHandler(),this.icon&&(this.icon.remove(),this.iconSpace.remove())},_setOption:function(t,e){return"label"!==t||e?(this._super(t,e),"disabled"===t?(this._toggleClass(this.label,null,"ui-state-disabled",e),this.element[0].disabled=e,void 0):(this.refresh(),void 0)):void 0},_updateIcon:function(e){var i="ui-icon ui-icon-background ";this.options.icon?(this.icon||(this.icon=t(""),this.iconSpace=t(" "),this._addClass(this.iconSpace,"ui-checkboxradio-icon-space")),"checkbox"===this.type?(i+=e?"ui-icon-check ui-state-checked":"ui-icon-blank",this._removeClass(this.icon,null,e?"ui-icon-blank":"ui-icon-check")):i+="ui-icon-blank",this._addClass(this.icon,"ui-checkboxradio-icon",i),e||this._removeClass(this.icon,null,"ui-icon-check ui-state-checked"),this.icon.prependTo(this.label).after(this.iconSpace)):void 0!==this.icon&&(this.icon.remove(),this.iconSpace.remove(),delete this.icon)},_updateLabel:function(){var t=this.label.contents().not(this.element[0]);this.icon&&(t=t.not(this.icon[0])),this.iconSpace&&(t=t.not(this.iconSpace[0])),t.remove(),this.label.append(this.options.label)},refresh:function(){var t=this.element[0].checked,e=this.element[0].disabled;this._updateIcon(t),this._toggleClass(this.label,"ui-checkboxradio-checked","ui-state-active",t),null!==this.options.label&&this._updateLabel(),e!==this.options.disabled&&this._setOptions({disabled:e})}}]),t.ui.checkboxradio,t.widget("ui.button",{version:"1.12.1",defaultElement:"