Leonard Bruns committed on
Commit
d323598
·
0 Parent(s):

Add Vista example

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
.gitattributes ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ * text=auto eol=lf
2
+ Cargo.lock linguist-generated=false
.github/pull_request_template.md ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!--
2
+ * Keep your PR:s small and focused.
3
+ * The PR title is what ends up in the changelog, so make it descriptive!
4
+ * If applicable, add a screenshot or gif.
5
+ * Do NOT open PR:s from your `main` branch, as that makes it hard for maintainers to test and add commits to your PR.
6
+ * Remember to run `cargo fmt` and `cargo clippy`.
7
+ * Open the PR as a draft until you have self-reviewed it and it passes CI.
8
+ * When you have addressed a PR comment, mark it as resolved.
9
+
10
+ Please be patient!
11
+ -->
12
+
13
+ * Closes #ISSUE_NUMBER
.github/workflows/labels.yml ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copied from https://github.com/rerun-io/rerun_template
2
+
3
+ # https://github.com/marketplace/actions/require-labels
4
+ # Check for existence of labels
5
+ # See all our labels at https://github.com/rerun-io/rerun/issues/labels
6
+
7
+ name: PR Labels
8
+
9
+ on:
10
+ pull_request:
11
+ types:
12
+ - opened
13
+ - synchronize
14
+ - reopened
15
+ - labeled
16
+ - unlabeled
17
+
18
+ jobs:
19
+ label:
20
+ runs-on: ubuntu-latest
21
+ steps:
22
+ - name: Check for a "do-not-merge" label
23
+ uses: mheap/github-action-required-labels@v3
24
+ with:
25
+ mode: exactly
26
+ count: 0
27
+ labels: "do-not-merge"
.github/workflows/links.yml ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copied from https://github.com/rerun-io/rerun_template
2
+ on: [push, pull_request]
3
+
4
+ name: Link checker
5
+
6
+ jobs:
7
+ link-checker:
8
+ name: Check links
9
+ runs-on: ubuntu-latest
10
+ steps:
11
+ - uses: actions/checkout@v4
12
+
13
+ - name: Restore link checker cache
14
+ uses: actions/cache@v3
15
+ with:
16
+ path: .lycheecache
17
+ key: cache-lychee-${{ github.sha }}
18
+ restore-keys: cache-lychee-
19
+
20
+ # Check https://github.com/lycheeverse/lychee on how to run locally.
21
+ - name: Link Checker
22
+ id: lychee
23
+ uses: lycheeverse/lychee-action@v1.9.0
24
+ with:
25
+ fail: true
26
+ lycheeVersion: "0.14.3"
27
+ # When given a directory, lychee checks only markdown, html and text files, everything else we have to glob in manually.
28
+ args: |
29
+ --base . --cache --max-cache-age 1d . "**/*.rs" "**/*.toml" "**/*.hpp" "**/*.cpp" "**/CMakeLists.txt" "**/*.py" "**/*.yml"
.github/workflows/python.yml ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copied from https://github.com/rerun-io/rerun_template
2
+ # Disabled since this contains a lot of non-conforming code from the original repository
3
+ on: []
4
+
5
+ name: C++
6
+
7
+ jobs:
8
+ python-check:
9
+ name: Python
10
+ runs-on: ubuntu-latest
11
+ steps:
12
+ - uses: actions/checkout@v4
13
+
14
+ - uses: prefix-dev/setup-pixi@v0.5.2
15
+ with:
16
+ pixi-version: v0.19.0
17
+ cache: true
18
+
19
+ - run: pixi run py-fmt-check
20
+
21
+ - run: pixi run py-lint
.github/workflows/typos.yml ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copied from https://github.com/rerun-io/rerun_template
2
+
3
+ # https://github.com/crate-ci/typos
4
+ # Add exceptions to `.typos.toml`
5
+ # install and run locally: cargo install typos-cli && typos
6
+
7
+ name: Spell Check
8
+ on: [pull_request]
9
+
10
+ jobs:
11
+ run:
12
+ name: Spell Check
13
+ runs-on: ubuntu-latest
14
+ steps:
15
+ - name: Checkout Actions Repository
16
+ uses: actions/checkout@v4
17
+
18
+ - name: Check spelling of entire workspace
19
+ uses: crate-ci/typos@master
.gitignore ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Mac stuff:
2
+ .DS_Store
3
+
4
+ # C++ build directory
5
+ build
6
+
7
+ # Rust compile target directories:
8
+ target
9
+ target_ra
10
+ target_wasm
11
+
12
+ # https://github.com/lycheeverse/lychee
13
+ .lycheecache
14
+
15
+ # Pixi environment
16
+ .pixi
17
+
18
+ # Python stuff:
19
+ __pycache__
20
+ .mypy_cache
21
+ .ruff_cache
22
+ venv
23
+ .python-version
.mypy.ini ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [mypy]
2
+ files = .
3
+ exclude = build
4
+ namespace_packages = True
5
+ show_error_codes = True
6
+ strict = True
7
+ enable_error_code = redundant-expr, truthy-bool, ignore-without-code
8
+ ; plugins = numpy.typing.mypy_plugin
9
+ ignore_missing_imports = True
10
+ no_implicit_reexport = False
11
+ disallow_untyped_calls = False
.typos.toml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ # https://github.com/crate-ci/typos
2
+ # install: cargo install typos-cli
3
+ # run: typos
4
+
5
+ [default.extend-words]
6
+ teh = "teh" # part of @teh-cmc
.vscode/extensions.json ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ // See https://go.microsoft.com/fwlink/?LinkId=827846
3
+ // for the documentation about the extensions.json format
4
+ "recommendations": [
5
+ "charliermarsh.ruff",
6
+ "gaborv.flatbuffers",
7
+ "github.vscode-github-actions",
8
+ "josetr.cmake-language-support-vscode",
9
+ "ms-python.mypy-type-checker",
10
+ "ms-python.python",
11
+ "ms-vscode.cmake-tools",
12
+ "ms-vscode.cpptools-extension-pack",
13
+ "ms-vsliveshare.vsliveshare",
14
+ "polymeilex.wgsl",
15
+ "rust-lang.rust-analyzer",
16
+ "serayuzgur.crates",
17
+ "streetsidesoftware.code-spell-checker",
18
+ "tamasfe.even-better-toml",
19
+ "vadimcn.vscode-lldb",
20
+ "wayou.vscode-todo-highlight",
21
+ "webfreak.debug",
22
+ "xaver.clang-format", // C++ formatter
23
+ "zxh404.vscode-proto3",
24
+ "esbenp.prettier-vscode"
25
+ ]
26
+ }
.vscode/launch.json ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ // Use IntelliSense to learn about possible attributes.
3
+ // Hover to view descriptions of existing attributes.
4
+ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
5
+ "version": "0.2.0",
6
+ "configurations": [
7
+ // Python
8
+ {
9
+ "name": "Python Debugger: Current File",
10
+ "type": "debugpy",
11
+ "request": "launch",
12
+ "program": "${file}",
13
+ "console": "integratedTerminal"
14
+ },
15
+ // Rust:
16
+ {
17
+ "name": "Debug 'PROJ_NAME'",
18
+ "type": "lldb",
19
+ "request": "launch",
20
+ "cargo": {
21
+ "args": [
22
+ "build"
23
+ ],
24
+ "filter": {
25
+ "name": "PROJ_NAME",
26
+ "kind": "bin"
27
+ }
28
+ },
29
+ "args": [],
30
+ "cwd": "${workspaceFolder}",
31
+ "env": {
32
+ "RUST_LOG": "debug"
33
+ }
34
+ },
35
+ {
36
+ "name": "Launch Rust tests",
37
+ "type": "lldb",
38
+ "request": "launch",
39
+ "cargo": {
40
+ "args": [
41
+ "test",
42
+ "--no-run",
43
+ "--lib",
44
+ "--all-features"
45
+ ],
46
+ "filter": {
47
+ "kind": "lib"
48
+ }
49
+ },
50
+ "cwd": "${workspaceFolder}",
51
+ "env": {
52
+ "RUST_LOG": "debug"
53
+ }
54
+ },
55
+ ]
56
+ }
.vscode/settings.json ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "editor.formatOnSave": true,
3
+ "editor.semanticTokenColorCustomizations": {
4
+ "rules": {
5
+ "*.unsafe:rust": "#eb5046"
6
+ }
7
+ },
8
+ "files.autoGuessEncoding": true,
9
+ "files.insertFinalNewline": true,
10
+ "files.trimTrailingWhitespace": true,
11
+ // don't share a cargo lock with rust-analyzer.
12
+ // see https://github.com/rerun-io/rerun/pull/519 for rationale
13
+ "rust-analyzer.check.overrideCommand": [
14
+ "cargo",
15
+ "clippy",
16
+ "--target-dir=target_ra",
17
+ "--workspace",
18
+ "--message-format=json",
19
+ "--all-targets",
20
+ "--all-features"
21
+ ],
22
+ "rust-analyzer.cargo.buildScripts.overrideCommand": [
23
+ "cargo",
24
+ "check",
25
+ "--quiet",
26
+ "--target-dir=target_ra",
27
+ "--workspace",
28
+ "--message-format=json",
29
+ "--all-targets",
30
+ "--all-features",
31
+ ],
32
+ // Our build scripts are generating code.
33
+ // Having Rust Analyzer do this while doing other builds can lead to catastrophic failures.
34
+ // INCLUDING attempts to publish a new release!
35
+ "rust-analyzer.cargo.buildScripts.enable": false,
36
+ "C_Cpp.default.configurationProvider": "ms-vscode.cmake-tools", // Use cmake-tools to grab configs.
37
+ "C_Cpp.autoAddFileAssociations": false,
38
+ "cmake.buildDirectory": "${workspaceRoot}/build/debug",
39
+ "cmake.generator": "Ninja", // Use Ninja, just like we do in our just/pixi command.
40
+ "rust-analyzer.showUnlinkedFileNotification": false,
41
+ "ruff.format.args": [
42
+ "--config=pyproject.toml"
43
+ ],
44
+ "ruff.lint.args": [
45
+ "--config=pyproject.toml"
46
+ ],
47
+ "prettier.requireConfig": true,
48
+ "prettier.configPath": ".prettierrc.toml",
49
+ "[python]": {
50
+ "editor.defaultFormatter": "charliermarsh.ruff"
51
+ },
52
+ }
CODE_OF_CONDUCT.md ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Contributor Covenant Code of Conduct
2
+
3
+ ## Our Pledge
4
+
5
+ We as members, contributors, and leaders pledge to make participation in our
6
+ community a harassment-free experience for everyone, regardless of age, body
7
+ size, visible or invisible disability, ethnicity, sex characteristics, gender
8
+ identity and expression, level of experience, education, socio-economic status,
9
+ nationality, personal appearance, race, caste, color, religion, or sexual
10
+ identity and orientation.
11
+
12
+ We pledge to act and interact in ways that contribute to an open, welcoming,
13
+ diverse, inclusive, and healthy community.
14
+
15
+ ## Our Standards
16
+
17
+ Examples of behavior that contributes to a positive environment for our
18
+ community include:
19
+
20
+ * Demonstrating empathy and kindness toward other people
21
+ * Being respectful of differing opinions, viewpoints, and experiences
22
+ * Giving and gracefully accepting constructive feedback
23
+ * Accepting responsibility and apologizing to those affected by our mistakes,
24
+ and learning from the experience
25
+ * Focusing on what is best not just for us as individuals, but for the overall
26
+ community
27
+
28
+ Examples of unacceptable behavior include:
29
+
30
+ * The use of sexualized language or imagery, and sexual attention or advances of
31
+ any kind
32
+ * Trolling, insulting or derogatory comments, and personal or political attacks
33
+ * Public or private harassment
34
+ * Publishing others' private information, such as a physical or email address,
35
+ without their explicit permission
36
+ * Other conduct which could reasonably be considered inappropriate in a
37
+ professional setting
38
+
39
+ ## Enforcement Responsibilities
40
+
41
+ Community leaders are responsible for clarifying and enforcing our standards of
42
+ acceptable behavior and will take appropriate and fair corrective action in
43
+ response to any behavior that they deem inappropriate, threatening, offensive,
44
+ or harmful.
45
+
46
+ Community leaders have the right and responsibility to remove, edit, or reject
47
+ comments, commits, code, wiki edits, issues, and other contributions that are
48
+ not aligned to this Code of Conduct, and will communicate reasons for moderation
49
+ decisions when appropriate.
50
+
51
+ ## Scope
52
+
53
+ This Code of Conduct applies within all community spaces, and also applies when
54
+ an individual is officially representing the community in public spaces.
55
+ Examples of representing our community include using an official e-mail address,
56
+ posting via an official social media account, or acting as an appointed
57
+ representative at an online or offline event.
58
+
59
+ ## Enforcement
60
+
61
+ Instances of abusive, harassing, or otherwise unacceptable behavior may be
62
+ reported to the community leaders responsible for enforcement at
63
+ opensource@rerun.io.
64
+ All complaints will be reviewed and investigated promptly and fairly.
65
+
66
+ All community leaders are obligated to respect the privacy and security of the
67
+ reporter of any incident.
68
+
69
+ ## Enforcement Guidelines
70
+
71
+ Community leaders will follow these Community Impact Guidelines in determining
72
+ the consequences for any action they deem in violation of this Code of Conduct:
73
+
74
+ ### 1. Correction
75
+
76
+ **Community Impact**: Use of inappropriate language or other behavior deemed
77
+ unprofessional or unwelcome in the community.
78
+
79
+ **Consequence**: A private, written warning from community leaders, providing
80
+ clarity around the nature of the violation and an explanation of why the
81
+ behavior was inappropriate. A public apology may be requested.
82
+
83
+ ### 2. Warning
84
+
85
+ **Community Impact**: A violation through a single incident or series of
86
+ actions.
87
+
88
+ **Consequence**: A warning with consequences for continued behavior. No
89
+ interaction with the people involved, including unsolicited interaction with
90
+ those enforcing the Code of Conduct, for a specified period of time. This
91
+ includes avoiding interactions in community spaces as well as external channels
92
+ like social media. Violating these terms may lead to a temporary or permanent
93
+ ban.
94
+
95
+ ### 3. Temporary Ban
96
+
97
+ **Community Impact**: A serious violation of community standards, including
98
+ sustained inappropriate behavior.
99
+
100
+ **Consequence**: A temporary ban from any sort of interaction or public
101
+ communication with the community for a specified period of time. No public or
102
+ private interaction with the people involved, including unsolicited interaction
103
+ with those enforcing the Code of Conduct, is allowed during this period.
104
+ Violating these terms may lead to a permanent ban.
105
+
106
+ ### 4. Permanent Ban
107
+
108
+ **Community Impact**: Demonstrating a pattern of violation of community
109
+ standards, including sustained inappropriate behavior, harassment of an
110
+ individual, or aggression toward or disparagement of classes of individuals.
111
+
112
+ **Consequence**: A permanent ban from any sort of public interaction within the
113
+ community.
114
+
115
+ ## Attribution
116
+
117
+ This Code of Conduct is adapted from the [Contributor Covenant][homepage],
118
+ version 2.1, available at
119
+ [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
120
+
121
+ Community Impact Guidelines were inspired by
122
+ [Mozilla's code of conduct enforcement ladder][Mozilla CoC].
123
+
124
+ For answers to common questions about this code of conduct, see the FAQ at
125
+ [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
126
+ [https://www.contributor-covenant.org/translations][translations].
127
+
128
+ [homepage]: https://www.contributor-covenant.org
129
+ [v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
130
+ [Mozilla CoC]: https://github.com/mozilla/diversity
131
+ [FAQ]: https://www.contributor-covenant.org/faq
132
+ [translations]: https://www.contributor-covenant.org/translations
LICENSE-APACHE ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
LICENSE-MIT ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Copyright (c) 2024 Rerun Technologies AB <opensource@rerun.io>
2
+
3
+ Permission is hereby granted, free of charge, to any
4
+ person obtaining a copy of this software and associated
5
+ documentation files (the "Software"), to deal in the
6
+ Software without restriction, including without
7
+ limitation the rights to use, copy, modify, merge,
8
+ publish, distribute, sublicense, and/or sell copies of
9
+ the Software, and to permit persons to whom the Software
10
+ is furnished to do so, subject to the following
11
+ conditions:
12
+
13
+ The above copyright notice and this permission notice
14
+ shall be included in all copies or substantial portions
15
+ of the Software.
16
+
17
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
18
+ ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
19
+ TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
20
+ PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
21
+ SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
22
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
24
+ IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25
+ DEALINGS IN THE SOFTWARE.
README.md ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Vista
3
+ emoji: 🚗
4
+ colorFrom: blue
5
+ colorTo: green
6
+ sdk: gradio
7
+ sdk_version: 4.36.1
8
+ app_file: app.py
9
+ pinned: false
10
+ license: apache-2.0
11
+ ---
12
+
13
+ # Vista: A Generalizable Driving World Model with High Fidelity and Versatile Controllability
14
+
15
+ https://github.com/rerun-io/hf-example-vista/assets/9785832/0b9a01ca-90a2-4b36-98fc-a7a7b378fd54
16
+
17
+ [Shenyuan Gao](https://github.com/Little-Podi), [Jiazhi Yang](https://scholar.google.com/citations?user=Ju7nGX8AAAAJ&hl=en), [Li Chen](https://scholar.google.com/citations?user=ulZxvY0AAAAJ&hl=en), [Kashyap Chitta](https://kashyap7x.github.io/), [Yihang Qiu](https://scholar.google.com/citations?user=qgRUOdIAAAAJ&hl=en), [Andreas Geiger](https://www.cvlibs.net/), [Jun Zhang](https://eejzhang.people.ust.hk/), [Hongyang Li](https://lihongyang.info/)
18
+
19
+ This is a demo of the [Vista model](https://github.com/OpenDriveLab/Vista), a driving world model that can be used to simulate a variety of driving scenarios. This demo uses [Rerun](https://rerun.io/)'s custom [gradio component](https://www.gradio.app/custom-components/gallery?id=radames%2Fgradio_rerun) to livestream the model's output and show intermediate results.
20
+
21
+ [📜technical report](https://arxiv.org/abs/2405.17398), [🎬video demos](https://vista-demo.github.io/), [🤗model weights](https://huggingface.co/OpenDriveLab/Vista)
22
+
23
+ Please refer to the [original repository](https://github.com/OpenDriveLab/Vista) for the original code base and README.
24
+
25
+ You can try the example on Rerun's HuggingFace space [here](https://huggingface.co/spaces/rerun/Vista).
26
+
27
+ ## Run the example locally
28
+ To run this example locally use the following command (you need a GPU with at least 20GB of memory, tested with an RTX 4090):
29
+ ```bash
30
+ pixi run example
31
+ ```
32
+
33
+ You can specify the first image, the number of generated segments, and the number of diffusion steps per segment:
34
+ ```bash
35
+ pixi run example --img-path "example_images/streetview.jpg" --num-segments 10 --num-steps 100
36
+ ```
37
+
38
+ To see all other options, use the following:
39
+ ```bash
40
+ pixi run example --help
41
+ ```
app.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Gradio interface for Vista model."""
2
+ from __future__ import annotations
3
+
4
+ import glob
5
+ import os
6
+ import queue
7
+ import threading
8
+
9
+ import gradio as gr
10
+ import gradio_rerun
11
+ import rerun as rr
12
+ import spaces
13
+
14
+ import vista
15
+
16
+
17
+ @spaces.GPU(duration=400)
18
+ @rr.thread_local_stream("Vista")
19
+ def generate_gradio(
20
+ first_frame_file_name: str,
21
+ n_rounds: float=3,
22
+ n_steps: float=10,
23
+ height=576,
24
+ width=1024,
25
+ n_frames=25,
26
+ cfg_scale=2.5,
27
+ cond_aug=0.0,
28
+ ):
29
+ global model
30
+
31
+ n_rounds = int(n_rounds)
32
+ n_steps = int(n_steps)
33
+
34
+ # Use a queue to log immediately from internals
35
+ log_queue = queue.SimpleQueue()
36
+
37
+ stream = rr.binary_stream()
38
+
39
+ blueprint = vista.generate_blueprint(n_rounds)
40
+ rr.send_blueprint(blueprint)
41
+ yield stream.read()
42
+
43
+ handle = threading.Thread(
44
+ target=vista.run_sampling,
45
+ args=[
46
+ log_queue,
47
+ first_frame_file_name,
48
+ height,
49
+ width,
50
+ n_rounds,
51
+ n_frames,
52
+ n_steps,
53
+ cfg_scale,
54
+ cond_aug,
55
+ model,
56
+ ],
57
+ )
58
+ handle.start()
59
+ while True:
60
+ msg = log_queue.get()
61
+ if msg == "done":
62
+ break
63
+ else:
64
+ entity_path, entity, times = msg
65
+ rr.reset_time()
66
+ for timeline, time in times:
67
+ if isinstance(time, int):
68
+ rr.set_time_sequence(timeline, time)
69
+ else:
70
+ rr.set_time_seconds(timeline, time)
71
+ rr.log(entity_path, entity)
72
+ yield stream.read()
73
+ handle.join()
74
+
75
+
76
+ model = vista.create_model()
77
+
78
+ with gr.Blocks(css="style.css") as demo:
79
+ gr.Markdown(
80
+ """
81
+ # Vista: A Generalizable Driving World Model with High Fidelity and Versatile Controllability
82
+
83
+ [Shenyuan Gao](https://github.com/Little-Podi), [Jiazhi Yang](https://scholar.google.com/citations?user=Ju7nGX8AAAAJ&hl=en), [Li Chen](https://scholar.google.com/citations?user=ulZxvY0AAAAJ&hl=en), [Kashyap Chitta](https://kashyap7x.github.io/), [Yihang Qiu](https://scholar.google.com/citations?user=qgRUOdIAAAAJ&hl=en), [Andreas Geiger](https://www.cvlibs.net/), [Jun Zhang](https://eejzhang.people.ust.hk/), [Hongyang Li](https://lihongyang.info/)
84
+
85
+ This is a demo of the [Vista model](https://github.com/OpenDriveLab/Vista), a driving world model that can be used to simulate a variety of driving scenarios. This demo uses [Rerun](https://rerun.io/)'s custom [gradio component](https://www.gradio.app/custom-components/gallery?id=radames%2Fgradio_rerun) to livestream the model's output and show intermediate results.
86
+
87
+ [📜technical report](https://arxiv.org/abs/2405.17398), [🎬video demos](https://vista-demo.github.io/), [🤗model weights](https://huggingface.co/OpenDriveLab/Vista)
88
+
89
+ Note that the GPU time is limited to 400 seconds per run. If you need more time, you can run the model locally or on your own server.
90
+ """
91
+ )
92
+ first_frame = gr.Image(sources="upload", type="filepath")
93
+ example_dir_path = os.path.join(os.path.dirname(__file__), "example_images")
94
+ example_file_paths = sorted(glob.glob(os.path.join(example_dir_path, "*.*")))
95
+ example_gallery = gr.Examples(
96
+ examples=example_file_paths,
97
+ inputs=first_frame,
98
+ cache_examples=False,
99
+ )
100
+
101
+ btn = gr.Button("Generate video")
102
+ num_rounds = gr.Slider(
103
+ label="Segments",
104
+ info="Number of 25 frame segments to generate. Higher values lead to longer videos. Try to keep the product of segments and steps below 30 to avoid running out of time.",
105
+ minimum=1,
106
+ maximum=5,
107
+ value=2,
108
+ step=1
109
+ )
110
+ num_steps = gr.Slider(
111
+ label="Diffusion Steps",
112
+ info="Number of diffusion steps per segment. Higher values lead to more detailed videos. Try to keep the product of segments and steps below 30 to avoid running out of time.",
113
+ minimum=1,
114
+ maximum=50,
115
+ value=15,
116
+ step=1
117
+ )
118
+
119
+ with gr.Row():
120
+ viewer = gradio_rerun.Rerun(streaming=True)
121
+ btn.click(
122
+ generate_gradio,
123
+ inputs=[first_frame, num_rounds, num_steps],
124
+ outputs=[viewer],
125
+ )
126
+
127
+ demo.launch()
example_images/nus-0.jpg ADDED
example_images/nus-1.jpg ADDED
example_images/nus-2.jpg ADDED
example_images/nus-3.jpg ADDED
example_images/nus-4.jpg ADDED
example_images/streetview.jpg ADDED
lychee.toml ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copied from https://github.com/rerun-io/rerun_template
2
+
3
+ ################################################################################
4
+ # Config for the link checker lychee.
5
+ #
6
+ # Download & learn more at:
7
+ # https://github.com/lycheeverse/lychee
8
+ #
9
+ # Example config:
10
+ # https://github.com/lycheeverse/lychee/blob/master/lychee.example.toml
11
+ #
12
+ # Run `lychee . --dump` to list all found links that are being checked.
13
+ #
14
+ # Note that by default lychee will only check markdown and html files,
15
+ # to check any other files you have to point to them explicitly, e.g.:
16
+ # `lychee **/*.rs`
17
+ # To make things worse, `exclude_path` is ignored for these globs,
18
+ # so local runs with lots of gitignored files will be slow.
19
+ # (https://github.com/lycheeverse/lychee/issues/1405)
20
+ #
21
+ # This unfortunately doesn't list anything for non-glob checks.
22
+ ################################################################################
23
+
24
+ # Maximum number of concurrent link checks.
25
+ # Workaround for "too many open files" error on MacOS, see https://github.com/lycheeverse/lychee/issues/1248
26
+ max_concurrency = 32
27
+
28
+ # Check links inside `<code>` and `<pre>` blocks as well as Markdown code blocks.
29
+ include_verbatim = true
30
+
31
+ # Proceed for server connections considered insecure (invalid TLS).
32
+ insecure = true
33
+
34
+ # Exclude these filesystem paths from getting checked.
35
+ exclude_path = [
36
+ # Unfortunately lychee doesn't yet read .gitignore https://github.com/lycheeverse/lychee/issues/1331
37
+ # The following entries are there because of that:
38
+ ".git",
39
+ "__pycache__",
40
+ "_deps/",
41
+ ".pixi",
42
+ "build",
43
+ "target_ra",
44
+ "target_wasm",
45
+ "target",
46
+ "venv",
47
+ ]
48
+
49
+ # Exclude URLs and mail addresses from checking (supports regex).
50
+ exclude = [
51
+ # Skip speculative links
52
+ '.*?speculative-link',
53
+
54
+ # Strings with replacements.
55
+ '/__VIEWER_VERSION__/', # Replacement variable __VIEWER_VERSION__.
56
+ '/\$', # Replacement variable $.
57
+ '/GIT_HASH/', # Replacement variable GIT_HASH.
58
+ '\{\}', # Ignore links with string interpolation.
59
+ '\$relpath\^', # Relative paths as used by rerun_cpp's doc header.
60
+ '%7B.+%7D', # Ignore strings that look like ready to use links but contain a replacement strings. The URL escaping is for '{.+}' (this seems to be needed for html embedded urls since lychee assumes they use this encoding).
61
+ '%7B%7D', # Ignore links with string interpolation, escaped variant.
62
+
63
+ # Local links that require further setup.
64
+ 'http://127.0.0.1',
65
+ 'http://localhost',
66
+ 'recording:/', # rrd recording link.
67
+ 'ws:/',
68
+ 're_viewer.js', # Build artifact that html is linking to.
69
+
70
+ # Api endpoints.
71
+ 'https://fonts.googleapis.com/', # Font API entrypoint, not a link.
72
+ 'https://fonts.gstatic.com/', # Font API entrypoint, not a link.
73
+ 'https://tel.rerun.io/', # Analytics endpoint.
74
+
75
+ # Avoid rate limiting.
76
+ 'https://crates.io/crates/.*', # Avoid crates.io rate-limiting
77
+ 'https://github.com/rerun-io/rerun/commit/.*', # Ignore links to our own commits (typically in changelog).
78
+ 'https://github.com/rerun-io/rerun/pull/.*', # Ignore links to our own pull requests (typically in changelog).
79
+
80
+ # Used in rerun_template repo until the user search-replaces `new_repo_name`
81
+ 'https://github.com/rerun-io/new_repo_name',
82
+ ]
main.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Command line interface for generating videos from the model."""
2
+ from __future__ import annotations
3
+
4
+ import argparse
5
+ import queue
6
+ import threading
7
+
8
+ import rerun as rr
9
+
10
+ import vista
11
+
12
+
13
+ def generate_local(
14
+ first_frame_file_name: str,
15
+ height=576,
16
+ width=1024,
17
+ n_rounds=4,
18
+ n_frames=25,
19
+ n_steps=10,
20
+ cfg_scale=2.5,
21
+ cond_aug=0.0,
22
+ ):
23
+ # Use a queue to log immediately from internals
24
+ log_queue = queue.SimpleQueue()
25
+
26
+ handle = threading.Thread(
27
+ target=vista.run_sampling,
28
+ args=[
29
+ log_queue,
30
+ first_frame_file_name,
31
+ height,
32
+ width,
33
+ n_rounds,
34
+ n_frames,
35
+ n_steps,
36
+ cfg_scale,
37
+ cond_aug,
38
+ ],
39
+ )
40
+ handle.start()
41
+ while True:
42
+ msg = log_queue.get()
43
+ if msg == "done":
44
+ break
45
+ else:
46
+ entity_path, entity, times = msg
47
+ rr.reset_time()
48
+ for timeline, time in times:
49
+ if isinstance(time, int):
50
+ rr.set_time_sequence(timeline, time)
51
+ else:
52
+ rr.set_time_seconds(timeline, time)
53
+ rr.log(entity_path, entity)
54
+ handle.join()
55
+
56
+
57
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Generate video conditioned on a single image using the Vista model."
    )
    parser.add_argument(
        "--img-path",
        type=str,
        # NOTE(review): the previous help text mentioned a "Canny edge
        # detector", which does not apply here — this image is the
        # conditioning first frame passed to `generate_local`.
        help="Path to the image used as the conditioning (first) frame of the generated video.",
        default="./example_images/nus-0.jpg",
    )
    parser.add_argument(
        "--num-steps",
        type=int,
        help="Number of diffusion steps per image. Recommended range: 10-50. Higher values result in more detailed images and less blurry results.",
        default=20,
    )
    parser.add_argument(
        "--num-segments",
        type=int,
        help="Number of segments to generate. Each segment consists of 25 frames.",
        default=3,
    )
    # Let Rerun register its standard CLI flags and initialize the recording
    # stream from the parsed arguments.
    rr.script_add_args(parser)
    args = parser.parse_args()
    rr.script_setup(
        args,
        "rerun_example_vista",
        default_blueprint=vista.generate_blueprint(args.num_segments),
    )

    generate_local(args.img_path, n_steps=args.num_steps, n_rounds=args.num_segments)
pixi.lock ADDED
@@ -0,0 +1,2130 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ version: 5
2
+ environments:
3
+ default:
4
+ channels:
5
+ - url: https://conda.anaconda.org/conda-forge/
6
+ packages:
7
+ linux-64:
8
+ - conda: https://conda.anaconda.org/conda-forge/linux-64/_libgcc_mutex-0.1-conda_forge.tar.bz2
9
+ - conda: https://conda.anaconda.org/conda-forge/linux-64/_openmp_mutex-4.5-2_gnu.tar.bz2
10
+ - conda: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.1.0-py310hc6cd4ac_1.conda
11
+ - conda: https://conda.anaconda.org/conda-forge/linux-64/bzip2-1.0.8-hd590300_5.conda
12
+ - conda: https://conda.anaconda.org/conda-forge/linux-64/ca-certificates-2024.2.2-hbcca054_0.conda
13
+ - conda: https://conda.anaconda.org/conda-forge/linux-64/ld_impl_linux-64-2.40-h41732ed_0.conda
14
+ - conda: https://conda.anaconda.org/conda-forge/linux-64/libffi-3.4.2-h7f98852_5.tar.bz2
15
+ - conda: https://conda.anaconda.org/conda-forge/linux-64/libgcc-ng-13.2.0-h807b86a_5.conda
16
+ - conda: https://conda.anaconda.org/conda-forge/linux-64/libgomp-13.2.0-h807b86a_5.conda
17
+ - conda: https://conda.anaconda.org/conda-forge/linux-64/libnsl-2.0.1-hd590300_0.conda
18
+ - conda: https://conda.anaconda.org/conda-forge/linux-64/libsqlite-3.45.2-h2797004_0.conda
19
+ - conda: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-ng-13.2.0-h7e041cc_5.conda
20
+ - conda: https://conda.anaconda.org/conda-forge/linux-64/libuuid-2.38.1-h0b41bf4_0.conda
21
+ - conda: https://conda.anaconda.org/conda-forge/linux-64/libxcrypt-4.4.36-hd590300_1.conda
22
+ - conda: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.2.13-hd590300_5.conda
23
+ - conda: https://conda.anaconda.org/conda-forge/linux-64/mypy-1.8.0-py310h2372a71_0.conda
24
+ - conda: https://conda.anaconda.org/conda-forge/noarch/mypy_extensions-1.0.0-pyha770c72_0.conda
25
+ - conda: https://conda.anaconda.org/conda-forge/linux-64/ncurses-6.4.20240210-h59595ed_0.conda
26
+ - conda: https://conda.anaconda.org/conda-forge/linux-64/openssl-3.2.1-hd590300_1.conda
27
+ - conda: https://conda.anaconda.org/conda-forge/noarch/pip-24.0-pyhd8ed1ab_0.conda
28
+ - conda: https://conda.anaconda.org/conda-forge/linux-64/psutil-6.0.0-py310hc51659f_0.conda
29
+ - conda: https://conda.anaconda.org/conda-forge/noarch/pysocks-1.7.1-pyha2e5f31_6.tar.bz2
30
+ - conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.10.14-hd12c33a_0_cpython.conda
31
+ - conda: https://conda.anaconda.org/conda-forge/linux-64/python_abi-3.10-4_cp310.conda
32
+ - conda: https://conda.anaconda.org/conda-forge/linux-64/readline-8.2-h8228510_1.conda
33
+ - conda: https://conda.anaconda.org/conda-forge/linux-64/ruff-0.3.7-py310h3d77a66_0.conda
34
+ - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-70.1.1-pyhd8ed1ab_0.conda
35
+ - conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_h4845f30_101.conda
36
+ - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.0.1-pyhd8ed1ab_0.tar.bz2
37
+ - conda: https://conda.anaconda.org/conda-forge/noarch/types-requests-2.31.0.20240406-pyhd8ed1ab_0.conda
38
+ - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.11.0-pyha770c72_0.conda
39
+ - conda: https://conda.anaconda.org/conda-forge/linux-64/typos-1.20.9-he8a937b_0.conda
40
+ - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2024a-h0c530f3_0.conda
41
+ - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.2.1-pyhd8ed1ab_0.conda
42
+ - conda: https://conda.anaconda.org/conda-forge/noarch/wheel-0.43.0-pyhd8ed1ab_1.conda
43
+ - conda: https://conda.anaconda.org/conda-forge/linux-64/xz-5.2.6-h166bdaf_0.tar.bz2
44
+ linux-aarch64:
45
+ - conda: https://conda.anaconda.org/conda-forge/linux-aarch64/_openmp_mutex-4.5-2_gnu.tar.bz2
46
+ - conda: https://conda.anaconda.org/conda-forge/linux-aarch64/brotli-python-1.1.0-py310hbb3657e_1.conda
47
+ - conda: https://conda.anaconda.org/conda-forge/linux-aarch64/bzip2-1.0.8-h31becfc_5.conda
48
+ - conda: https://conda.anaconda.org/conda-forge/linux-aarch64/ca-certificates-2024.2.2-hcefe29a_0.conda
49
+ - conda: https://conda.anaconda.org/conda-forge/linux-aarch64/ld_impl_linux-aarch64-2.40-h2d8c526_0.conda
50
+ - conda: https://conda.anaconda.org/conda-forge/linux-aarch64/libffi-3.4.2-h3557bc0_5.tar.bz2
51
+ - conda: https://conda.anaconda.org/conda-forge/linux-aarch64/libgcc-ng-13.2.0-hf8544c7_5.conda
52
+ - conda: https://conda.anaconda.org/conda-forge/linux-aarch64/libgomp-13.2.0-hf8544c7_5.conda
53
+ - conda: https://conda.anaconda.org/conda-forge/linux-aarch64/libnsl-2.0.1-h31becfc_0.conda
54
+ - conda: https://conda.anaconda.org/conda-forge/linux-aarch64/libsqlite-3.45.2-h194ca79_0.conda
55
+ - conda: https://conda.anaconda.org/conda-forge/linux-aarch64/libstdcxx-ng-13.2.0-h9a76618_5.conda
56
+ - conda: https://conda.anaconda.org/conda-forge/linux-aarch64/libuuid-2.38.1-hb4cce97_0.conda
57
+ - conda: https://conda.anaconda.org/conda-forge/linux-aarch64/libxcrypt-4.4.36-h31becfc_1.conda
58
+ - conda: https://conda.anaconda.org/conda-forge/linux-aarch64/libzlib-1.2.13-h31becfc_5.conda
59
+ - conda: https://conda.anaconda.org/conda-forge/linux-aarch64/mypy-1.8.0-py310hb299538_0.conda
60
+ - conda: https://conda.anaconda.org/conda-forge/noarch/mypy_extensions-1.0.0-pyha770c72_0.conda
61
+ - conda: https://conda.anaconda.org/conda-forge/linux-aarch64/ncurses-6.4.20240210-h0425590_0.conda
62
+ - conda: https://conda.anaconda.org/conda-forge/linux-aarch64/openssl-3.2.1-h31becfc_1.conda
63
+ - conda: https://conda.anaconda.org/conda-forge/noarch/pip-24.0-pyhd8ed1ab_0.conda
64
+ - conda: https://conda.anaconda.org/conda-forge/linux-aarch64/psutil-6.0.0-py310hb52b2da_0.conda
65
+ - conda: https://conda.anaconda.org/conda-forge/noarch/pysocks-1.7.1-pyha2e5f31_6.tar.bz2
66
+ - conda: https://conda.anaconda.org/conda-forge/linux-aarch64/python-3.10.14-hbbe8eec_0_cpython.conda
67
+ - conda: https://conda.anaconda.org/conda-forge/linux-aarch64/python_abi-3.10-4_cp310.conda
68
+ - conda: https://conda.anaconda.org/conda-forge/linux-aarch64/readline-8.2-h8fc344f_1.conda
69
+ - conda: https://conda.anaconda.org/conda-forge/linux-aarch64/ruff-0.3.7-py310hf6424b7_0.conda
70
+ - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-70.1.1-pyhd8ed1ab_0.conda
71
+ - conda: https://conda.anaconda.org/conda-forge/linux-aarch64/tk-8.6.13-h194ca79_0.conda
72
+ - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.0.1-pyhd8ed1ab_0.tar.bz2
73
+ - conda: https://conda.anaconda.org/conda-forge/noarch/types-requests-2.31.0.20240406-pyhd8ed1ab_0.conda
74
+ - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.11.0-pyha770c72_0.conda
75
+ - conda: https://conda.anaconda.org/conda-forge/linux-aarch64/typos-1.20.9-h1d8f897_0.conda
76
+ - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2024a-h0c530f3_0.conda
77
+ - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.2.1-pyhd8ed1ab_0.conda
78
+ - conda: https://conda.anaconda.org/conda-forge/noarch/wheel-0.43.0-pyhd8ed1ab_1.conda
79
+ - conda: https://conda.anaconda.org/conda-forge/linux-aarch64/xz-5.2.6-h9cdd2b7_0.tar.bz2
80
+ osx-64:
81
+ - conda: https://conda.anaconda.org/conda-forge/osx-64/brotli-python-1.1.0-py310h9e9d8ca_1.conda
82
+ - conda: https://conda.anaconda.org/conda-forge/osx-64/bzip2-1.0.8-h10d778d_5.conda
83
+ - conda: https://conda.anaconda.org/conda-forge/osx-64/ca-certificates-2024.2.2-h8857fd0_0.conda
84
+ - conda: https://conda.anaconda.org/conda-forge/osx-64/libcxx-16.0.6-hd57cbcb_0.conda
85
+ - conda: https://conda.anaconda.org/conda-forge/osx-64/libffi-3.4.2-h0d85af4_5.tar.bz2
86
+ - conda: https://conda.anaconda.org/conda-forge/osx-64/libsqlite-3.45.2-h92b6c6a_0.conda
87
+ - conda: https://conda.anaconda.org/conda-forge/osx-64/libzlib-1.2.13-h8a1eda9_5.conda
88
+ - conda: https://conda.anaconda.org/conda-forge/osx-64/mypy-1.8.0-py310hb372a2b_0.conda
89
+ - conda: https://conda.anaconda.org/conda-forge/noarch/mypy_extensions-1.0.0-pyha770c72_0.conda
90
+ - conda: https://conda.anaconda.org/conda-forge/osx-64/ncurses-6.4.20240210-h73e2aa4_0.conda
91
+ - conda: https://conda.anaconda.org/conda-forge/osx-64/openssl-3.2.1-hd75f5a5_1.conda
92
+ - conda: https://conda.anaconda.org/conda-forge/noarch/pip-24.0-pyhd8ed1ab_0.conda
93
+ - conda: https://conda.anaconda.org/conda-forge/osx-64/psutil-6.0.0-py310h936d840_0.conda
94
+ - conda: https://conda.anaconda.org/conda-forge/noarch/pysocks-1.7.1-pyha2e5f31_6.tar.bz2
95
+ - conda: https://conda.anaconda.org/conda-forge/osx-64/python-3.10.14-h00d2728_0_cpython.conda
96
+ - conda: https://conda.anaconda.org/conda-forge/osx-64/python_abi-3.10-4_cp310.conda
97
+ - conda: https://conda.anaconda.org/conda-forge/osx-64/readline-8.2-h9e318b2_1.conda
98
+ - conda: https://conda.anaconda.org/conda-forge/osx-64/ruff-0.3.7-py310hdac29b7_0.conda
99
+ - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-70.1.1-pyhd8ed1ab_0.conda
100
+ - conda: https://conda.anaconda.org/conda-forge/osx-64/tk-8.6.13-h1abcd95_1.conda
101
+ - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.0.1-pyhd8ed1ab_0.tar.bz2
102
+ - conda: https://conda.anaconda.org/conda-forge/noarch/types-requests-2.31.0.20240406-pyhd8ed1ab_0.conda
103
+ - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.11.0-pyha770c72_0.conda
104
+ - conda: https://conda.anaconda.org/conda-forge/osx-64/typos-1.20.9-h11a7dfb_0.conda
105
+ - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2024a-h0c530f3_0.conda
106
+ - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.2.1-pyhd8ed1ab_0.conda
107
+ - conda: https://conda.anaconda.org/conda-forge/noarch/wheel-0.43.0-pyhd8ed1ab_1.conda
108
+ - conda: https://conda.anaconda.org/conda-forge/osx-64/xz-5.2.6-h775f41a_0.tar.bz2
109
+ osx-arm64:
110
+ - conda: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-python-1.1.0-py310h1253130_1.conda
111
+ - conda: https://conda.anaconda.org/conda-forge/osx-arm64/bzip2-1.0.8-h93a5062_5.conda
112
+ - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ca-certificates-2024.2.2-hf0a4a13_0.conda
113
+ - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libcxx-16.0.6-h4653b0c_0.conda
114
+ - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libffi-3.4.2-h3422bc3_5.tar.bz2
115
+ - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libsqlite-3.45.2-h091b4b1_0.conda
116
+ - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libzlib-1.2.13-h53f4e23_5.conda
117
+ - conda: https://conda.anaconda.org/conda-forge/osx-arm64/mypy-1.8.0-py310hd125d64_0.conda
118
+ - conda: https://conda.anaconda.org/conda-forge/noarch/mypy_extensions-1.0.0-pyha770c72_0.conda
119
+ - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ncurses-6.4.20240210-h078ce10_0.conda
120
+ - conda: https://conda.anaconda.org/conda-forge/osx-arm64/openssl-3.2.1-h0d3ecfb_1.conda
121
+ - conda: https://conda.anaconda.org/conda-forge/noarch/pip-24.0-pyhd8ed1ab_0.conda
122
+ - conda: https://conda.anaconda.org/conda-forge/osx-arm64/psutil-6.0.0-py310ha6dd24b_0.conda
123
+ - conda: https://conda.anaconda.org/conda-forge/noarch/pysocks-1.7.1-pyha2e5f31_6.tar.bz2
124
+ - conda: https://conda.anaconda.org/conda-forge/osx-arm64/python-3.10.14-h2469fbe_0_cpython.conda
125
+ - conda: https://conda.anaconda.org/conda-forge/osx-arm64/python_abi-3.10-4_cp310.conda
126
+ - conda: https://conda.anaconda.org/conda-forge/osx-arm64/readline-8.2-h92ec313_1.conda
127
+ - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ruff-0.3.7-py310h81561d7_0.conda
128
+ - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-70.1.1-pyhd8ed1ab_0.conda
129
+ - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tk-8.6.13-h5083fa2_1.conda
130
+ - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.0.1-pyhd8ed1ab_0.tar.bz2
131
+ - conda: https://conda.anaconda.org/conda-forge/noarch/types-requests-2.31.0.20240406-pyhd8ed1ab_0.conda
132
+ - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.11.0-pyha770c72_0.conda
133
+ - conda: https://conda.anaconda.org/conda-forge/osx-arm64/typos-1.20.9-h5ef7bb8_0.conda
134
+ - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2024a-h0c530f3_0.conda
135
+ - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.2.1-pyhd8ed1ab_0.conda
136
+ - conda: https://conda.anaconda.org/conda-forge/noarch/wheel-0.43.0-pyhd8ed1ab_1.conda
137
+ - conda: https://conda.anaconda.org/conda-forge/osx-arm64/xz-5.2.6-h57fd34a_0.tar.bz2
138
+ win-64:
139
+ - conda: https://conda.anaconda.org/conda-forge/win-64/brotli-python-1.1.0-py310h00ffb61_1.conda
140
+ - conda: https://conda.anaconda.org/conda-forge/win-64/bzip2-1.0.8-hcfcfb64_5.conda
141
+ - conda: https://conda.anaconda.org/conda-forge/win-64/ca-certificates-2024.2.2-h56e8100_0.conda
142
+ - conda: https://conda.anaconda.org/conda-forge/win-64/libffi-3.4.2-h8ffe710_5.tar.bz2
143
+ - conda: https://conda.anaconda.org/conda-forge/win-64/libsqlite-3.45.2-hcfcfb64_0.conda
144
+ - conda: https://conda.anaconda.org/conda-forge/win-64/libzlib-1.2.13-hcfcfb64_5.conda
145
+ - conda: https://conda.anaconda.org/conda-forge/win-64/m2w64-gcc-libgfortran-5.3.0-6.tar.bz2
146
+ - conda: https://conda.anaconda.org/conda-forge/win-64/m2w64-gcc-libs-5.3.0-7.tar.bz2
147
+ - conda: https://conda.anaconda.org/conda-forge/win-64/m2w64-gcc-libs-core-5.3.0-7.tar.bz2
148
+ - conda: https://conda.anaconda.org/conda-forge/win-64/m2w64-gmp-6.1.0-2.tar.bz2
149
+ - conda: https://conda.anaconda.org/conda-forge/win-64/m2w64-libwinpthread-git-5.0.0.4634.697f757-2.tar.bz2
150
+ - conda: https://conda.anaconda.org/conda-forge/win-64/msys2-conda-epoch-20160418-1.tar.bz2
151
+ - conda: https://conda.anaconda.org/conda-forge/win-64/mypy-1.8.0-py310h8d17308_0.conda
152
+ - conda: https://conda.anaconda.org/conda-forge/noarch/mypy_extensions-1.0.0-pyha770c72_0.conda
153
+ - conda: https://conda.anaconda.org/conda-forge/win-64/openssl-3.2.1-hcfcfb64_1.conda
154
+ - conda: https://conda.anaconda.org/conda-forge/noarch/pip-24.0-pyhd8ed1ab_0.conda
155
+ - conda: https://conda.anaconda.org/conda-forge/win-64/psutil-6.0.0-py310ha8f682b_0.conda
156
+ - conda: https://conda.anaconda.org/conda-forge/noarch/pysocks-1.7.1-pyh0701188_6.tar.bz2
157
+ - conda: https://conda.anaconda.org/conda-forge/win-64/python-3.10.14-h4de0772_0_cpython.conda
158
+ - conda: https://conda.anaconda.org/conda-forge/win-64/python_abi-3.10-4_cp310.conda
159
+ - conda: https://conda.anaconda.org/conda-forge/win-64/ruff-0.3.7-py310h298983d_0.conda
160
+ - conda: https://conda.anaconda.org/conda-forge/noarch/setuptools-70.1.1-pyhd8ed1ab_0.conda
161
+ - conda: https://conda.anaconda.org/conda-forge/win-64/tk-8.6.13-h5226925_1.conda
162
+ - conda: https://conda.anaconda.org/conda-forge/noarch/tomli-2.0.1-pyhd8ed1ab_0.tar.bz2
163
+ - conda: https://conda.anaconda.org/conda-forge/noarch/types-requests-2.31.0.20240406-pyhd8ed1ab_0.conda
164
+ - conda: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.11.0-pyha770c72_0.conda
165
+ - conda: https://conda.anaconda.org/conda-forge/win-64/typos-1.20.9-h7f3b576_0.conda
166
+ - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2024a-h0c530f3_0.conda
167
+ - conda: https://conda.anaconda.org/conda-forge/win-64/ucrt-10.0.22621.0-h57928b3_0.tar.bz2
168
+ - conda: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.2.1-pyhd8ed1ab_0.conda
169
+ - conda: https://conda.anaconda.org/conda-forge/win-64/vc-14.3-hcf57466_18.conda
170
+ - conda: https://conda.anaconda.org/conda-forge/win-64/vc14_runtime-14.38.33130-h82b7239_18.conda
171
+ - conda: https://conda.anaconda.org/conda-forge/win-64/vs2015_runtime-14.38.33130-hcb4865c_18.conda
172
+ - conda: https://conda.anaconda.org/conda-forge/noarch/wheel-0.43.0-pyhd8ed1ab_1.conda
173
+ - conda: https://conda.anaconda.org/conda-forge/noarch/win_inet_pton-1.1.0-pyhd8ed1ab_6.tar.bz2
174
+ - conda: https://conda.anaconda.org/conda-forge/win-64/xz-5.2.6-h8d14728_0.tar.bz2
175
+ packages:
176
+ - kind: conda
177
+ name: _libgcc_mutex
178
+ version: '0.1'
179
+ build: conda_forge
180
+ subdir: linux-64
181
+ url: https://conda.anaconda.org/conda-forge/linux-64/_libgcc_mutex-0.1-conda_forge.tar.bz2
182
+ sha256: fe51de6107f9edc7aa4f786a70f4a883943bc9d39b3bb7307c04c41410990726
183
+ md5: d7c89558ba9fa0495403155b64376d81
184
+ license: None
185
+ size: 2562
186
+ timestamp: 1578324546067
187
+ - kind: conda
188
+ name: _openmp_mutex
189
+ version: '4.5'
190
+ build: 2_gnu
191
+ build_number: 16
192
+ subdir: linux-64
193
+ url: https://conda.anaconda.org/conda-forge/linux-64/_openmp_mutex-4.5-2_gnu.tar.bz2
194
+ sha256: fbe2c5e56a653bebb982eda4876a9178aedfc2b545f25d0ce9c4c0b508253d22
195
+ md5: 73aaf86a425cc6e73fcf236a5a46396d
196
+ depends:
197
+ - _libgcc_mutex 0.1 conda_forge
198
+ - libgomp >=7.5.0
199
+ constrains:
200
+ - openmp_impl 9999
201
+ license: BSD-3-Clause
202
+ license_family: BSD
203
+ size: 23621
204
+ timestamp: 1650670423406
205
+ - kind: conda
206
+ name: _openmp_mutex
207
+ version: '4.5'
208
+ build: 2_gnu
209
+ build_number: 16
210
+ subdir: linux-aarch64
211
+ url: https://conda.anaconda.org/conda-forge/linux-aarch64/_openmp_mutex-4.5-2_gnu.tar.bz2
212
+ sha256: 3702bef2f0a4d38bd8288bbe54aace623602a1343c2cfbefd3fa188e015bebf0
213
+ md5: 6168d71addc746e8f2b8d57dfd2edcea
214
+ depends:
215
+ - libgomp >=7.5.0
216
+ constrains:
217
+ - openmp_impl 9999
218
+ license: BSD-3-Clause
219
+ license_family: BSD
220
+ size: 23712
221
+ timestamp: 1650670790230
222
+ - kind: conda
223
+ name: brotli-python
224
+ version: 1.1.0
225
+ build: py310h00ffb61_1
226
+ build_number: 1
227
+ subdir: win-64
228
+ url: https://conda.anaconda.org/conda-forge/win-64/brotli-python-1.1.0-py310h00ffb61_1.conda
229
+ sha256: 8de77cf62a653dd6ffe19927b92c421f5fa73c078d7799181f5211a1bac2883b
230
+ md5: 42bfbc1d41cbe2696a3c9d8b0342324f
231
+ depends:
232
+ - python >=3.10,<3.11.0a0
233
+ - python_abi 3.10.* *_cp310
234
+ - ucrt >=10.0.20348.0
235
+ - vc >=14.2,<15
236
+ - vc14_runtime >=14.29.30139
237
+ constrains:
238
+ - libbrotlicommon 1.1.0 hcfcfb64_1
239
+ license: MIT
240
+ license_family: MIT
241
+ size: 321672
242
+ timestamp: 1695990897641
243
+ - kind: conda
244
+ name: brotli-python
245
+ version: 1.1.0
246
+ build: py310h1253130_1
247
+ build_number: 1
248
+ subdir: osx-arm64
249
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/brotli-python-1.1.0-py310h1253130_1.conda
250
+ sha256: dab21e18c0275bfd93a09b751096998485677ed17c2e2d08298bc5b43c10bee1
251
+ md5: 26fab7f65a80fff9f402ec3b7860b88a
252
+ depends:
253
+ - libcxx >=15.0.7
254
+ - python >=3.10,<3.11.0a0
255
+ - python >=3.10,<3.11.0a0 *_cpython
256
+ - python_abi 3.10.* *_cp310
257
+ constrains:
258
+ - libbrotlicommon 1.1.0 hb547adb_1
259
+ license: MIT
260
+ license_family: MIT
261
+ size: 344275
262
+ timestamp: 1695990848681
263
+ - kind: conda
264
+ name: brotli-python
265
+ version: 1.1.0
266
+ build: py310h9e9d8ca_1
267
+ build_number: 1
268
+ subdir: osx-64
269
+ url: https://conda.anaconda.org/conda-forge/osx-64/brotli-python-1.1.0-py310h9e9d8ca_1.conda
270
+ sha256: 57d66ca3e072b889c94cfaf56eb7e1794d3b1b3179bd475a4edef50a03359354
271
+ md5: 2362e323293e7699cf1e621d502f86d6
272
+ depends:
273
+ - libcxx >=15.0.7
274
+ - python >=3.10,<3.11.0a0
275
+ - python_abi 3.10.* *_cp310
276
+ constrains:
277
+ - libbrotlicommon 1.1.0 h0dc2134_1
278
+ license: MIT
279
+ license_family: MIT
280
+ size: 367037
281
+ timestamp: 1695990378635
282
+ - kind: conda
283
+ name: brotli-python
284
+ version: 1.1.0
285
+ build: py310hbb3657e_1
286
+ build_number: 1
287
+ subdir: linux-aarch64
288
+ url: https://conda.anaconda.org/conda-forge/linux-aarch64/brotli-python-1.1.0-py310hbb3657e_1.conda
289
+ sha256: 192f2537ca30953653375abd851f1ccf8b849988e3195e0189aa47384a3a88d9
290
+ md5: 5ed52d1d3c480022fe67ae00d1cab792
291
+ depends:
292
+ - libgcc-ng >=12
293
+ - libstdcxx-ng >=12
294
+ - python >=3.10,<3.11.0a0
295
+ - python >=3.10,<3.11.0a0 *_cpython
296
+ - python_abi 3.10.* *_cp310
297
+ constrains:
298
+ - libbrotlicommon 1.1.0 h31becfc_1
299
+ license: MIT
300
+ license_family: MIT
301
+ size: 355646
302
+ timestamp: 1695990521531
303
+ - kind: conda
304
+ name: brotli-python
305
+ version: 1.1.0
306
+ build: py310hc6cd4ac_1
307
+ build_number: 1
308
+ subdir: linux-64
309
+ url: https://conda.anaconda.org/conda-forge/linux-64/brotli-python-1.1.0-py310hc6cd4ac_1.conda
310
+ sha256: e22268d81905338570786921b3def88e55f9ed6d0ccdd17d9fbae31a02fbef69
311
+ md5: 1f95722c94f00b69af69a066c7433714
312
+ depends:
313
+ - libgcc-ng >=12
314
+ - libstdcxx-ng >=12
315
+ - python >=3.10,<3.11.0a0
316
+ - python_abi 3.10.* *_cp310
317
+ constrains:
318
+ - libbrotlicommon 1.1.0 hd590300_1
319
+ license: MIT
320
+ license_family: MIT
321
+ size: 349397
322
+ timestamp: 1695990295884
323
+ - kind: conda
324
+ name: bzip2
325
+ version: 1.0.8
326
+ build: h10d778d_5
327
+ build_number: 5
328
+ subdir: osx-64
329
+ url: https://conda.anaconda.org/conda-forge/osx-64/bzip2-1.0.8-h10d778d_5.conda
330
+ sha256: 61fb2b488928a54d9472113e1280b468a309561caa54f33825a3593da390b242
331
+ md5: 6097a6ca9ada32699b5fc4312dd6ef18
332
+ license: bzip2-1.0.6
333
+ license_family: BSD
334
+ size: 127885
335
+ timestamp: 1699280178474
336
+ - kind: conda
337
+ name: bzip2
338
+ version: 1.0.8
339
+ build: h31becfc_5
340
+ build_number: 5
341
+ subdir: linux-aarch64
342
+ url: https://conda.anaconda.org/conda-forge/linux-aarch64/bzip2-1.0.8-h31becfc_5.conda
343
+ sha256: b9f170990625cb1eeefaca02e091dc009a64264b077166d8ed7aeb7a09e923b0
344
+ md5: a64e35f01e0b7a2a152eca87d33b9c87
345
+ depends:
346
+ - libgcc-ng >=12
347
+ license: bzip2-1.0.6
348
+ license_family: BSD
349
+ size: 189668
350
+ timestamp: 1699280060686
351
+ - kind: conda
352
+ name: bzip2
353
+ version: 1.0.8
354
+ build: h93a5062_5
355
+ build_number: 5
356
+ subdir: osx-arm64
357
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/bzip2-1.0.8-h93a5062_5.conda
358
+ sha256: bfa84296a638bea78a8bb29abc493ee95f2a0218775642474a840411b950fe5f
359
+ md5: 1bbc659ca658bfd49a481b5ef7a0f40f
360
+ license: bzip2-1.0.6
361
+ license_family: BSD
362
+ size: 122325
363
+ timestamp: 1699280294368
364
+ - kind: conda
365
+ name: bzip2
366
+ version: 1.0.8
367
+ build: hcfcfb64_5
368
+ build_number: 5
369
+ subdir: win-64
370
+ url: https://conda.anaconda.org/conda-forge/win-64/bzip2-1.0.8-hcfcfb64_5.conda
371
+ sha256: ae5f47a5c86fd6db822931255dcf017eb12f60c77f07dc782ccb477f7808aab2
372
+ md5: 26eb8ca6ea332b675e11704cce84a3be
373
+ depends:
374
+ - ucrt >=10.0.20348.0
375
+ - vc >=14.2,<15
376
+ - vc14_runtime >=14.29.30139
377
+ license: bzip2-1.0.6
378
+ license_family: BSD
379
+ size: 124580
380
+ timestamp: 1699280668742
381
+ - kind: conda
382
+ name: bzip2
383
+ version: 1.0.8
384
+ build: hd590300_5
385
+ build_number: 5
386
+ subdir: linux-64
387
+ url: https://conda.anaconda.org/conda-forge/linux-64/bzip2-1.0.8-hd590300_5.conda
388
+ sha256: 242c0c324507ee172c0e0dd2045814e746bb303d1eb78870d182ceb0abc726a8
389
+ md5: 69b8b6202a07720f448be700e300ccf4
390
+ depends:
391
+ - libgcc-ng >=12
392
+ license: bzip2-1.0.6
393
+ license_family: BSD
394
+ size: 254228
395
+ timestamp: 1699279927352
396
+ - kind: conda
397
+ name: ca-certificates
398
+ version: 2024.2.2
399
+ build: h56e8100_0
400
+ subdir: win-64
401
+ url: https://conda.anaconda.org/conda-forge/win-64/ca-certificates-2024.2.2-h56e8100_0.conda
402
+ sha256: 4d587088ecccd393fec3420b64f1af4ee1a0e6897a45cfd5ef38055322cea5d0
403
+ md5: 63da060240ab8087b60d1357051ea7d6
404
+ license: ISC
405
+ size: 155886
406
+ timestamp: 1706843918052
407
+ - kind: conda
408
+ name: ca-certificates
409
+ version: 2024.2.2
410
+ build: h8857fd0_0
411
+ subdir: osx-64
412
+ url: https://conda.anaconda.org/conda-forge/osx-64/ca-certificates-2024.2.2-h8857fd0_0.conda
413
+ sha256: 54a794aedbb4796afeabdf54287b06b1d27f7b13b3814520925f4c2c80f58ca9
414
+ md5: f2eacee8c33c43692f1ccfd33d0f50b1
415
+ license: ISC
416
+ size: 155665
417
+ timestamp: 1706843838227
418
+ - kind: conda
419
+ name: ca-certificates
420
+ version: 2024.2.2
421
+ build: hbcca054_0
422
+ subdir: linux-64
423
+ url: https://conda.anaconda.org/conda-forge/linux-64/ca-certificates-2024.2.2-hbcca054_0.conda
424
+ sha256: 91d81bfecdbb142c15066df70cc952590ae8991670198f92c66b62019b251aeb
425
+ md5: 2f4327a1cbe7f022401b236e915a5fef
426
+ license: ISC
427
+ size: 155432
428
+ timestamp: 1706843687645
429
+ - kind: conda
430
+ name: ca-certificates
431
+ version: 2024.2.2
432
+ build: hcefe29a_0
433
+ subdir: linux-aarch64
434
+ url: https://conda.anaconda.org/conda-forge/linux-aarch64/ca-certificates-2024.2.2-hcefe29a_0.conda
435
+ sha256: 0f6b34d835e26e5fa97cca4985dc46f0aba551a3a23f07c6f13cca2542b8c642
436
+ md5: 57c226edb90c4e973b9b7503537dd339
437
+ license: ISC
438
+ size: 155738
439
+ timestamp: 1706845723412
440
+ - kind: conda
441
+ name: ca-certificates
442
+ version: 2024.2.2
443
+ build: hf0a4a13_0
444
+ subdir: osx-arm64
445
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/ca-certificates-2024.2.2-hf0a4a13_0.conda
446
+ sha256: 49bc3439816ac72d0c0e0f144b8cc870fdcc4adec2e861407ec818d8116b2204
447
+ md5: fb416a1795f18dcc5a038bc2dc54edf9
448
+ license: ISC
449
+ size: 155725
450
+ timestamp: 1706844034242
451
+ - kind: conda
452
+ name: ld_impl_linux-64
453
+ version: '2.40'
454
+ build: h41732ed_0
455
+ subdir: linux-64
456
+ url: https://conda.anaconda.org/conda-forge/linux-64/ld_impl_linux-64-2.40-h41732ed_0.conda
457
+ sha256: f6cc89d887555912d6c61b295d398cff9ec982a3417d38025c45d5dd9b9e79cd
458
+ md5: 7aca3059a1729aa76c597603f10b0dd3
459
+ constrains:
460
+ - binutils_impl_linux-64 2.40
461
+ license: GPL-3.0-only
462
+ license_family: GPL
463
+ size: 704696
464
+ timestamp: 1674833944779
465
+ - kind: conda
466
+ name: ld_impl_linux-aarch64
467
+ version: '2.40'
468
+ build: h2d8c526_0
469
+ subdir: linux-aarch64
470
+ url: https://conda.anaconda.org/conda-forge/linux-aarch64/ld_impl_linux-aarch64-2.40-h2d8c526_0.conda
471
+ sha256: 1ba06e8645094b340b4aee23603a6abb1b0383788180e65f3de34e655c5f577c
472
+ md5: 16246d69e945d0b1969a6099e7c5d457
473
+ constrains:
474
+ - binutils_impl_linux-aarch64 2.40
475
+ license: GPL-3.0-only
476
+ license_family: GPL
477
+ size: 738776
478
+ timestamp: 1674833843183
479
+ - kind: conda
480
+ name: libcxx
481
+ version: 16.0.6
482
+ build: h4653b0c_0
483
+ subdir: osx-arm64
484
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/libcxx-16.0.6-h4653b0c_0.conda
485
+ sha256: 11d3fb51c14832d9e4f6d84080a375dec21ea8a3a381a1910e67ff9cedc20355
486
+ md5: 9d7d724faf0413bf1dbc5a85935700c8
487
+ license: Apache-2.0 WITH LLVM-exception
488
+ license_family: Apache
489
+ size: 1160232
490
+ timestamp: 1686896993785
491
+ - kind: conda
492
+ name: libcxx
493
+ version: 16.0.6
494
+ build: hd57cbcb_0
495
+ subdir: osx-64
496
+ url: https://conda.anaconda.org/conda-forge/osx-64/libcxx-16.0.6-hd57cbcb_0.conda
497
+ sha256: 9063271847cf05f3a6cc6cae3e7f0ced032ab5f3a3c9d3f943f876f39c5c2549
498
+ md5: 7d6972792161077908b62971802f289a
499
+ license: Apache-2.0 WITH LLVM-exception
500
+ license_family: Apache
501
+ size: 1142172
502
+ timestamp: 1686896907750
503
+ - kind: conda
504
+ name: libffi
505
+ version: 3.4.2
506
+ build: h0d85af4_5
507
+ build_number: 5
508
+ subdir: osx-64
509
+ url: https://conda.anaconda.org/conda-forge/osx-64/libffi-3.4.2-h0d85af4_5.tar.bz2
510
+ sha256: 7a2d27a936ceee6942ea4d397f9c7d136f12549d86f7617e8b6bad51e01a941f
511
+ md5: ccb34fb14960ad8b125962d3d79b31a9
512
+ license: MIT
513
+ license_family: MIT
514
+ size: 51348
515
+ timestamp: 1636488394370
516
+ - kind: conda
517
+ name: libffi
518
+ version: 3.4.2
519
+ build: h3422bc3_5
520
+ build_number: 5
521
+ subdir: osx-arm64
522
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/libffi-3.4.2-h3422bc3_5.tar.bz2
523
+ sha256: 41b3d13efb775e340e4dba549ab5c029611ea6918703096b2eaa9c015c0750ca
524
+ md5: 086914b672be056eb70fd4285b6783b6
525
+ license: MIT
526
+ license_family: MIT
527
+ size: 39020
528
+ timestamp: 1636488587153
529
+ - kind: conda
530
+ name: libffi
531
+ version: 3.4.2
532
+ build: h3557bc0_5
533
+ build_number: 5
534
+ subdir: linux-aarch64
535
+ url: https://conda.anaconda.org/conda-forge/linux-aarch64/libffi-3.4.2-h3557bc0_5.tar.bz2
536
+ sha256: 7e9258a102480757fe3faeb225a3ca04dffd10fecd2a958c65cdb4cdf75f2c3c
537
+ md5: dddd85f4d52121fab0a8b099c5e06501
538
+ depends:
539
+ - libgcc-ng >=9.4.0
540
+ license: MIT
541
+ license_family: MIT
542
+ size: 59450
543
+ timestamp: 1636488255090
544
+ - kind: conda
545
+ name: libffi
546
+ version: 3.4.2
547
+ build: h7f98852_5
548
+ build_number: 5
549
+ subdir: linux-64
550
+ url: https://conda.anaconda.org/conda-forge/linux-64/libffi-3.4.2-h7f98852_5.tar.bz2
551
+ sha256: ab6e9856c21709b7b517e940ae7028ae0737546122f83c2aa5d692860c3b149e
552
+ md5: d645c6d2ac96843a2bfaccd2d62b3ac3
553
+ depends:
554
+ - libgcc-ng >=9.4.0
555
+ license: MIT
556
+ license_family: MIT
557
+ size: 58292
558
+ timestamp: 1636488182923
559
+ - kind: conda
560
+ name: libffi
561
+ version: 3.4.2
562
+ build: h8ffe710_5
563
+ build_number: 5
564
+ subdir: win-64
565
+ url: https://conda.anaconda.org/conda-forge/win-64/libffi-3.4.2-h8ffe710_5.tar.bz2
566
+ sha256: 1951ab740f80660e9bc07d2ed3aefb874d78c107264fd810f24a1a6211d4b1a5
567
+ md5: 2c96d1b6915b408893f9472569dee135
568
+ depends:
569
+ - vc >=14.1,<15.0a0
570
+ - vs2015_runtime >=14.16.27012
571
+ license: MIT
572
+ license_family: MIT
573
+ size: 42063
574
+ timestamp: 1636489106777
575
+ - kind: conda
576
+ name: libgcc-ng
577
+ version: 13.2.0
578
+ build: h807b86a_5
579
+ build_number: 5
580
+ subdir: linux-64
581
+ url: https://conda.anaconda.org/conda-forge/linux-64/libgcc-ng-13.2.0-h807b86a_5.conda
582
+ sha256: d32f78bfaac282cfe5205f46d558704ad737b8dbf71f9227788a5ca80facaba4
583
+ md5: d4ff227c46917d3b4565302a2bbb276b
584
+ depends:
585
+ - _libgcc_mutex 0.1 conda_forge
586
+ - _openmp_mutex >=4.5
587
+ constrains:
588
+ - libgomp 13.2.0 h807b86a_5
589
+ license: GPL-3.0-only WITH GCC-exception-3.1
590
+ license_family: GPL
591
+ size: 770506
592
+ timestamp: 1706819192021
593
+ - kind: conda
594
+ name: libgcc-ng
595
+ version: 13.2.0
596
+ build: hf8544c7_5
597
+ build_number: 5
598
+ subdir: linux-aarch64
599
+ url: https://conda.anaconda.org/conda-forge/linux-aarch64/libgcc-ng-13.2.0-hf8544c7_5.conda
600
+ sha256: 869e44e1cf329198f5bea56c146207ed639b24b6281187159435b9499ecb3959
601
+ md5: dee934e640275d9e74e7bbd455f25162
602
+ depends:
603
+ - _openmp_mutex >=4.5
604
+ constrains:
605
+ - libgomp 13.2.0 hf8544c7_5
606
+ license: GPL-3.0-only WITH GCC-exception-3.1
607
+ license_family: GPL
608
+ size: 456795
609
+ timestamp: 1706820691781
610
+ - kind: conda
611
+ name: libgomp
612
+ version: 13.2.0
613
+ build: h807b86a_5
614
+ build_number: 5
615
+ subdir: linux-64
616
+ url: https://conda.anaconda.org/conda-forge/linux-64/libgomp-13.2.0-h807b86a_5.conda
617
+ sha256: 0d3d4b1b0134283ea02d58e8eb5accf3655464cf7159abf098cc694002f8d34e
618
+ md5: d211c42b9ce49aee3734fdc828731689
619
+ depends:
620
+ - _libgcc_mutex 0.1 conda_forge
621
+ license: GPL-3.0-only WITH GCC-exception-3.1
622
+ license_family: GPL
623
+ size: 419751
624
+ timestamp: 1706819107383
625
+ - kind: conda
626
+ name: libgomp
627
+ version: 13.2.0
628
+ build: hf8544c7_5
629
+ build_number: 5
630
+ subdir: linux-aarch64
631
+ url: https://conda.anaconda.org/conda-forge/linux-aarch64/libgomp-13.2.0-hf8544c7_5.conda
632
+ sha256: a98d4f242a351feb7983a28e7d6a0ca51da764c6233ea3dfc776975a3aba8a01
633
+ md5: 379be2f115ffb73860e4e260dd2170b7
634
+ license: GPL-3.0-only WITH GCC-exception-3.1
635
+ license_family: GPL
636
+ size: 423091
637
+ timestamp: 1706820564165
638
+ - kind: conda
639
+ name: libnsl
640
+ version: 2.0.1
641
+ build: h31becfc_0
642
+ subdir: linux-aarch64
643
+ url: https://conda.anaconda.org/conda-forge/linux-aarch64/libnsl-2.0.1-h31becfc_0.conda
644
+ sha256: fd18c2b75d7411096428d36a70b36b1a17e31f7b8956b6905d145792d49e97f8
645
+ md5: c14f32510f694e3185704d89967ec422
646
+ depends:
647
+ - libgcc-ng >=12
648
+ license: LGPL-2.1-only
649
+ license_family: GPL
650
+ size: 34501
651
+ timestamp: 1697358973269
652
+ - kind: conda
653
+ name: libnsl
654
+ version: 2.0.1
655
+ build: hd590300_0
656
+ subdir: linux-64
657
+ url: https://conda.anaconda.org/conda-forge/linux-64/libnsl-2.0.1-hd590300_0.conda
658
+ sha256: 26d77a3bb4dceeedc2a41bd688564fe71bf2d149fdcf117049970bc02ff1add6
659
+ md5: 30fd6e37fe21f86f4bd26d6ee73eeec7
660
+ depends:
661
+ - libgcc-ng >=12
662
+ license: LGPL-2.1-only
663
+ license_family: GPL
664
+ size: 33408
665
+ timestamp: 1697359010159
666
+ - kind: conda
667
+ name: libsqlite
668
+ version: 3.45.2
669
+ build: h091b4b1_0
670
+ subdir: osx-arm64
671
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/libsqlite-3.45.2-h091b4b1_0.conda
672
+ sha256: 7c234320a1a2132b9cc972aaa06bb215bb220a5b1addb0bed7a5a321c805920e
673
+ md5: 9d07427ee5bd9afd1e11ce14368a48d6
674
+ depends:
675
+ - libzlib >=1.2.13,<1.3.0a0
676
+ license: Unlicense
677
+ size: 825300
678
+ timestamp: 1710255078823
679
+ - kind: conda
680
+ name: libsqlite
681
+ version: 3.45.2
682
+ build: h194ca79_0
683
+ subdir: linux-aarch64
684
+ url: https://conda.anaconda.org/conda-forge/linux-aarch64/libsqlite-3.45.2-h194ca79_0.conda
685
+ sha256: 0ce6de6369c04386cfc8696b1f795f425843789609ae2e04e7a1eb7deae62a8b
686
+ md5: bf4c96a21fbfc6a6ef6a7781a534a4e0
687
+ depends:
688
+ - libgcc-ng >=12
689
+ - libzlib >=1.2.13,<1.3.0a0
690
+ license: Unlicense
691
+ size: 1038462
692
+ timestamp: 1710253998432
693
+ - kind: conda
694
+ name: libsqlite
695
+ version: 3.45.2
696
+ build: h2797004_0
697
+ subdir: linux-64
698
+ url: https://conda.anaconda.org/conda-forge/linux-64/libsqlite-3.45.2-h2797004_0.conda
699
+ sha256: 8cdbeb7902729e319510a82d7c642402981818702b58812af265ef55d1315473
700
+ md5: 866983a220e27a80cb75e85cb30466a1
701
+ depends:
702
+ - libgcc-ng >=12
703
+ - libzlib >=1.2.13,<1.3.0a0
704
+ license: Unlicense
705
+ size: 857489
706
+ timestamp: 1710254744982
707
+ - kind: conda
708
+ name: libsqlite
709
+ version: 3.45.2
710
+ build: h92b6c6a_0
711
+ subdir: osx-64
712
+ url: https://conda.anaconda.org/conda-forge/osx-64/libsqlite-3.45.2-h92b6c6a_0.conda
713
+ sha256: 320ec73a4e3dd377757a2595770b8137ec4583df4d7782472d76377cdbdc4543
714
+ md5: 086f56e13a96a6cfb1bf640505ae6b70
715
+ depends:
716
+ - libzlib >=1.2.13,<1.3.0a0
717
+ license: Unlicense
718
+ size: 902355
719
+ timestamp: 1710254991672
720
+ - kind: conda
721
+ name: libsqlite
722
+ version: 3.45.2
723
+ build: hcfcfb64_0
724
+ subdir: win-64
725
+ url: https://conda.anaconda.org/conda-forge/win-64/libsqlite-3.45.2-hcfcfb64_0.conda
726
+ sha256: 4bb24b986550275a6d02835150d943c4c675808d05c0efc5c2a22154d007a69f
727
+ md5: f95359f8dc5abf7da7776ece9ef10bc5
728
+ depends:
729
+ - ucrt >=10.0.20348.0
730
+ - vc >=14.2,<15
731
+ - vc14_runtime >=14.29.30139
732
+ license: Unlicense
733
+ size: 869606
734
+ timestamp: 1710255095740
735
+ - kind: conda
736
+ name: libstdcxx-ng
737
+ version: 13.2.0
738
+ build: h7e041cc_5
739
+ build_number: 5
740
+ subdir: linux-64
741
+ url: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-ng-13.2.0-h7e041cc_5.conda
742
+ sha256: a56c5b11f1e73a86e120e6141a42d9e935a99a2098491ac9e15347a1476ce777
743
+ md5: f6f6600d18a4047b54f803cf708b868a
744
+ license: GPL-3.0-only WITH GCC-exception-3.1
745
+ license_family: GPL
746
+ size: 3834139
747
+ timestamp: 1706819252496
748
+ - kind: conda
749
+ name: libstdcxx-ng
750
+ version: 13.2.0
751
+ build: h9a76618_5
752
+ build_number: 5
753
+ subdir: linux-aarch64
754
+ url: https://conda.anaconda.org/conda-forge/linux-aarch64/libstdcxx-ng-13.2.0-h9a76618_5.conda
755
+ sha256: c209f23a8a497fc87107a68b6bbc8d2089cf15fd4015b558dfdce63544379b05
756
+ md5: 1b79d37dce0fad96bdf3de03925f43b4
757
+ license: GPL-3.0-only WITH GCC-exception-3.1
758
+ license_family: GPL
759
+ size: 3752658
760
+ timestamp: 1706820778418
761
+ - kind: conda
762
+ name: libuuid
763
+ version: 2.38.1
764
+ build: h0b41bf4_0
765
+ subdir: linux-64
766
+ url: https://conda.anaconda.org/conda-forge/linux-64/libuuid-2.38.1-h0b41bf4_0.conda
767
+ sha256: 787eb542f055a2b3de553614b25f09eefb0a0931b0c87dbcce6efdfd92f04f18
768
+ md5: 40b61aab5c7ba9ff276c41cfffe6b80b
769
+ depends:
770
+ - libgcc-ng >=12
771
+ license: BSD-3-Clause
772
+ license_family: BSD
773
+ size: 33601
774
+ timestamp: 1680112270483
775
+ - kind: conda
776
+ name: libuuid
777
+ version: 2.38.1
778
+ build: hb4cce97_0
779
+ subdir: linux-aarch64
780
+ url: https://conda.anaconda.org/conda-forge/linux-aarch64/libuuid-2.38.1-hb4cce97_0.conda
781
+ sha256: 616277b0c5f7616c2cdf36f6c316ea3f9aa5bb35f2d4476a349ab58b9b91675f
782
+ md5: 000e30b09db0b7c775b21695dff30969
783
+ depends:
784
+ - libgcc-ng >=12
785
+ license: BSD-3-Clause
786
+ license_family: BSD
787
+ size: 35720
788
+ timestamp: 1680113474501
789
+ - kind: conda
790
+ name: libxcrypt
791
+ version: 4.4.36
792
+ build: h31becfc_1
793
+ build_number: 1
794
+ subdir: linux-aarch64
795
+ url: https://conda.anaconda.org/conda-forge/linux-aarch64/libxcrypt-4.4.36-h31becfc_1.conda
796
+ sha256: 6b46c397644091b8a26a3048636d10b989b1bf266d4be5e9474bf763f828f41f
797
+ md5: b4df5d7d4b63579d081fd3a4cf99740e
798
+ depends:
799
+ - libgcc-ng >=12
800
+ license: LGPL-2.1-or-later
801
+ size: 114269
802
+ timestamp: 1702724369203
803
+ - kind: conda
804
+ name: libxcrypt
805
+ version: 4.4.36
806
+ build: hd590300_1
807
+ build_number: 1
808
+ subdir: linux-64
809
+ url: https://conda.anaconda.org/conda-forge/linux-64/libxcrypt-4.4.36-hd590300_1.conda
810
+ sha256: 6ae68e0b86423ef188196fff6207ed0c8195dd84273cb5623b85aa08033a410c
811
+ md5: 5aa797f8787fe7a17d1b0821485b5adc
812
+ depends:
813
+ - libgcc-ng >=12
814
+ license: LGPL-2.1-or-later
815
+ size: 100393
816
+ timestamp: 1702724383534
817
+ - kind: conda
818
+ name: libzlib
819
+ version: 1.2.13
820
+ build: h31becfc_5
821
+ build_number: 5
822
+ subdir: linux-aarch64
823
+ url: https://conda.anaconda.org/conda-forge/linux-aarch64/libzlib-1.2.13-h31becfc_5.conda
824
+ sha256: aeeefbb61e5e8227e53566d5e42dbb49e120eb99109996bf0dbfde8f180747a7
825
+ md5: b213aa87eea9491ef7b129179322e955
826
+ depends:
827
+ - libgcc-ng >=12
828
+ constrains:
829
+ - zlib 1.2.13 *_5
830
+ license: Zlib
831
+ license_family: Other
832
+ size: 67036
833
+ timestamp: 1686575148440
834
+ - kind: conda
835
+ name: libzlib
836
+ version: 1.2.13
837
+ build: h53f4e23_5
838
+ build_number: 5
839
+ subdir: osx-arm64
840
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/libzlib-1.2.13-h53f4e23_5.conda
841
+ sha256: ab1c8aefa2d54322a63aaeeefe9cf877411851738616c4068e0dccc66b9c758a
842
+ md5: 1a47f5236db2e06a320ffa0392f81bd8
843
+ constrains:
844
+ - zlib 1.2.13 *_5
845
+ license: Zlib
846
+ license_family: Other
847
+ size: 48102
848
+ timestamp: 1686575426584
849
+ - kind: conda
850
+ name: libzlib
851
+ version: 1.2.13
852
+ build: h8a1eda9_5
853
+ build_number: 5
854
+ subdir: osx-64
855
+ url: https://conda.anaconda.org/conda-forge/osx-64/libzlib-1.2.13-h8a1eda9_5.conda
856
+ sha256: fc58ad7f47ffea10df1f2165369978fba0a1cc32594aad778f5eec725f334867
857
+ md5: 4a3ad23f6e16f99c04e166767193d700
858
+ constrains:
859
+ - zlib 1.2.13 *_5
860
+ license: Zlib
861
+ license_family: Other
862
+ size: 59404
863
+ timestamp: 1686575566695
864
+ - kind: conda
865
+ name: libzlib
866
+ version: 1.2.13
867
+ build: hcfcfb64_5
868
+ build_number: 5
869
+ subdir: win-64
870
+ url: https://conda.anaconda.org/conda-forge/win-64/libzlib-1.2.13-hcfcfb64_5.conda
871
+ sha256: c161822ee8130b71e08b6d282b9919c1de2c5274b29921a867bca0f7d30cad26
872
+ md5: 5fdb9c6a113b6b6cb5e517fd972d5f41
873
+ depends:
874
+ - ucrt >=10.0.20348.0
875
+ - vc >=14.2,<15
876
+ - vc14_runtime >=14.29.30139
877
+ constrains:
878
+ - zlib 1.2.13 *_5
879
+ license: Zlib
880
+ license_family: Other
881
+ size: 55800
882
+ timestamp: 1686575452215
883
+ - kind: conda
884
+ name: libzlib
885
+ version: 1.2.13
886
+ build: hd590300_5
887
+ build_number: 5
888
+ subdir: linux-64
889
+ url: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.2.13-hd590300_5.conda
890
+ sha256: 370c7c5893b737596fd6ca0d9190c9715d89d888b8c88537ae1ef168c25e82e4
891
+ md5: f36c115f1ee199da648e0597ec2047ad
892
+ depends:
893
+ - libgcc-ng >=12
894
+ constrains:
895
+ - zlib 1.2.13 *_5
896
+ license: Zlib
897
+ license_family: Other
898
+ size: 61588
899
+ timestamp: 1686575217516
900
+ - kind: conda
901
+ name: m2w64-gcc-libgfortran
902
+ version: 5.3.0
903
+ build: '6'
904
+ build_number: 6
905
+ subdir: win-64
906
+ url: https://conda.anaconda.org/conda-forge/win-64/m2w64-gcc-libgfortran-5.3.0-6.tar.bz2
907
+ sha256: 9de95a7996d5366ae0808eef2acbc63f9b11b874aa42375f55379e6715845dc6
908
+ md5: 066552ac6b907ec6d72c0ddab29050dc
909
+ depends:
910
+ - m2w64-gcc-libs-core
911
+ - msys2-conda-epoch ==20160418
912
+ license: GPL, LGPL, FDL, custom
913
+ size: 350687
914
+ timestamp: 1608163451316
915
+ - kind: conda
916
+ name: m2w64-gcc-libs
917
+ version: 5.3.0
918
+ build: '7'
919
+ build_number: 7
920
+ subdir: win-64
921
+ url: https://conda.anaconda.org/conda-forge/win-64/m2w64-gcc-libs-5.3.0-7.tar.bz2
922
+ sha256: 3bd1ab02b7c89a5b153a17be03b36d833f1517ff2a6a77ead7c4a808b88196aa
923
+ md5: fe759119b8b3bfa720b8762c6fdc35de
924
+ depends:
925
+ - m2w64-gcc-libgfortran
926
+ - m2w64-gcc-libs-core
927
+ - m2w64-gmp
928
+ - m2w64-libwinpthread-git
929
+ - msys2-conda-epoch ==20160418
930
+ license: GPL3+, partial:GCCRLE, partial:LGPL2+
931
+ size: 532390
932
+ timestamp: 1608163512830
933
+ - kind: conda
934
+ name: m2w64-gcc-libs-core
935
+ version: 5.3.0
936
+ build: '7'
937
+ build_number: 7
938
+ subdir: win-64
939
+ url: https://conda.anaconda.org/conda-forge/win-64/m2w64-gcc-libs-core-5.3.0-7.tar.bz2
940
+ sha256: 58afdfe859ed2e9a9b1cc06bc408720cb2c3a6a132e59d4805b090d7574f4ee0
941
+ md5: 4289d80fb4d272f1f3b56cfe87ac90bd
942
+ depends:
943
+ - m2w64-gmp
944
+ - m2w64-libwinpthread-git
945
+ - msys2-conda-epoch ==20160418
946
+ license: GPL3+, partial:GCCRLE, partial:LGPL2+
947
+ size: 219240
948
+ timestamp: 1608163481341
949
+ - kind: conda
950
+ name: m2w64-gmp
951
+ version: 6.1.0
952
+ build: '2'
953
+ build_number: 2
954
+ subdir: win-64
955
+ url: https://conda.anaconda.org/conda-forge/win-64/m2w64-gmp-6.1.0-2.tar.bz2
956
+ sha256: 7e3cd95f554660de45f8323fca359e904e8d203efaf07a4d311e46d611481ed1
957
+ md5: 53a1c73e1e3d185516d7e3af177596d9
958
+ depends:
959
+ - msys2-conda-epoch ==20160418
960
+ license: LGPL3
961
+ size: 743501
962
+ timestamp: 1608163782057
963
+ - kind: conda
964
+ name: m2w64-libwinpthread-git
965
+ version: 5.0.0.4634.697f757
966
+ build: '2'
967
+ build_number: 2
968
+ subdir: win-64
969
+ url: https://conda.anaconda.org/conda-forge/win-64/m2w64-libwinpthread-git-5.0.0.4634.697f757-2.tar.bz2
970
+ sha256: f63a09b2cae7defae0480f1740015d6235f1861afa6fe2e2d3e10bd0d1314ee0
971
+ md5: 774130a326dee16f1ceb05cc687ee4f0
972
+ depends:
973
+ - msys2-conda-epoch ==20160418
974
+ license: MIT, BSD
975
+ size: 31928
976
+ timestamp: 1608166099896
977
+ - kind: conda
978
+ name: msys2-conda-epoch
979
+ version: '20160418'
980
+ build: '1'
981
+ build_number: 1
982
+ subdir: win-64
983
+ url: https://conda.anaconda.org/conda-forge/win-64/msys2-conda-epoch-20160418-1.tar.bz2
984
+ sha256: 99358d58d778abee4dca82ad29fb58058571f19b0f86138363c260049d4ac7f1
985
+ md5: b0309b72560df66f71a9d5e34a5efdfa
986
+ size: 3227
987
+ timestamp: 1608166968312
988
+ - kind: conda
989
+ name: mypy
990
+ version: 1.8.0
991
+ build: py310h2372a71_0
992
+ subdir: linux-64
993
+ url: https://conda.anaconda.org/conda-forge/linux-64/mypy-1.8.0-py310h2372a71_0.conda
994
+ sha256: 6c01268327db83c70c38cfc87fc13a71d09cda123ae06cd6edbbe620c2b20f33
995
+ md5: 3320dc32fc6bd29ab4a16cf22bc35fc2
996
+ depends:
997
+ - libgcc-ng >=12
998
+ - mypy_extensions >=1.0.0
999
+ - psutil >=4.0
1000
+ - python >=3.10,<3.11.0a0
1001
+ - python_abi 3.10.* *_cp310
1002
+ - tomli >=1.1.0
1003
+ - typing_extensions >=4.1.0
1004
+ license: MIT
1005
+ license_family: MIT
1006
+ size: 17160046
1007
+ timestamp: 1703185152663
1008
+ - kind: conda
1009
+ name: mypy
1010
+ version: 1.8.0
1011
+ build: py310h8d17308_0
1012
+ subdir: win-64
1013
+ url: https://conda.anaconda.org/conda-forge/win-64/mypy-1.8.0-py310h8d17308_0.conda
1014
+ sha256: 8ca9e638a538225b6a5a935573964fa4d743456ece6171988d4116d57a635069
1015
+ md5: 42c9adc3e138cd581a869d46dfdb3fcd
1016
+ depends:
1017
+ - mypy_extensions >=1.0.0
1018
+ - psutil >=4.0
1019
+ - python >=3.10,<3.11.0a0
1020
+ - python_abi 3.10.* *_cp310
1021
+ - tomli >=1.1.0
1022
+ - typing_extensions >=4.1.0
1023
+ - ucrt >=10.0.20348.0
1024
+ - vc >=14.2,<15
1025
+ - vc14_runtime >=14.29.30139
1026
+ license: MIT
1027
+ license_family: MIT
1028
+ size: 9332107
1029
+ timestamp: 1703185142866
1030
+ - kind: conda
1031
+ name: mypy
1032
+ version: 1.8.0
1033
+ build: py310hb299538_0
1034
+ subdir: linux-aarch64
1035
+ url: https://conda.anaconda.org/conda-forge/linux-aarch64/mypy-1.8.0-py310hb299538_0.conda
1036
+ sha256: a970f3c8d22641c7202a6b40c106a55e8b77b0fe4cca715c95ca551262cdebea
1037
+ md5: bf64efdd960ab39f9cd99f562f7ad27f
1038
+ depends:
1039
+ - libgcc-ng >=12
1040
+ - mypy_extensions >=1.0.0
1041
+ - psutil >=4.0
1042
+ - python >=3.10,<3.11.0a0
1043
+ - python >=3.10,<3.11.0a0 *_cpython
1044
+ - python_abi 3.10.* *_cp310
1045
+ - tomli >=1.1.0
1046
+ - typing_extensions >=4.1.0
1047
+ license: MIT
1048
+ license_family: MIT
1049
+ size: 14537904
1050
+ timestamp: 1703185330563
1051
+ - kind: conda
1052
+ name: mypy
1053
+ version: 1.8.0
1054
+ build: py310hb372a2b_0
1055
+ subdir: osx-64
1056
+ url: https://conda.anaconda.org/conda-forge/osx-64/mypy-1.8.0-py310hb372a2b_0.conda
1057
+ sha256: 3af15d65a207840b15b3298398f70e48263a20706d2bc48bede09f9077597759
1058
+ md5: 5e8c2a9af839d3d23be1cf7e2c955c5c
1059
+ depends:
1060
+ - mypy_extensions >=1.0.0
1061
+ - psutil >=4.0
1062
+ - python >=3.10,<3.11.0a0
1063
+ - python_abi 3.10.* *_cp310
1064
+ - tomli >=1.1.0
1065
+ - typing_extensions >=4.1.0
1066
+ license: MIT
1067
+ license_family: MIT
1068
+ size: 11303456
1069
+ timestamp: 1703184930605
1070
+ - kind: conda
1071
+ name: mypy
1072
+ version: 1.8.0
1073
+ build: py310hd125d64_0
1074
+ subdir: osx-arm64
1075
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/mypy-1.8.0-py310hd125d64_0.conda
1076
+ sha256: 630f3bccefb8b7139dcee0941fb241664130953f319885134f8cd813d4e300ce
1077
+ md5: 0884b51650eb1e4f77022ffd68819ce6
1078
+ depends:
1079
+ - mypy_extensions >=1.0.0
1080
+ - psutil >=4.0
1081
+ - python >=3.10,<3.11.0a0
1082
+ - python >=3.10,<3.11.0a0 *_cpython
1083
+ - python_abi 3.10.* *_cp310
1084
+ - tomli >=1.1.0
1085
+ - typing_extensions >=4.1.0
1086
+ license: MIT
1087
+ license_family: MIT
1088
+ size: 8941826
1089
+ timestamp: 1703185223331
1090
+ - kind: conda
1091
+ name: mypy_extensions
1092
+ version: 1.0.0
1093
+ build: pyha770c72_0
1094
+ subdir: noarch
1095
+ noarch: python
1096
+ url: https://conda.anaconda.org/conda-forge/noarch/mypy_extensions-1.0.0-pyha770c72_0.conda
1097
+ sha256: f240217476e148e825420c6bc3a0c0efb08c0718b7042fae960400c02af858a3
1098
+ md5: 4eccaeba205f0aed9ac3a9ea58568ca3
1099
+ depends:
1100
+ - python >=3.5
1101
+ license: MIT
1102
+ license_family: MIT
1103
+ size: 10492
1104
+ timestamp: 1675543414256
1105
+ - kind: conda
1106
+ name: ncurses
1107
+ version: 6.4.20240210
1108
+ build: h0425590_0
1109
+ subdir: linux-aarch64
1110
+ url: https://conda.anaconda.org/conda-forge/linux-aarch64/ncurses-6.4.20240210-h0425590_0.conda
1111
+ sha256: 4223dc34e2bddd37bf995158ae481e00be375b287d539bc7a0532634c0fc63b7
1112
+ md5: c1a1612ddaee95c83abfa0b2ec858626
1113
+ depends:
1114
+ - libgcc-ng >=12
1115
+ license: X11 AND BSD-3-Clause
1116
+ size: 926594
1117
+ timestamp: 1710866633409
1118
+ - kind: conda
1119
+ name: ncurses
1120
+ version: 6.4.20240210
1121
+ build: h078ce10_0
1122
+ subdir: osx-arm64
1123
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/ncurses-6.4.20240210-h078ce10_0.conda
1124
+ sha256: 06f0905791575e2cd3aa961493c56e490b3d82ad9eb49f1c332bd338b0216911
1125
+ md5: 616ae8691e6608527d0071e6766dcb81
1126
+ license: X11 AND BSD-3-Clause
1127
+ size: 820249
1128
+ timestamp: 1710866874348
1129
+ - kind: conda
1130
+ name: ncurses
1131
+ version: 6.4.20240210
1132
+ build: h59595ed_0
1133
+ subdir: linux-64
1134
+ url: https://conda.anaconda.org/conda-forge/linux-64/ncurses-6.4.20240210-h59595ed_0.conda
1135
+ sha256: aa0f005b6727aac6507317ed490f0904430584fa8ca722657e7f0fb94741de81
1136
+ md5: 97da8860a0da5413c7c98a3b3838a645
1137
+ depends:
1138
+ - libgcc-ng >=12
1139
+ license: X11 AND BSD-3-Clause
1140
+ size: 895669
1141
+ timestamp: 1710866638986
1142
+ - kind: conda
1143
+ name: ncurses
1144
+ version: 6.4.20240210
1145
+ build: h73e2aa4_0
1146
+ subdir: osx-64
1147
+ url: https://conda.anaconda.org/conda-forge/osx-64/ncurses-6.4.20240210-h73e2aa4_0.conda
1148
+ sha256: 50b72acf08acbc4e5332807653e2ca6b26d4326e8af16fad1fd3f2ce9ea55503
1149
+ md5: 50f28c512e9ad78589e3eab34833f762
1150
+ license: X11 AND BSD-3-Clause
1151
+ size: 823010
1152
+ timestamp: 1710866856626
1153
+ - kind: conda
1154
+ name: openssl
1155
+ version: 3.2.1
1156
+ build: h0d3ecfb_1
1157
+ build_number: 1
1158
+ subdir: osx-arm64
1159
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/openssl-3.2.1-h0d3ecfb_1.conda
1160
+ sha256: 519dc941d7ab0ebf31a2878d85c2f444450e7c5f6f41c4d07252c6bb3417b78b
1161
+ md5: eb580fb888d93d5d550c557323ac5cee
1162
+ depends:
1163
+ - ca-certificates
1164
+ constrains:
1165
+ - pyopenssl >=22.1
1166
+ license: Apache-2.0
1167
+ license_family: Apache
1168
+ size: 2855250
1169
+ timestamp: 1710793435903
1170
+ - kind: conda
1171
+ name: openssl
1172
+ version: 3.2.1
1173
+ build: h31becfc_1
1174
+ build_number: 1
1175
+ subdir: linux-aarch64
1176
+ url: https://conda.anaconda.org/conda-forge/linux-aarch64/openssl-3.2.1-h31becfc_1.conda
1177
+ sha256: 055a26e99ebc12ae0cf23266a0e62e71b59b8ce8cafb1ebb87e375ef9c758d7b
1178
+ md5: e95eb18d256edc72058e0dc9be5338a0
1179
+ depends:
1180
+ - ca-certificates
1181
+ - libgcc-ng >=12
1182
+ constrains:
1183
+ - pyopenssl >=22.1
1184
+ license: Apache-2.0
1185
+ license_family: Apache
1186
+ size: 3380844
1187
+ timestamp: 1710793424665
1188
+ - kind: conda
1189
+ name: openssl
1190
+ version: 3.2.1
1191
+ build: hcfcfb64_1
1192
+ build_number: 1
1193
+ subdir: win-64
1194
+ url: https://conda.anaconda.org/conda-forge/win-64/openssl-3.2.1-hcfcfb64_1.conda
1195
+ sha256: 61ce4e11c3c26ed4e4d9b7e7e2483121a1741ad0f9c8db0a91a28b6e05182ce6
1196
+ md5: 958e0418e93e50c575bff70fbcaa12d8
1197
+ depends:
1198
+ - ca-certificates
1199
+ - ucrt >=10.0.20348.0
1200
+ - vc >=14.2,<15
1201
+ - vc14_runtime >=14.29.30139
1202
+ constrains:
1203
+ - pyopenssl >=22.1
1204
+ license: Apache-2.0
1205
+ license_family: Apache
1206
+ size: 8230112
1207
+ timestamp: 1710796158475
1208
+ - kind: conda
1209
+ name: openssl
1210
+ version: 3.2.1
1211
+ build: hd590300_1
1212
+ build_number: 1
1213
+ subdir: linux-64
1214
+ url: https://conda.anaconda.org/conda-forge/linux-64/openssl-3.2.1-hd590300_1.conda
1215
+ sha256: 2c689444ed19a603be457284cf2115ee728a3fafb7527326e96054dee7cdc1a7
1216
+ md5: 9d731343cff6ee2e5a25c4a091bf8e2a
1217
+ depends:
1218
+ - ca-certificates
1219
+ - libgcc-ng >=12
1220
+ constrains:
1221
+ - pyopenssl >=22.1
1222
+ license: Apache-2.0
1223
+ license_family: Apache
1224
+ size: 2865379
1225
+ timestamp: 1710793235846
1226
+ - kind: conda
1227
+ name: openssl
1228
+ version: 3.2.1
1229
+ build: hd75f5a5_1
1230
+ build_number: 1
1231
+ subdir: osx-64
1232
+ url: https://conda.anaconda.org/conda-forge/osx-64/openssl-3.2.1-hd75f5a5_1.conda
1233
+ sha256: 7ae0ac6a1673584a8a380c2ff3d46eca48ed53bc7174c0d4eaa0dd2f247a0984
1234
+ md5: 570a6f04802df580be529f3a72d2bbf7
1235
+ depends:
1236
+ - ca-certificates
1237
+ constrains:
1238
+ - pyopenssl >=22.1
1239
+ license: Apache-2.0
1240
+ license_family: Apache
1241
+ size: 2506344
1242
+ timestamp: 1710793930515
1243
+ - kind: conda
1244
+ name: pip
1245
+ version: '24.0'
1246
+ build: pyhd8ed1ab_0
1247
+ subdir: noarch
1248
+ noarch: python
1249
+ url: https://conda.anaconda.org/conda-forge/noarch/pip-24.0-pyhd8ed1ab_0.conda
1250
+ sha256: b7c1c5d8f13e8cb491c4bd1d0d1896a4cf80fc47de01059ad77509112b664a4a
1251
+ md5: f586ac1e56c8638b64f9c8122a7b8a67
1252
+ depends:
1253
+ - python >=3.7
1254
+ - setuptools
1255
+ - wheel
1256
+ license: MIT
1257
+ license_family: MIT
1258
+ size: 1398245
1259
+ timestamp: 1706960660581
1260
+ - kind: conda
1261
+ name: psutil
1262
+ version: 6.0.0
1263
+ build: py310h936d840_0
1264
+ subdir: osx-64
1265
+ url: https://conda.anaconda.org/conda-forge/osx-64/psutil-6.0.0-py310h936d840_0.conda
1266
+ sha256: c976819733772f63a1c8e704cb96bf4287c0eb477b10ba467be3adbe5974bf3a
1267
+ md5: 2f5a3bd97ce3176794b59c160ed51fba
1268
+ depends:
1269
+ - __osx >=10.13
1270
+ - python >=3.10,<3.11.0a0
1271
+ - python_abi 3.10.* *_cp310
1272
+ license: BSD-3-Clause
1273
+ license_family: BSD
1274
+ size: 378205
1275
+ timestamp: 1719274714245
1276
+ - kind: conda
1277
+ name: psutil
1278
+ version: 6.0.0
1279
+ build: py310ha6dd24b_0
1280
+ subdir: osx-arm64
1281
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/psutil-6.0.0-py310ha6dd24b_0.conda
1282
+ sha256: 7477bd84734668992cda9076147c5d07ce92f59c70441757a5b289401bd8ed85
1283
+ md5: 0e0df689b8c6ea6676b786bd78a575d1
1284
+ depends:
1285
+ - __osx >=11.0
1286
+ - python >=3.10,<3.11.0a0
1287
+ - python >=3.10,<3.11.0a0 *_cpython
1288
+ - python_abi 3.10.* *_cp310
1289
+ license: BSD-3-Clause
1290
+ license_family: BSD
1291
+ size: 379588
1292
+ timestamp: 1719274858964
1293
+ - kind: conda
1294
+ name: psutil
1295
+ version: 6.0.0
1296
+ build: py310ha8f682b_0
1297
+ subdir: win-64
1298
+ url: https://conda.anaconda.org/conda-forge/win-64/psutil-6.0.0-py310ha8f682b_0.conda
1299
+ sha256: 9801a18aa6fadd3a6286fd89e83fe6affbcb3ca275bb2a00ab0da299d32e92ad
1300
+ md5: 32f5673b7aa2309dda74ccd01822caca
1301
+ depends:
1302
+ - python >=3.10,<3.11.0a0
1303
+ - python_abi 3.10.* *_cp310
1304
+ - ucrt >=10.0.20348.0
1305
+ - vc >=14.2,<15
1306
+ - vc14_runtime >=14.29.30139
1307
+ license: BSD-3-Clause
1308
+ license_family: BSD
1309
+ size: 388249
1310
+ timestamp: 1719275165312
1311
+ - kind: conda
1312
+ name: psutil
1313
+ version: 6.0.0
1314
+ build: py310hb52b2da_0
1315
+ subdir: linux-aarch64
1316
+ url: https://conda.anaconda.org/conda-forge/linux-aarch64/psutil-6.0.0-py310hb52b2da_0.conda
1317
+ sha256: 8ba0f87bbcbbd9f73c9695522ffe2082f1562a356f59ead8b3d02ab17151fc52
1318
+ md5: a75da26171bc7400ed580382b5c18196
1319
+ depends:
1320
+ - libgcc-ng >=12
1321
+ - python >=3.10,<3.11.0a0
1322
+ - python >=3.10,<3.11.0a0 *_cpython
1323
+ - python_abi 3.10.* *_cp310
1324
+ license: BSD-3-Clause
1325
+ license_family: BSD
1326
+ size: 372806
1327
+ timestamp: 1719274753799
1328
+ - kind: conda
1329
+ name: psutil
1330
+ version: 6.0.0
1331
+ build: py310hc51659f_0
1332
+ subdir: linux-64
1333
+ url: https://conda.anaconda.org/conda-forge/linux-64/psutil-6.0.0-py310hc51659f_0.conda
1334
+ sha256: d23e0a2bf49a752fcc8267484c5eff3e5b267703853c11cc7b4f762412d0f7ef
1335
+ md5: b04405826f96f4eb2f502e642d121bb5
1336
+ depends:
1337
+ - libgcc-ng >=12
1338
+ - python >=3.10,<3.11.0a0
1339
+ - python_abi 3.10.* *_cp310
1340
+ license: BSD-3-Clause
1341
+ license_family: BSD
1342
+ size: 371633
1343
+ timestamp: 1719274668659
1344
+ - kind: conda
1345
+ name: pysocks
1346
+ version: 1.7.1
1347
+ build: pyh0701188_6
1348
+ build_number: 6
1349
+ subdir: noarch
1350
+ noarch: python
1351
+ url: https://conda.anaconda.org/conda-forge/noarch/pysocks-1.7.1-pyh0701188_6.tar.bz2
1352
+ sha256: b3a612bc887f3dd0fb7c4199ad8e342bd148cf69a9b74fd9468a18cf2bef07b7
1353
+ md5: 56cd9fe388baac0e90c7149cfac95b60
1354
+ depends:
1355
+ - __win
1356
+ - python >=3.8
1357
+ - win_inet_pton
1358
+ license: BSD-3-Clause
1359
+ license_family: BSD
1360
+ size: 19348
1361
+ timestamp: 1661605138291
1362
+ - kind: conda
1363
+ name: pysocks
1364
+ version: 1.7.1
1365
+ build: pyha2e5f31_6
1366
+ build_number: 6
1367
+ subdir: noarch
1368
+ noarch: python
1369
+ url: https://conda.anaconda.org/conda-forge/noarch/pysocks-1.7.1-pyha2e5f31_6.tar.bz2
1370
+ sha256: a42f826e958a8d22e65b3394f437af7332610e43ee313393d1cf143f0a2d274b
1371
+ md5: 2a7de29fb590ca14b5243c4c812c8025
1372
+ depends:
1373
+ - __unix
1374
+ - python >=3.8
1375
+ license: BSD-3-Clause
1376
+ license_family: BSD
1377
+ size: 18981
1378
+ timestamp: 1661604969727
1379
+ - kind: conda
1380
+ name: python
1381
+ version: 3.10.14
1382
+ build: h00d2728_0_cpython
1383
+ subdir: osx-64
1384
+ url: https://conda.anaconda.org/conda-forge/osx-64/python-3.10.14-h00d2728_0_cpython.conda
1385
+ sha256: 00c1de2d46ede26609ef4e84a44b83be7876ba6a0215b7c83bff41a0656bf694
1386
+ md5: 0a1cddc4382c5c171e791c70740546dd
1387
+ depends:
1388
+ - bzip2 >=1.0.8,<2.0a0
1389
+ - libffi >=3.4,<4.0a0
1390
+ - libsqlite >=3.45.2,<4.0a0
1391
+ - libzlib >=1.2.13,<2.0.0a0
1392
+ - ncurses >=6.4.20240210,<7.0a0
1393
+ - openssl >=3.2.1,<4.0a0
1394
+ - readline >=8.2,<9.0a0
1395
+ - tk >=8.6.13,<8.7.0a0
1396
+ - tzdata
1397
+ - xz >=5.2.6,<6.0a0
1398
+ constrains:
1399
+ - python_abi 3.10.* *_cp310
1400
+ license: Python-2.0
1401
+ size: 11890228
1402
+ timestamp: 1710940046031
1403
+ - kind: conda
1404
+ name: python
1405
+ version: 3.10.14
1406
+ build: h2469fbe_0_cpython
1407
+ subdir: osx-arm64
1408
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/python-3.10.14-h2469fbe_0_cpython.conda
1409
+ sha256: 454d609fe25daedce9e886efcbfcadad103ed0362e7cb6d2bcddec90b1ecd3ee
1410
+ md5: 4ae999c8227c6d8c7623d32d51d25ea9
1411
+ depends:
1412
+ - bzip2 >=1.0.8,<2.0a0
1413
+ - libffi >=3.4,<4.0a0
1414
+ - libsqlite >=3.45.2,<4.0a0
1415
+ - libzlib >=1.2.13,<2.0.0a0
1416
+ - ncurses >=6.4.20240210,<7.0a0
1417
+ - openssl >=3.2.1,<4.0a0
1418
+ - readline >=8.2,<9.0a0
1419
+ - tk >=8.6.13,<8.7.0a0
1420
+ - tzdata
1421
+ - xz >=5.2.6,<6.0a0
1422
+ constrains:
1423
+ - python_abi 3.10.* *_cp310
1424
+ license: Python-2.0
1425
+ size: 12336005
1426
+ timestamp: 1710939659384
1427
+ - kind: conda
1428
+ name: python
1429
+ version: 3.10.14
1430
+ build: h4de0772_0_cpython
1431
+ subdir: win-64
1432
+ url: https://conda.anaconda.org/conda-forge/win-64/python-3.10.14-h4de0772_0_cpython.conda
1433
+ sha256: 332f97d9927b65857d6d2d4d50d66dce9b37da81edb67833ae6b88ad52acbd0c
1434
+ md5: 4a00e84f29d1eb418d84970598c444e1
1435
+ depends:
1436
+ - bzip2 >=1.0.8,<2.0a0
1437
+ - libffi >=3.4,<4.0a0
1438
+ - libsqlite >=3.45.2,<4.0a0
1439
+ - libzlib >=1.2.13,<2.0.0a0
1440
+ - openssl >=3.2.1,<4.0a0
1441
+ - tk >=8.6.13,<8.7.0a0
1442
+ - tzdata
1443
+ - vc >=14.1,<15
1444
+ - vc14_runtime >=14.16.27033
1445
+ - xz >=5.2.6,<6.0a0
1446
+ constrains:
1447
+ - python_abi 3.10.* *_cp310
1448
+ license: Python-2.0
1449
+ size: 15864027
1450
+ timestamp: 1710938888352
1451
+ - kind: conda
1452
+ name: python
1453
+ version: 3.10.14
1454
+ build: hbbe8eec_0_cpython
1455
+ subdir: linux-aarch64
1456
+ url: https://conda.anaconda.org/conda-forge/linux-aarch64/python-3.10.14-hbbe8eec_0_cpython.conda
1457
+ sha256: 992583064b95d256e1b1f03581a51e225a425894d865e35ea2bf3017444c3e84
1458
+ md5: 8a8ee3a8c62032c554debc785a3b5aba
1459
+ depends:
1460
+ - bzip2 >=1.0.8,<2.0a0
1461
+ - ld_impl_linux-aarch64 >=2.36.1
1462
+ - libffi >=3.4,<4.0a0
1463
+ - libgcc-ng >=12
1464
+ - libnsl >=2.0.1,<2.1.0a0
1465
+ - libsqlite >=3.45.2,<4.0a0
1466
+ - libuuid >=2.38.1,<3.0a0
1467
+ - libxcrypt >=4.4.36
1468
+ - libzlib >=1.2.13,<2.0.0a0
1469
+ - ncurses >=6.4.20240210,<7.0a0
1470
+ - openssl >=3.2.1,<4.0a0
1471
+ - readline >=8.2,<9.0a0
1472
+ - tk >=8.6.13,<8.7.0a0
1473
+ - tzdata
1474
+ - xz >=5.2.6,<6.0a0
1475
+ constrains:
1476
+ - python_abi 3.10.* *_cp310
1477
+ license: Python-2.0
1478
+ size: 13116477
1479
+ timestamp: 1710971217224
1480
+ - kind: conda
1481
+ name: python
1482
+ version: 3.10.14
1483
+ build: hd12c33a_0_cpython
1484
+ subdir: linux-64
1485
+ url: https://conda.anaconda.org/conda-forge/linux-64/python-3.10.14-hd12c33a_0_cpython.conda
1486
+ sha256: 76a5d12e73542678b70a94570f7b0f7763f9a938f77f0e75d9ea615ef22aa84c
1487
+ md5: 2b4ba962994e8bd4be9ff5b64b75aff2
1488
+ depends:
1489
+ - bzip2 >=1.0.8,<2.0a0
1490
+ - ld_impl_linux-64 >=2.36.1
1491
+ - libffi >=3.4,<4.0a0
1492
+ - libgcc-ng >=12
1493
+ - libnsl >=2.0.1,<2.1.0a0
1494
+ - libsqlite >=3.45.2,<4.0a0
1495
+ - libuuid >=2.38.1,<3.0a0
1496
+ - libxcrypt >=4.4.36
1497
+ - libzlib >=1.2.13,<2.0.0a0
1498
+ - ncurses >=6.4.20240210,<7.0a0
1499
+ - openssl >=3.2.1,<4.0a0
1500
+ - readline >=8.2,<9.0a0
1501
+ - tk >=8.6.13,<8.7.0a0
1502
+ - tzdata
1503
+ - xz >=5.2.6,<6.0a0
1504
+ constrains:
1505
+ - python_abi 3.10.* *_cp310
1506
+ license: Python-2.0
1507
+ size: 25517742
1508
+ timestamp: 1710939725109
1509
+ - kind: conda
1510
+ name: python_abi
1511
+ version: '3.10'
1512
+ build: 4_cp310
1513
+ build_number: 4
1514
+ subdir: linux-64
1515
+ url: https://conda.anaconda.org/conda-forge/linux-64/python_abi-3.10-4_cp310.conda
1516
+ sha256: 456bec815bfc2b364763084d08b412fdc4c17eb9ccc66a36cb775fa7ac3cbaec
1517
+ md5: 26322ec5d7712c3ded99dd656142b8ce
1518
+ constrains:
1519
+ - python 3.10.* *_cpython
1520
+ license: BSD-3-Clause
1521
+ license_family: BSD
1522
+ size: 6398
1523
+ timestamp: 1695147363189
1524
+ - kind: conda
1525
+ name: python_abi
1526
+ version: '3.10'
1527
+ build: 4_cp310
1528
+ build_number: 4
1529
+ subdir: linux-aarch64
1530
+ url: https://conda.anaconda.org/conda-forge/linux-aarch64/python_abi-3.10-4_cp310.conda
1531
+ sha256: 9191cc3ddf380b655c08b3436a8174ce0cc798a6dfcfa8ee80fa793d0b7165de
1532
+ md5: b0ff2ed109650f9e90d627d3119eb442
1533
+ constrains:
1534
+ - python 3.10.* *_cpython
1535
+ license: BSD-3-Clause
1536
+ license_family: BSD
1537
+ size: 6436
1538
+ timestamp: 1695147402616
1539
+ - kind: conda
1540
+ name: python_abi
1541
+ version: '3.10'
1542
+ build: 4_cp310
1543
+ build_number: 4
1544
+ subdir: osx-64
1545
+ url: https://conda.anaconda.org/conda-forge/osx-64/python_abi-3.10-4_cp310.conda
1546
+ sha256: abc26b3b5a62f9c8112a2303d24b0c590d5f7fc9470521f5a520472d59c2223e
1547
+ md5: b15c816c5a86abcc4d1458dd63aa4c65
1548
+ constrains:
1549
+ - python 3.10.* *_cpython
1550
+ license: BSD-3-Clause
1551
+ license_family: BSD
1552
+ size: 6484
1553
+ timestamp: 1695147705581
1554
+ - kind: conda
1555
+ name: python_abi
1556
+ version: '3.10'
1557
+ build: 4_cp310
1558
+ build_number: 4
1559
+ subdir: osx-arm64
1560
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/python_abi-3.10-4_cp310.conda
1561
+ sha256: f69bac2f28082a275ef67313968b2c366d8236c3a6869b9cdf5cdb97a5821812
1562
+ md5: 1a3d9c6bb5f0b1b22d9e9296c127e8c7
1563
+ constrains:
1564
+ - python 3.10.* *_cpython
1565
+ license: BSD-3-Clause
1566
+ license_family: BSD
1567
+ size: 6490
1568
+ timestamp: 1695147522999
1569
+ - kind: conda
1570
+ name: python_abi
1571
+ version: '3.10'
1572
+ build: 4_cp310
1573
+ build_number: 4
1574
+ subdir: win-64
1575
+ url: https://conda.anaconda.org/conda-forge/win-64/python_abi-3.10-4_cp310.conda
1576
+ sha256: 19066c462fd0e32c64503c688f77cb603beb4019b812caf855d03f2a5447960b
1577
+ md5: b41195997c14fb7473d26637ea4c3946
1578
+ constrains:
1579
+ - python 3.10.* *_cpython
1580
+ license: BSD-3-Clause
1581
+ license_family: BSD
1582
+ size: 6773
1583
+ timestamp: 1695147715814
1584
+ - kind: conda
1585
+ name: readline
1586
+ version: '8.2'
1587
+ build: h8228510_1
1588
+ build_number: 1
1589
+ subdir: linux-64
1590
+ url: https://conda.anaconda.org/conda-forge/linux-64/readline-8.2-h8228510_1.conda
1591
+ sha256: 5435cf39d039387fbdc977b0a762357ea909a7694d9528ab40f005e9208744d7
1592
+ md5: 47d31b792659ce70f470b5c82fdfb7a4
1593
+ depends:
1594
+ - libgcc-ng >=12
1595
+ - ncurses >=6.3,<7.0a0
1596
+ license: GPL-3.0-only
1597
+ license_family: GPL
1598
+ size: 281456
1599
+ timestamp: 1679532220005
1600
+ - kind: conda
1601
+ name: readline
1602
+ version: '8.2'
1603
+ build: h8fc344f_1
1604
+ build_number: 1
1605
+ subdir: linux-aarch64
1606
+ url: https://conda.anaconda.org/conda-forge/linux-aarch64/readline-8.2-h8fc344f_1.conda
1607
+ sha256: 4c99f7417419734e3797d45bc355e61c26520e111893b0d7087a01a7fbfbe3dd
1608
+ md5: 105eb1e16bf83bfb2eb380a48032b655
1609
+ depends:
1610
+ - libgcc-ng >=12
1611
+ - ncurses >=6.3,<7.0a0
1612
+ license: GPL-3.0-only
1613
+ license_family: GPL
1614
+ size: 294092
1615
+ timestamp: 1679532238805
1616
+ - kind: conda
1617
+ name: readline
1618
+ version: '8.2'
1619
+ build: h92ec313_1
1620
+ build_number: 1
1621
+ subdir: osx-arm64
1622
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/readline-8.2-h92ec313_1.conda
1623
+ sha256: a1dfa679ac3f6007362386576a704ad2d0d7a02e98f5d0b115f207a2da63e884
1624
+ md5: 8cbb776a2f641b943d413b3e19df71f4
1625
+ depends:
1626
+ - ncurses >=6.3,<7.0a0
1627
+ license: GPL-3.0-only
1628
+ license_family: GPL
1629
+ size: 250351
1630
+ timestamp: 1679532511311
1631
+ - kind: conda
1632
+ name: readline
1633
+ version: '8.2'
1634
+ build: h9e318b2_1
1635
+ build_number: 1
1636
+ subdir: osx-64
1637
+ url: https://conda.anaconda.org/conda-forge/osx-64/readline-8.2-h9e318b2_1.conda
1638
+ sha256: 41e7d30a097d9b060037f0c6a2b1d4c4ae7e942c06c943d23f9d481548478568
1639
+ md5: f17f77f2acf4d344734bda76829ce14e
1640
+ depends:
1641
+ - ncurses >=6.3,<7.0a0
1642
+ license: GPL-3.0-only
1643
+ license_family: GPL
1644
+ size: 255870
1645
+ timestamp: 1679532707590
1646
+ - kind: conda
1647
+ name: ruff
1648
+ version: 0.3.7
1649
+ build: py310h298983d_0
1650
+ subdir: win-64
1651
+ url: https://conda.anaconda.org/conda-forge/win-64/ruff-0.3.7-py310h298983d_0.conda
1652
+ sha256: fd0766092fb869c3c591da324cb8e675e9cd01e220dc72b9f2b857e92337c01d
1653
+ md5: 9a5e1425ea8eac4f79a275c20d859cac
1654
+ depends:
1655
+ - python >=3.10,<3.11.0a0
1656
+ - python_abi 3.10.* *_cp310
1657
+ - ucrt >=10.0.20348.0
1658
+ - vc >=14.2,<15
1659
+ - vc14_runtime >=14.29.30139
1660
+ license: MIT
1661
+ license_family: MIT
1662
+ size: 6353965
1663
+ timestamp: 1712963138812
1664
+ - kind: conda
1665
+ name: ruff
1666
+ version: 0.3.7
1667
+ build: py310h3d77a66_0
1668
+ subdir: linux-64
1669
+ url: https://conda.anaconda.org/conda-forge/linux-64/ruff-0.3.7-py310h3d77a66_0.conda
1670
+ sha256: fc66dae77831b8110cd20ee257b462b4471204d310fc438d9760b769799969d9
1671
+ md5: 55b40e33fae0b983b48d2f7aff8a8978
1672
+ depends:
1673
+ - libgcc-ng >=12
1674
+ - libstdcxx-ng >=12
1675
+ - python >=3.10,<3.11.0a0
1676
+ - python_abi 3.10.* *_cp310
1677
+ license: MIT
1678
+ license_family: MIT
1679
+ size: 6402959
1680
+ timestamp: 1712962093948
1681
+ - kind: conda
1682
+ name: ruff
1683
+ version: 0.3.7
1684
+ build: py310h81561d7_0
1685
+ subdir: osx-arm64
1686
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/ruff-0.3.7-py310h81561d7_0.conda
1687
+ sha256: b7c88aae99ef73ffac7886e4dbb02428230b66284fa360134985bb1741f8d569
1688
+ md5: 4d922de656cdef20fb9fd47f0529bb95
1689
+ depends:
1690
+ - libcxx >=16
1691
+ - python >=3.10,<3.11.0a0
1692
+ - python >=3.10,<3.11.0a0 *_cpython
1693
+ - python_abi 3.10.* *_cp310
1694
+ constrains:
1695
+ - __osx >=11.0
1696
+ license: MIT
1697
+ license_family: MIT
1698
+ size: 5882969
1699
+ timestamp: 1712963596623
1700
+ - kind: conda
1701
+ name: ruff
1702
+ version: 0.3.7
1703
+ build: py310hdac29b7_0
1704
+ subdir: osx-64
1705
+ url: https://conda.anaconda.org/conda-forge/osx-64/ruff-0.3.7-py310hdac29b7_0.conda
1706
+ sha256: 1d279579eb3282973102f3c903ef21b3b033a8635d00d08ff40959871baf91dc
1707
+ md5: ab5f005a86062035d3eaf1adb081cc26
1708
+ depends:
1709
+ - libcxx >=16
1710
+ - python >=3.10,<3.11.0a0
1711
+ - python_abi 3.10.* *_cp310
1712
+ constrains:
1713
+ - __osx >=10.12
1714
+ license: MIT
1715
+ license_family: MIT
1716
+ size: 6192290
1717
+ timestamp: 1712963577697
1718
+ - kind: conda
1719
+ name: ruff
1720
+ version: 0.3.7
1721
+ build: py310hf6424b7_0
1722
+ subdir: linux-aarch64
1723
+ url: https://conda.anaconda.org/conda-forge/linux-aarch64/ruff-0.3.7-py310hf6424b7_0.conda
1724
+ sha256: 45cf4cfcaf97598959077d93e698db15aa17a7e215c5b050738367def0227e02
1725
+ md5: c6dcc788fee5fefe50165e9c0353d065
1726
+ depends:
1727
+ - libgcc-ng >=12
1728
+ - libstdcxx-ng >=12
1729
+ - python >=3.10,<3.11.0a0
1730
+ - python >=3.10,<3.11.0a0 *_cpython
1731
+ - python_abi 3.10.* *_cp310
1732
+ license: MIT
1733
+ license_family: MIT
1734
+ size: 6047252
1735
+ timestamp: 1712962189494
1736
+ - kind: conda
1737
+ name: setuptools
1738
+ version: 70.1.1
1739
+ build: pyhd8ed1ab_0
1740
+ subdir: noarch
1741
+ noarch: python
1742
+ url: https://conda.anaconda.org/conda-forge/noarch/setuptools-70.1.1-pyhd8ed1ab_0.conda
1743
+ sha256: 34ecbc63df6052a320838335a0e594b60050c92de79254045e52095bc27dde03
1744
+ md5: 985e9e86e1b0fc75a74a9bfab9309ef7
1745
+ depends:
1746
+ - python >=3.8
1747
+ license: MIT
1748
+ license_family: MIT
1749
+ size: 496940
1750
+ timestamp: 1719325175003
1751
+ - kind: conda
1752
+ name: tk
1753
+ version: 8.6.13
1754
+ build: h194ca79_0
1755
+ subdir: linux-aarch64
1756
+ url: https://conda.anaconda.org/conda-forge/linux-aarch64/tk-8.6.13-h194ca79_0.conda
1757
+ sha256: 7fa27cc512d3a783f38bd16bbbffc008807372499d5b65d089a8e43bde9db267
1758
+ md5: f75105e0585851f818e0009dd1dde4dc
1759
+ depends:
1760
+ - libgcc-ng >=12
1761
+ - libzlib >=1.2.13,<1.3.0a0
1762
+ license: TCL
1763
+ license_family: BSD
1764
+ size: 3351802
1765
+ timestamp: 1695506242997
1766
+ - kind: conda
1767
+ name: tk
1768
+ version: 8.6.13
1769
+ build: h1abcd95_1
1770
+ build_number: 1
1771
+ subdir: osx-64
1772
+ url: https://conda.anaconda.org/conda-forge/osx-64/tk-8.6.13-h1abcd95_1.conda
1773
+ sha256: 30412b2e9de4ff82d8c2a7e5d06a15f4f4fef1809a72138b6ccb53a33b26faf5
1774
+ md5: bf830ba5afc507c6232d4ef0fb1a882d
1775
+ depends:
1776
+ - libzlib >=1.2.13,<1.3.0a0
1777
+ license: TCL
1778
+ license_family: BSD
1779
+ size: 3270220
1780
+ timestamp: 1699202389792
1781
+ - kind: conda
1782
+ name: tk
1783
+ version: 8.6.13
1784
+ build: h5083fa2_1
1785
+ build_number: 1
1786
+ subdir: osx-arm64
1787
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/tk-8.6.13-h5083fa2_1.conda
1788
+ sha256: 72457ad031b4c048e5891f3f6cb27a53cb479db68a52d965f796910e71a403a8
1789
+ md5: b50a57ba89c32b62428b71a875291c9b
1790
+ depends:
1791
+ - libzlib >=1.2.13,<1.3.0a0
1792
+ license: TCL
1793
+ license_family: BSD
1794
+ size: 3145523
1795
+ timestamp: 1699202432999
1796
+ - kind: conda
1797
+ name: tk
1798
+ version: 8.6.13
1799
+ build: h5226925_1
1800
+ build_number: 1
1801
+ subdir: win-64
1802
+ url: https://conda.anaconda.org/conda-forge/win-64/tk-8.6.13-h5226925_1.conda
1803
+ sha256: 2c4e914f521ccb2718946645108c9bd3fc3216ba69aea20c2c3cedbd8db32bb1
1804
+ md5: fc048363eb8f03cd1737600a5d08aafe
1805
+ depends:
1806
+ - ucrt >=10.0.20348.0
1807
+ - vc >=14.2,<15
1808
+ - vc14_runtime >=14.29.30139
1809
+ license: TCL
1810
+ license_family: BSD
1811
+ size: 3503410
1812
+ timestamp: 1699202577803
1813
+ - kind: conda
1814
+ name: tk
1815
+ version: 8.6.13
1816
+ build: noxft_h4845f30_101
1817
+ build_number: 101
1818
+ subdir: linux-64
1819
+ url: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_h4845f30_101.conda
1820
+ sha256: e0569c9caa68bf476bead1bed3d79650bb080b532c64a4af7d8ca286c08dea4e
1821
+ md5: d453b98d9c83e71da0741bb0ff4d76bc
1822
+ depends:
1823
+ - libgcc-ng >=12
1824
+ - libzlib >=1.2.13,<1.3.0a0
1825
+ license: TCL
1826
+ license_family: BSD
1827
+ size: 3318875
1828
+ timestamp: 1699202167581
1829
+ - kind: conda
1830
+ name: tomli
1831
+ version: 2.0.1
1832
+ build: pyhd8ed1ab_0
1833
+ subdir: noarch
1834
+ noarch: python
1835
+ url: https://conda.anaconda.org/conda-forge/noarch/tomli-2.0.1-pyhd8ed1ab_0.tar.bz2
1836
+ sha256: 4cd48aba7cd026d17e86886af48d0d2ebc67ed36f87f6534f4b67138f5a5a58f
1837
+ md5: 5844808ffab9ebdb694585b50ba02a96
1838
+ depends:
1839
+ - python >=3.7
1840
+ license: MIT
1841
+ license_family: MIT
1842
+ size: 15940
1843
+ timestamp: 1644342331069
1844
+ - kind: conda
1845
+ name: types-requests
1846
+ version: 2.31.0.20240406
1847
+ build: pyhd8ed1ab_0
1848
+ subdir: noarch
1849
+ noarch: python
1850
+ url: https://conda.anaconda.org/conda-forge/noarch/types-requests-2.31.0.20240406-pyhd8ed1ab_0.conda
1851
+ sha256: de93470fe64b2baa5f8ef16a6edf849bb93542f301ed343d0ab4d6fd6116d742
1852
+ md5: b4bc9b6dbc54191100b518a18be6045e
1853
+ depends:
1854
+ - python >=3.6
1855
+ - urllib3 >=2
1856
+ license: Apache-2.0 AND MIT
1857
+ size: 26072
1858
+ timestamp: 1712378106245
1859
+ - kind: conda
1860
+ name: typing_extensions
1861
+ version: 4.11.0
1862
+ build: pyha770c72_0
1863
+ subdir: noarch
1864
+ noarch: python
1865
+ url: https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.11.0-pyha770c72_0.conda
1866
+ sha256: a7e8714d14f854058e971a6ed44f18cc37cc685f98ddefb2e6b7899a0cc4d1a2
1867
+ md5: 6ef2fc37559256cf682d8b3375e89b80
1868
+ depends:
1869
+ - python >=3.8
1870
+ license: PSF-2.0
1871
+ license_family: PSF
1872
+ size: 37583
1873
+ timestamp: 1712330089194
1874
+ - kind: conda
1875
+ name: typos
1876
+ version: 1.20.9
1877
+ build: h11a7dfb_0
1878
+ subdir: osx-64
1879
+ url: https://conda.anaconda.org/conda-forge/osx-64/typos-1.20.9-h11a7dfb_0.conda
1880
+ sha256: c6345dad41706c3f925a3c76308e2af390ed3ce8eb92b84ebb82a37f2f599d1e
1881
+ md5: 9e8f7d03be6ae3eb50f14fddffc7fe51
1882
+ constrains:
1883
+ - __osx >=10.12
1884
+ license: MIT
1885
+ license_family: MIT
1886
+ size: 3347431
1887
+ timestamp: 1713321633495
1888
+ - kind: conda
1889
+ name: typos
1890
+ version: 1.20.9
1891
+ build: h1d8f897_0
1892
+ subdir: linux-aarch64
1893
+ url: https://conda.anaconda.org/conda-forge/linux-aarch64/typos-1.20.9-h1d8f897_0.conda
1894
+ sha256: d98e8e5fb65b4d09cff752534418acf3e835f702003636ef2f2165039ddbb521
1895
+ md5: 519bac2b977d194cc65f32c72bea5d12
1896
+ depends:
1897
+ - libgcc-ng >=12
1898
+ license: MIT
1899
+ license_family: MIT
1900
+ size: 3561452
1901
+ timestamp: 1713321791172
1902
+ - kind: conda
1903
+ name: typos
1904
+ version: 1.20.9
1905
+ build: h5ef7bb8_0
1906
+ subdir: osx-arm64
1907
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/typos-1.20.9-h5ef7bb8_0.conda
1908
+ sha256: 42e47f16457a6193658e3052c2dbabbdcb5759f79c8f83eb06eb3af91287c18e
1909
+ md5: f22e8b9559961907af67f4ec6d612456
1910
+ constrains:
1911
+ - __osx >=11.0
1912
+ license: MIT
1913
+ license_family: MIT
1914
+ size: 3331993
1915
+ timestamp: 1713321682918
1916
+ - kind: conda
1917
+ name: typos
1918
+ version: 1.20.9
1919
+ build: h7f3b576_0
1920
+ subdir: win-64
1921
+ url: https://conda.anaconda.org/conda-forge/win-64/typos-1.20.9-h7f3b576_0.conda
1922
+ sha256: ff9db9be0d78163b9b26ab6169e84d6026c05552a51bb335bbe62e0f4680cfd9
1923
+ md5: 9e6937e2784adfe9088d8ee3ba944a07
1924
+ depends:
1925
+ - m2w64-gcc-libs
1926
+ - m2w64-gcc-libs-core
1927
+ license: MIT
1928
+ license_family: MIT
1929
+ size: 2596487
1930
+ timestamp: 1713321776083
1931
+ - kind: conda
1932
+ name: typos
1933
+ version: 1.20.9
1934
+ build: he8a937b_0
1935
+ subdir: linux-64
1936
+ url: https://conda.anaconda.org/conda-forge/linux-64/typos-1.20.9-he8a937b_0.conda
1937
+ sha256: 2fc3888119f17b675edca08e1927f7f3445ec8c5ceb7841fc5dd6f20d8373fdd
1938
+ md5: fc85f19eb1191bb7e6bf34d4b2ec39e6
1939
+ depends:
1940
+ - libgcc-ng >=12
1941
+ license: MIT
1942
+ license_family: MIT
1943
+ size: 3728370
1944
+ timestamp: 1713320476655
1945
+ - kind: conda
1946
+ name: tzdata
1947
+ version: 2024a
1948
+ build: h0c530f3_0
1949
+ subdir: noarch
1950
+ noarch: generic
1951
+ url: https://conda.anaconda.org/conda-forge/noarch/tzdata-2024a-h0c530f3_0.conda
1952
+ sha256: 7b2b69c54ec62a243eb6fba2391b5e443421608c3ae5dbff938ad33ca8db5122
1953
+ md5: 161081fc7cec0bfda0d86d7cb595f8d8
1954
+ license: LicenseRef-Public-Domain
1955
+ size: 119815
1956
+ timestamp: 1706886945727
1957
+ - kind: conda
1958
+ name: ucrt
1959
+ version: 10.0.22621.0
1960
+ build: h57928b3_0
1961
+ subdir: win-64
1962
+ url: https://conda.anaconda.org/conda-forge/win-64/ucrt-10.0.22621.0-h57928b3_0.tar.bz2
1963
+ sha256: f29cdaf8712008f6b419b8b1a403923b00ab2504bfe0fb2ba8eb60e72d4f14c6
1964
+ md5: 72608f6cd3e5898229c3ea16deb1ac43
1965
+ constrains:
1966
+ - vs2015_runtime >=14.29.30037
1967
+ license: LicenseRef-Proprietary
1968
+ license_family: PROPRIETARY
1969
+ size: 1283972
1970
+ timestamp: 1666630199266
1971
+ - kind: conda
1972
+ name: urllib3
1973
+ version: 2.2.1
1974
+ build: pyhd8ed1ab_0
1975
+ subdir: noarch
1976
+ noarch: python
1977
+ url: https://conda.anaconda.org/conda-forge/noarch/urllib3-2.2.1-pyhd8ed1ab_0.conda
1978
+ sha256: d4009dcc9327684d6409706ce17656afbeae690d8522d3c9bc4df57649a352cd
1979
+ md5: 08807a87fa7af10754d46f63b368e016
1980
+ depends:
1981
+ - brotli-python >=1.0.9
1982
+ - pysocks >=1.5.6,<2.0,!=1.5.7
1983
+ - python >=3.7
1984
+ license: MIT
1985
+ license_family: MIT
1986
+ size: 94669
1987
+ timestamp: 1708239595549
1988
+ - kind: conda
1989
+ name: vc
1990
+ version: '14.3'
1991
+ build: hcf57466_18
1992
+ build_number: 18
1993
+ subdir: win-64
1994
+ url: https://conda.anaconda.org/conda-forge/win-64/vc-14.3-hcf57466_18.conda
1995
+ sha256: 447a8d8292a7b2107dcc18afb67f046824711a652725fc0f522c368e7a7b8318
1996
+ md5: 20e1e652a4c740fa719002a8449994a2
1997
+ depends:
1998
+ - vc14_runtime >=14.38.33130
1999
+ track_features:
2000
+ - vc14
2001
+ license: BSD-3-Clause
2002
+ license_family: BSD
2003
+ size: 16977
2004
+ timestamp: 1702511255313
2005
+ - kind: conda
2006
+ name: vc14_runtime
2007
+ version: 14.38.33130
2008
+ build: h82b7239_18
2009
+ build_number: 18
2010
+ subdir: win-64
2011
+ url: https://conda.anaconda.org/conda-forge/win-64/vc14_runtime-14.38.33130-h82b7239_18.conda
2012
+ sha256: bf94c9af4b2e9cba88207001197e695934eadc96a5c5e4cd7597e950aae3d8ff
2013
+ md5: 8be79fdd2725ddf7bbf8a27a4c1f79ba
2014
+ depends:
2015
+ - ucrt >=10.0.20348.0
2016
+ constrains:
2017
+ - vs2015_runtime 14.38.33130.* *_18
2018
+ license: LicenseRef-ProprietaryMicrosoft
2019
+ license_family: Proprietary
2020
+ size: 749868
2021
+ timestamp: 1702511239004
2022
+ - kind: conda
2023
+ name: vs2015_runtime
2024
+ version: 14.38.33130
2025
+ build: hcb4865c_18
2026
+ build_number: 18
2027
+ subdir: win-64
2028
+ url: https://conda.anaconda.org/conda-forge/win-64/vs2015_runtime-14.38.33130-hcb4865c_18.conda
2029
+ sha256: a2fec221f361d6263c117f4ea6d772b21c90a2f8edc6f3eb0eadec6bfe8843db
2030
+ md5: 10d42885e3ed84e575b454db30f1aa93
2031
+ depends:
2032
+ - vc14_runtime >=14.38.33130
2033
+ license: BSD-3-Clause
2034
+ license_family: BSD
2035
+ size: 16988
2036
+ timestamp: 1702511261442
2037
+ - kind: conda
2038
+ name: wheel
2039
+ version: 0.43.0
2040
+ build: pyhd8ed1ab_1
2041
+ build_number: 1
2042
+ subdir: noarch
2043
+ noarch: python
2044
+ url: https://conda.anaconda.org/conda-forge/noarch/wheel-0.43.0-pyhd8ed1ab_1.conda
2045
+ sha256: cb318f066afd6fd64619f14c030569faf3f53e6f50abf743b4c865e7d95b96bc
2046
+ md5: 0b5293a157c2b5cd513dd1b03d8d3aae
2047
+ depends:
2048
+ - python >=3.8
2049
+ license: MIT
2050
+ license_family: MIT
2051
+ size: 57963
2052
+ timestamp: 1711546009410
2053
+ - kind: conda
2054
+ name: win_inet_pton
2055
+ version: 1.1.0
2056
+ build: pyhd8ed1ab_6
2057
+ build_number: 6
2058
+ subdir: noarch
2059
+ noarch: python
2060
+ url: https://conda.anaconda.org/conda-forge/noarch/win_inet_pton-1.1.0-pyhd8ed1ab_6.tar.bz2
2061
+ sha256: a11ae693a0645bf6c7b8a47bac030be9c0967d0b1924537b9ff7458e832c0511
2062
+ md5: 30878ecc4bd36e8deeea1e3c151b2e0b
2063
+ depends:
2064
+ - __win
2065
+ - python >=3.6
2066
+ license: PUBLIC-DOMAIN
2067
+ size: 8191
2068
+ timestamp: 1667051294134
2069
+ - kind: conda
2070
+ name: xz
2071
+ version: 5.2.6
2072
+ build: h166bdaf_0
2073
+ subdir: linux-64
2074
+ url: https://conda.anaconda.org/conda-forge/linux-64/xz-5.2.6-h166bdaf_0.tar.bz2
2075
+ sha256: 03a6d28ded42af8a347345f82f3eebdd6807a08526d47899a42d62d319609162
2076
+ md5: 2161070d867d1b1204ea749c8eec4ef0
2077
+ depends:
2078
+ - libgcc-ng >=12
2079
+ license: LGPL-2.1 and GPL-2.0
2080
+ size: 418368
2081
+ timestamp: 1660346797927
2082
+ - kind: conda
2083
+ name: xz
2084
+ version: 5.2.6
2085
+ build: h57fd34a_0
2086
+ subdir: osx-arm64
2087
+ url: https://conda.anaconda.org/conda-forge/osx-arm64/xz-5.2.6-h57fd34a_0.tar.bz2
2088
+ sha256: 59d78af0c3e071021cfe82dc40134c19dab8cdf804324b62940f5c8cd71803ec
2089
+ md5: 39c6b54e94014701dd157f4f576ed211
2090
+ license: LGPL-2.1 and GPL-2.0
2091
+ size: 235693
2092
+ timestamp: 1660346961024
2093
+ - kind: conda
2094
+ name: xz
2095
+ version: 5.2.6
2096
+ build: h775f41a_0
2097
+ subdir: osx-64
2098
+ url: https://conda.anaconda.org/conda-forge/osx-64/xz-5.2.6-h775f41a_0.tar.bz2
2099
+ sha256: eb09823f34cc2dd663c0ec4ab13f246f45dcd52e5b8c47b9864361de5204a1c8
2100
+ md5: a72f9d4ea13d55d745ff1ed594747f10
2101
+ license: LGPL-2.1 and GPL-2.0
2102
+ size: 238119
2103
+ timestamp: 1660346964847
2104
+ - kind: conda
2105
+ name: xz
2106
+ version: 5.2.6
2107
+ build: h8d14728_0
2108
+ subdir: win-64
2109
+ url: https://conda.anaconda.org/conda-forge/win-64/xz-5.2.6-h8d14728_0.tar.bz2
2110
+ sha256: 54d9778f75a02723784dc63aff4126ff6e6749ba21d11a6d03c1f4775f269fe0
2111
+ md5: 515d77642eaa3639413c6b1bc3f94219
2112
+ depends:
2113
+ - vc >=14.1,<15
2114
+ - vs2015_runtime >=14.16.27033
2115
+ license: LGPL-2.1 and GPL-2.0
2116
+ size: 217804
2117
+ timestamp: 1660346976440
2118
+ - kind: conda
2119
+ name: xz
2120
+ version: 5.2.6
2121
+ build: h9cdd2b7_0
2122
+ subdir: linux-aarch64
2123
+ url: https://conda.anaconda.org/conda-forge/linux-aarch64/xz-5.2.6-h9cdd2b7_0.tar.bz2
2124
+ sha256: 93f58a7b393adf41fa007ac8c55978765e957e90cd31877ece1e5a343cb98220
2125
+ md5: 83baad393a31d59c20b63ba4da6592df
2126
+ depends:
2127
+ - libgcc-ng >=12
2128
+ license: LGPL-2.1 and GPL-2.0
2129
+ size: 440555
2130
+ timestamp: 1660348056328
pixi.toml ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Pixi is a package management tool for developers.
2
+ # Before running a task, pixi ensures that all listed dependencies are installed first.
3
+ #
4
+ # Pixi is not required for rerun, but it is a convenient way to install the
5
+ # dependencies required for this example.
6
+ #
7
+ # https://prefix.dev/docs/pixi/overview
8
+ #
9
+ # Use `pixi task list` to list the available tasks,
10
+ # and `pixi run TASK` to run it (e.g. `pixi run example`).
11
+
12
+ [project]
13
+ name = "rerun_vista_example"
14
+ authors = ["rerun.io <opensource@rerun.io>"]
15
+ channels = ["conda-forge"]
16
+ description = "Visualizing the Vista model with Rerun."
17
+ homepage = "https://rerun.io"
18
+ license = "MIT OR Apache-2.0"
19
+
20
+ platforms = ["linux-64", "linux-aarch64", "osx-arm64", "osx-64", "win-64"]
21
+ readme = "README.md"
22
+ repository = "https://github.com/rerun-io/hf-example-vista"
23
+ version = "0.1.0"
24
+
25
+
26
+ [tasks]
27
+ # ------------------------------------------------------------------------------------------
28
+ # Python stuff:
29
+
30
+ # Run first ruff fix, then ruff format, order is important see also https://twitter.com/charliermarsh/status/1717229721954799727
31
+ py-fmt = "ruff check --fix --config pyproject.toml . && ruff format --config pyproject.toml ."
32
+ py-fmt-check = "ruff check --config pyproject.toml . && ruff format --check --config pyproject.toml ."
33
+ py-lint = "mypy --install-types --non-interactive --no-warn-unused-ignore"
34
+
35
+ # ------------------------------------------------------------------------------------------
36
+ # General stuff:
37
+ lint-typos = "typos"
38
+
39
+ # ------------------------------------------------------------------------------------------
40
+ install-dependencies = "pip install -r requirements.txt"
41
+
42
+ [tasks.example]
43
+ cmd = "python main.py"
44
+ depends_on = ["install-dependencies"]
45
+
46
+
47
+ [dependencies]
48
+ # Python stuff:
49
+ mypy = "1.8.0"
50
+ ruff = "0.3.7"
51
+ python = "3.10.*"
52
+ pip = ">=24.0,<25" # to install dependencies from requirements.txt
53
+
54
+ types-requests = ">=2.31,<3" # mypy type hint stubs for generate_changelog.py
55
+
56
+ # General stuff:
57
+ typos = ">=1.16.20"
pyproject.toml ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copied from https://github.com/rerun-io/rerun_template
2
+
3
+ [tool.ruff]
4
+ # https://beta.ruff.rs/docs/configuration/
5
+
6
+ target-version = "py38"
7
+
8
+ # Enable unsafe fixes to allow ruff to apply fixes that may change the behavior of the code.
9
+ # This is needed because otherwise ruff will not be able to trim whitespaces in (codegened) docstrings.
10
+ unsafe-fixes = true
11
+
12
+ # Allow preview lints to be enabled (like `PLW1514` to force `encoding` on open).
13
+ preview = true
14
+ # But we only want to opt-in to certain preview rules!
15
+ lint.explicit-preview-rules = true
16
+
17
+ extend-exclude = [
18
+ # Automatically generated test artifacts
19
+ "venv/",
20
+ "target/",
21
+ ]
22
+
23
+ lint.ignore = [
24
+ # These makes sense to ignore in example code, but for a proper library we should not ignore these.
25
+ "D100", # Missing docstring in public module
26
+ "D101", # Missing docstring in public class
27
+ "D103", # Missing docstring in public function
28
+
29
+ # No blank lines allowed after function docstring.
30
+ "D202",
31
+
32
+ # pydocstyle: http://www.pydocstyle.org/en/stable/error_codes.html
33
+ # numpy convention with a few additional lints
34
+ "D107",
35
+ "D203",
36
+ "D212",
37
+ "D401",
38
+ "D402",
39
+ "D415",
40
+ "D416",
41
+
42
+ # Ruff can't fix this error on its own (yet)
43
+ # Having ruff check this causes errors that prevent the code-formatting process from completing.
44
+ "E501",
45
+
46
+ # allow relative imports
47
+ "TID252",
48
+
49
+ "UP007", # We need this, or `ruff format` will convert `Union[X, Y]` to `X | Y` which break on Python 3.8
50
+ ]
51
+
52
+ line-length = 120
53
+ lint.select = [
54
+ "D", # pydocstyle codes https://www.pydocstyle.org/en/latest/error_codes.html
55
+ "E", # pycodestyle error codes: https://pycodestyle.pycqa.org/en/latest/intro.html#error-codes
56
+ "F", # Flake8 error codes https://flake8.pycqa.org/en/latest/user/error-codes.html
57
+ "I", # Isort
58
+ "TID", # flake8-tidy-imports
59
+ "W", # pycodestyle warning codes: https://pycodestyle.pycqa.org/en/latest/intro.html#error-codes
60
+ "UP", # pyupgrade (ensures idiomatic code for supported python version)
61
+ "PLW1514", # Force setting `encoding` for open calls. This is in order to prevent issues when opening utf8 files on windows where the default encoding may depend on the active locale. https://docs.astral.sh/ruff/rules/unspecified-encoding/
62
+ ]
63
+
64
+ lint.unfixable = [
65
+ "PLW1514", # Automatic fix for `encoding` doesn't do what we want - it queries the locale for the preferred encoding which is exactly what we want to avoid.
66
+ ]
67
+
68
+ [tool.ruff.lint.per-file-ignores]
69
+ "__init__.py" = ["F401", "F403"]
70
+
71
+ [tool.ruff.lint.isort]
72
+ required-imports = ["from __future__ import annotations"]
requirements.txt ADDED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ aiofiles==23.2.1
2
+ aiohttp==3.9.5
3
+ aiosignal==1.3.1
4
+ altair==5.3.0
5
+ annotated-types==0.7.0
6
+ antlr4-python3-runtime==4.9.3
7
+ anyio==4.4.0
8
+ async-timeout==4.0.3
9
+ attrs==23.2.0
10
+ black==23.7.0
11
+ blinker==1.8.2
12
+ braceexpand==0.1.7
13
+ cachetools==5.3.3
14
+ certifi==2024.6.2
15
+ chardet==5.1.0
16
+ charset-normalizer==3.3.2
17
+ click==8.1.7
18
+ clip @ git+https://github.com/openai/CLIP.git
19
+ cmake==3.29.5.1
20
+ contourpy==1.2.1
21
+ cycler==0.12.1
22
+ deepspeed
23
+ dnspython==2.6.1
24
+ docker-pycreds==0.4.0
25
+ einops==0.8.0
26
+ email_validator==2.1.1
27
+ exceptiongroup==1.2.1
28
+ fairscale==0.4.13
29
+ fastapi==0.111.0
30
+ fastapi-cli==0.0.4
31
+ ffmpy==0.3.2
32
+ filelock==3.15.1
33
+ fire==0.6.0
34
+ fonttools==4.53.0
35
+ frozenlist==1.4.1
36
+ fsspec==2024.6.0
37
+ ftfy==6.2.0
38
+ gitdb==4.0.11
39
+ GitPython==3.1.43
40
+ gradio==4.26.0
41
+ gradio_client==0.15.1
42
+ gradio_rerun==0.0.3
43
+ h11==0.14.0
44
+ hjson==3.1.0
45
+ httpcore==1.0.5
46
+ httptools==0.6.1
47
+ httpx==0.27.0
48
+ huggingface-hub==0.23.3
49
+ idna==3.7
50
+ imageio==2.31.1
51
+ imageio-ffmpeg==0.4.8
52
+ importlib_resources==6.4.0
53
+ invisible-watermark==0.2.0
54
+ jedi==0.19.1
55
+ Jinja2==3.1.4
56
+ jsonschema==4.22.0
57
+ jsonschema-specifications==2023.12.1
58
+ kiwisolver==1.4.5
59
+ kornia==0.7.2
60
+ kornia_rs==0.1.3
61
+ lightning-utilities==0.11.2
62
+ lit==18.1.7
63
+ markdown-it-py==3.0.0
64
+ MarkupSafe==2.1.5
65
+ matplotlib==3.9.0
66
+ mdurl==0.1.2
67
+ mpmath==1.3.0
68
+ multidict==6.0.5
69
+ mypy-extensions==1.0.0
70
+ natsort==8.4.0
71
+ networkx==3.3
72
+ ninja==1.11.1.1
73
+ numpy==1.26.4
74
+ nvidia-cublas-cu11==11.10.3.66
75
+ nvidia-cuda-cupti-cu11==11.7.101
76
+ nvidia-cuda-nvrtc-cu11==11.7.99
77
+ nvidia-cuda-runtime-cu11==11.7.99
78
+ nvidia-cudnn-cu11==8.5.0.96
79
+ nvidia-cufft-cu11==10.9.0.58
80
+ nvidia-curand-cu11==10.2.10.91
81
+ nvidia-cusolver-cu11==11.4.0.1
82
+ nvidia-cusparse-cu11==11.7.4.91
83
+ nvidia-ml-py==12.555.43
84
+ nvidia-nccl-cu11==2.14.3
85
+ nvidia-nvtx-cu11==11.7.91
86
+ omegaconf==2.3.0
87
+ open-clip-torch==2.24.0
88
+ opencv-python==4.6.0.66
89
+ orjson==3.10.4
90
+ packaging==24.1
91
+ pandas==2.2.2
92
+ parso==0.8.4
93
+ pathspec==0.12.1
94
+ pillow==10.3.0
95
+ platformdirs==4.2.2
96
+ protobuf==3.20.3
97
+ psutil==5.9.8
98
+ pudb==2024.1
99
+ py-cpuinfo==9.0.0
100
+ pyarrow==16.1.0
101
+ pydantic==2.7.4
102
+ pydantic_core==2.18.4
103
+ pydeck==0.9.1
104
+ pydub==0.25.1
105
+ Pygments==2.18.0
106
+ pyparsing==3.1.2
107
+ python-dateutil==2.9.0.post0
108
+ python-dotenv==1.0.1
109
+ python-multipart==0.0.9
110
+ pytorch-lightning==2.0.1
111
+ pytz==2024.1
112
+ PyWavelets==1.6.0
113
+ PyYAML==6.0.1
114
+ referencing==0.35.1
115
+ regex==2024.5.15
116
+ requests==2.32.3
117
+ rerun-sdk==0.16.1
118
+ rich==13.7.1
119
+ rpds-py==0.18.1
120
+ ruff==0.4.8
121
+ safetensors==0.4.3
122
+ scipy==1.13.1
123
+ semantic-version==2.10.0
124
+ sentencepiece==0.2.0
125
+ sentry-sdk==2.5.1
126
+ setproctitle==1.3.3
127
+ shellingham==1.5.4
128
+ six==1.16.0
129
+ smmap==5.0.1
130
+ sniffio==1.3.1
131
+ spaces==0.28.3
132
+ starlette==0.37.2
133
+ streamlit==1.35.0
134
+ sympy==1.12.1
135
+ tensorboardX==2.6
136
+ termcolor==2.4.0
137
+ timm==1.0.3
138
+ tokenizers==0.12.1
139
+ toml==0.10.2
140
+ tomli==2.0.1
141
+ tomlkit==0.12.0
142
+ toolz==0.12.1
143
+ torch==2.0.1
144
+ torchaudio==2.0.2
145
+ torchdata==0.6.1
146
+ torchmetrics==1.4.0.post0
147
+ torchvision==0.15.2
148
+ tornado==6.4.1
149
+ tqdm==4.66.4
150
+ transformers==4.19.1
151
+ triton==2.0.0
152
+ typer==0.12.3
153
+ typing_extensions==4.12.2
154
+ tzdata==2024.1
155
+ ujson==5.10.0
156
+ urllib3==1.26.18
157
+ urwid==2.6.14
158
+ urwid_readline==0.14
159
+ uvicorn==0.30.1
160
+ uvloop==0.19.0
161
+ wandb==0.17.1
162
+ watchdog==4.0.1
163
+ watchfiles==0.22.0
164
+ wcwidth==0.2.13
165
+ webdataset==0.2.86
166
+ websockets==11.0.3
167
+ xformers==0.0.22
168
+ yarl==1.9.4
scripts/template_update.py ADDED
@@ -0,0 +1,191 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ # Copied from https://github.com/rerun-io/rerun_template
3
+
4
+ """
5
+ The script has two purposes.
6
+
7
+ After using `rerun_template` as a template, run this to clean out things you don't need.
8
+ Use `scripts/template_update.py init --languages cpp,rust,python` for this.
9
+
10
+ Update an existing repository with the latest changes from the template.
11
+ Use `scripts/template_update.py update --languages cpp,rust,python` for this.
12
+
13
+ In either case, make sure the list of languages matches the languages you want to support.
14
+ You can also use `--dry-run` to see what would happen without actually changing anything.
15
+ """
16
+
17
+ from __future__ import annotations
18
+
19
+ import argparse
20
+ import os
21
+ import shutil
22
+ import tempfile
23
+
24
+ from git import Repo # pip install GitPython
25
+
26
+ OWNER = "rerun-io"
27
+
28
+ # Don't overwrite these when updating existing repository from the template
29
+ DO_NOT_OVERWRITE = {
30
+ "Cargo.lock",
31
+ "CHANGELOG.md",
32
+ "main.py",
33
+ "pixi.lock",
34
+ "README.md",
35
+ "requirements.txt",
36
+ }
37
+
38
+ # Files required by C++, but not by _both_ Python and Rust
39
+ CPP_FILES = {
40
+ ".clang-format",
41
+ ".github/workflows/cpp.yml",
42
+ "CMakeLists.txt",
43
+ "pixi.lock", # Pixi is only C++ & Python - For Rust we only use cargo
44
+ "pixi.toml", # Pixi is only C++ & Python - For Rust we only use cargo
45
+ "src/",
46
+ "src/main.cpp",
47
+ }
48
+
49
+ # Files required by Python, but not by _both_ C++ and Rust
50
+ PYTHON_FILES = {
51
+ ".github/workflows/python.yml",
52
+ ".mypy.ini",
53
+ "main.py",
54
+ "pixi.lock", # Pixi is only C++ & Python - For Rust we only use cargo
55
+ "pixi.toml", # Pixi is only C++ & Python - For Rust we only use cargo
56
+ "pyproject.toml",
57
+ "requirements.txt",
58
+ }
59
+
60
+ # Files required by Rust, but not by _both_ C++ and Python
61
+ RUST_FILES = {
62
+ ".github/workflows/rust.yml",
63
+ "bacon.toml",
64
+ "Cargo.lock",
65
+ "Cargo.toml",
66
+ "CHANGELOG.md", # We only keep a changelog for Rust crates at the moment
67
+ "clippy.toml",
68
+ "Cranky.toml",
69
+ "deny.toml",
70
+ "rust-toolchain",
71
+ "scripts/clippy_wasm/",
72
+ "scripts/clippy_wasm/clippy.toml",
73
+ "scripts/generate_changelog.py", # We only keep a changelog for Rust crates at the moment
74
+ "src/",
75
+ "src/lib.rs",
76
+ "src/main.rs",
77
+ }
78
+
79
+ # Files we used to have, but have been removed in newer versions of rerun_template
80
+ DEAD_FILES = ["bacon.toml", "Cranky.toml"]
81
+
82
+
83
def parse_languages(lang_str: str) -> set[str]:
    """Parse a comma-separated language list into a set, validating each entry.

    Args:
        lang_str: comma-separated languages, e.g. "cpp,python,rust"; empty string means none.

    Returns:
        The set of parsed language names.

    Raises:
        ValueError: if an entry is not one of `cpp`, `python`, `rust`.
    """
    languages = lang_str.split(",") if lang_str else []
    for lang in languages:
        # Raise instead of `assert`: assertions are stripped under `python -O`,
        # which would silently accept invalid input.
        if lang not in ("cpp", "python", "rust"):
            raise ValueError(f"Unsupported language: {lang}")
    return set(languages)
88
+
89
+
90
def calc_deny_set(languages: set[str]) -> set[str]:
    """The set of files to delete/ignore."""
    per_language = {
        "cpp": CPP_FILES,
        "python": PYTHON_FILES,
        "rust": RUST_FILES,
    }
    # Keep every file needed by at least one requested language…
    keep: set[str] = set()
    for lang, files in per_language.items():
        if lang in languages:
            keep |= files
    # …and deny everything language-specific that is left over.
    all_language_files = CPP_FILES | PYTHON_FILES | RUST_FILES
    return all_language_files - keep
100
+
101
+
102
def init(languages: set[str], dry_run: bool) -> None:
    """Strip a fresh template checkout down to the requested languages.

    Args:
        languages: subset of {"cpp", "python", "rust"} to keep support for.
        dry_run: if True, only print what would be removed.
    """
    # Bug fix: the original used a plain string literal, so "{languages}" was
    # printed verbatim instead of being interpolated.
    print(f"Removing all language-specific files not needed for languages {languages}.")
    files_to_delete = calc_deny_set(languages)
    delete_files_and_folder(files_to_delete, dry_run)
106
+
107
+
108
def remove_file(filepath: str) -> None:
    """Delete *filepath*; a file that is already missing is not an error.

    EAFP: attempting the removal and ignoring `FileNotFoundError` avoids the
    check-then-act race of an `os.path.exists` guard. Return annotation added
    for consistency with the other functions in this script.
    """
    try:
        os.remove(filepath)
    except FileNotFoundError:
        pass
113
+
114
+
115
def delete_files_and_folder(paths: set[str], dry_run: bool) -> None:
    """Remove the given files/folders, resolved relative to the repository root."""
    # The repository root is the parent of the `scripts/` directory holding this file.
    repo_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    for rel in paths:
        target = os.path.join(repo_root, rel)
        if not os.path.exists(target):
            continue  # nothing to do
        if os.path.isfile(target):
            print(f"Removing file {target}…")
            if not dry_run:
                remove_file(target)
        elif os.path.isdir(target):
            print(f"Removing folder {target}…")
            if not dry_run:
                shutil.rmtree(target)
128
+
129
+
130
def update(languages: set[str], dry_run: bool) -> None:
    """Refresh this repository with the latest template files, skipping protected ones."""
    # Drop files the template no longer ships.
    for dead in DEAD_FILES:
        print(f"Removing dead file {dead}…")
        if not dry_run:
            remove_file(dead)

    skip = calc_deny_set(languages) | DO_NOT_OVERWRITE
    repo_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

    with tempfile.TemporaryDirectory() as template_dir:
        # Clone a pristine copy of the template to copy files from.
        Repo.clone_from("https://github.com/rerun-io/rerun_template.git", template_dir)
        for root, _dirs, filenames in os.walk(template_dir):
            for filename in filenames:
                src_path = os.path.join(root, filename)
                rel_path = os.path.relpath(src_path, template_dir)

                # Skip git internals, template sources, and protected files.
                if rel_path.startswith((".git/", "src/")) or rel_path in skip:
                    continue

                dest_path = os.path.join(repo_root, rel_path)
                print(f"Updating {rel_path}…")
                if not dry_run:
                    os.makedirs(os.path.dirname(dest_path), exist_ok=True)
                    shutil.copy2(src_path, dest_path)
159
+
160
+
161
def main() -> None:
    """Parse command-line arguments and dispatch to the `init` or `update` command."""
    parser = argparse.ArgumentParser(description="Handle the Rerun template.")
    subparsers = parser.add_subparsers(dest="command")

    init_parser = subparsers.add_parser("init", help="Initialize a new checkout of the template.")
    update_parser = subparsers.add_parser(
        "update", help="Update all existing Rerun repositories with the latest changes from the template"
    )

    # Both subcommands take the same options.
    for sub in (init_parser, update_parser):
        sub.add_argument(
            "--languages", default="", nargs="?", const="", help="The languages to support (e.g. `cpp,python,rust`)."
        )
        sub.add_argument("--dry-run", action="store_true", help="Don't actually delete any files.")

    args = parser.parse_args()

    dispatch = {"init": init, "update": update}
    handler = dispatch.get(args.command)
    if handler is None:
        parser.print_help()
        exit(1)
    handler(parse_languages(args.languages), args.dry_run)
188
+
189
+
190
+ if __name__ == "__main__":
191
+ main()
style.css ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
/* Center the Gradio app on the page and cap its width. */
gradio-app {
  max-width: 900px;
  margin: auto;
}
vista/.gitignore ADDED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py,cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ .pybuilder/
76
+ target/
77
+
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # pyenv
86
+ # For a library or package, you might want to ignore these files since the code is
87
+ # intended to run in multiple environments; otherwise, check them in:
88
+ # .python-version
89
+
90
+ # pipenv
91
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
+ # install all needed dependencies.
95
+ #Pipfile.lock
96
+
97
+ # poetry
98
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
100
+ # commonly ignored for libraries.
101
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102
+ #poetry.lock
103
+
104
+ # pdm
105
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106
+ #pdm.lock
107
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108
+ # in version control.
109
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
110
+ .pdm.toml
111
+ .pdm-python
112
+ .pdm-build/
113
+
114
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
115
+ __pypackages__/
116
+
117
+ # Celery stuff
118
+ celerybeat-schedule
119
+ celerybeat.pid
120
+
121
+ # SageMath parsed files
122
+ *.sage.py
123
+
124
+ # Environments
125
+ .env
126
+ .venv
127
+ env/
128
+ venv/
129
+ ENV/
130
+ env.bak/
131
+ venv.bak/
132
+
133
+ # Spyder project settings
134
+ .spyderproject
135
+ .spyproject
136
+
137
+ # Rope project settings
138
+ .ropeproject
139
+
140
+ # mkdocs documentation
141
+ /site
142
+
143
+ # mypy
144
+ .mypy_cache/
145
+ .dmypy.json
146
+ dmypy.json
147
+
148
+ # Pyre type checker
149
+ .pyre/
150
+
151
+ # pytype static type analyzer
152
+ .pytype/
153
+
154
+ # Cython debug symbols
155
+ cython_debug/
156
+
157
+ # PyCharm
158
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
159
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
160
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
161
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
162
+ .idea/
163
+
164
+ outputs/
165
+ logs/
166
+
167
+ .DS_Store
168
+ */.DS_Store
vista/LICENSE ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
vista/__init__.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import rerun.blueprint as rrb
4
+ import torch
5
+ from transformers.utils import hub
6
+
7
+ from . import sample, sample_utils
8
+
9
+
10
def create_model():
    """Build the Vista model from the bundled config and the HF-hosted checkpoint."""
    options = {
        "config": "./vista/configs/inference/vista.yaml",
        # Downloads (or reuses a cached copy of) the checkpoint from the hub.
        "ckpt": hub.get_file_from_repo("OpenDriveLab/Vista", "vista.safetensors"),
    }
    return sample_utils.init_model(options)
17
+
18
+
19
def generate_blueprint(n_rounds: int) -> rrb.Blueprint:
    """Lay out one latent-tensor view per diffusion segment above the generated video."""
    segment_views = []
    for segment_index in range(n_rounds):
        view = rrb.TensorView(
            origin=f"diffusion_{segment_index}",
            name=f"Latents Segment {segment_index+1}",
        )
        segment_views.append(view)

    top_row = rrb.Horizontal(*segment_views)
    bottom_row = rrb.Spatial2DView(origin="generated_image", name="Generated Video")
    return rrb.Blueprint(rrb.Vertical(top_row, bottom_row), collapse_panels=True)
29
+
30
+
31
def run_sampling(
    log_queue,
    first_frame_file_name,
    height,
    width,
    n_rounds,
    n_frames,
    n_steps,
    cfg_scale,
    cond_aug,
    model=None,
) -> None:
    """Run Vista video generation conditioned on a single starting frame.

    Intermediate results are streamed through `log_queue` by `do_sample`, and the
    sentinel string "done" is put on the queue when sampling has finished.

    Args:
        log_queue: queue receiving logging payloads and the final "done" sentinel.
        first_frame_file_name: path of the image used as the conditioning frame.
        height: target frame height in pixels.
        width: target frame width in pixels.
        n_rounds: number of autoregressive generation segments.
        n_frames: number of frames per segment.
        n_steps: number of diffusion sampling steps.
        cfg_scale: classifier-free guidance scale.
        cond_aug: noise level added to the conditioning frame.
        model: optional pre-built model; created via `create_model()` when None.
    """
    if model is None:
        model = create_model()

    # Distinct conditioning keys expected by the model's embedders.
    unique_keys = {embedder.input_key for embedder in model.conditioner.embedders}
    # Fix: the original called init_embedder_options twice with identical
    # arguments and discarded the first result; one call is sufficient.
    value_dict = sample_utils.init_embedder_options(unique_keys)

    action_dict = None

    # NOTE(review): device is hard-coded to "cuda" — this requires a GPU.
    first_frame = sample.load_img(first_frame_file_name, height, width, "cuda")[None]
    repeated_frame = first_frame.expand(n_frames, -1, -1, -1)

    cond_img = first_frame
    value_dict["cond_frames_without_noise"] = cond_img
    value_dict["cond_aug"] = cond_aug
    value_dict["cond_frames"] = cond_img + cond_aug * torch.randn_like(cond_img)
    if action_dict is not None:
        for key, value in action_dict.items():
            value_dict[key] = value

    # Multi-segment generation uses the triangle prediction guider; a single
    # segment uses plain classifier-free guidance.
    guider = "TrianglePredictionGuider" if n_rounds > 1 else "VanillaCFG"
    sampler = sample_utils.init_sampling(
        guider=guider,
        steps=n_steps,
        cfg_scale=cfg_scale,
        num_frames=n_frames,
    )

    # Conditioning keys whose unconditional embeddings are forced to zero.
    uc_keys = [
        "cond_frames",
        "cond_frames_without_noise",
        "command",
        "trajectory",
        "speed",
        "angle",
        "goal",
    ]

    _generated_images, _samples_z, _inputs = sample_utils.do_sample(
        repeated_frame,
        model,
        sampler,
        value_dict,
        num_rounds=n_rounds,
        num_frames=n_frames,
        force_uc_zero_embeddings=uc_keys,
        initial_cond_indices=[0],
        log_queue=log_queue,
    )

    log_queue.put("done")
vista/bin_to_st.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import os
4
+
5
+ import torch
6
+ from safetensors.torch import save_file
7
+
8
+ ckpt = "path_to/pytorch_model.bin"
9
+
10
+ vista_bin = torch.load(ckpt, map_location="cpu") # only contains model weights
11
+
12
+ for k in list(vista_bin.keys()): # merge LoRA weights (if exist) for inference
13
+ if "adapter_down" in k:
14
+ if "q_adapter_down" in k:
15
+ up_k = k.replace("q_adapter_down", "q_adapter_up")
16
+ pretrain_k = k.replace("q_adapter_down", "to_q")
17
+ elif "k_adapter_down" in k:
18
+ up_k = k.replace("k_adapter_down", "k_adapter_up")
19
+ pretrain_k = k.replace("k_adapter_down", "to_k")
20
+ elif "v_adapter_down" in k:
21
+ up_k = k.replace("v_adapter_down", "v_adapter_up")
22
+ pretrain_k = k.replace("v_adapter_down", "to_v")
23
+ else:
24
+ up_k = k.replace("out_adapter_down", "out_adapter_up")
25
+ if "model_ema" in k:
26
+ pretrain_k = k.replace("out_adapter_down", "to_out0")
27
+ else:
28
+ pretrain_k = k.replace("out_adapter_down", "to_out.0")
29
+
30
+ lora_weights = vista_bin[up_k] @ vista_bin[k]
31
+ del vista_bin[k]
32
+ del vista_bin[up_k]
33
+ vista_bin[pretrain_k] = vista_bin[pretrain_k] + lora_weights
34
+
35
+ for k in list(vista_bin.keys()): # remove the prefix
36
+ if "_forward_module" in k and "decay" not in k and "num_updates" not in k:
37
+ vista_bin[k.replace("_forward_module.", "")] = vista_bin[k]
38
+ del vista_bin[k]
39
+
40
+ for k in list(vista_bin.keys()): # combine EMA weights
41
+ if "model_ema" in k:
42
+ orig_k = None
43
+ for kk in list(vista_bin.keys()):
44
+ if "model_ema" not in kk and k[10:] == kk[6:].replace(".", ""):
45
+ orig_k = kk
46
+ assert orig_k is not None
47
+ vista_bin[orig_k] = vista_bin[k]
48
+ del vista_bin[k]
49
+ print("Replace", orig_k, "with", k)
50
+
51
+ vista_st = dict()
52
+ for k in list(vista_bin.keys()):
53
+ vista_st[k] = vista_bin[k]
54
+
55
+ os.makedirs("ckpts", exist_ok=True)
56
+ save_file(vista_st, "ckpts/vista.safetensors")
vista/configs/example/nusc_train.yaml ADDED
@@ -0,0 +1,292 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model:
2
+ base_learning_rate: 5.e-5
3
+ target: vista.vwm.models.diffusion.DiffusionEngine
4
+ params:
5
+ use_ema: True
6
+ input_key: img_seq
7
+ scale_factor: 0.18215
8
+ disable_first_stage_autocast: True
9
+ en_and_decode_n_samples_a_time: 1
10
+ num_frames: &num_frames 25
11
+ slow_spatial_layers: True
12
+ train_peft_adapters: False
13
+ replace_cond_frames: &replace_cond_frames True
14
+ fixed_cond_frames: # only used for logging images
15
+ - [ 0, 1, 2 ]
16
+
17
+ denoiser_config:
18
+ target: vista.vwm.modules.diffusionmodules.denoiser.Denoiser
19
+ params:
20
+ num_frames: *num_frames
21
+
22
+ scaling_config:
23
+ target: vista.vwm.modules.diffusionmodules.denoiser_scaling.VScalingWithEDMcNoise
24
+
25
+ network_config:
26
+ target: vista.vwm.modules.diffusionmodules.video_model.VideoUNet
27
+ params:
28
+ adm_in_channels: 768
29
+ num_classes: sequential
30
+ use_checkpoint: True
31
+ in_channels: 8
32
+ out_channels: 4
33
+ model_channels: 320
34
+ attention_resolutions: [ 4, 2, 1 ]
35
+ num_res_blocks: 2
36
+ channel_mult: [ 1, 2, 4, 4 ]
37
+ num_head_channels: 64
38
+ use_linear_in_transformer: True
39
+ transformer_depth: 1
40
+ context_dim: 1024
41
+ spatial_transformer_attn_type: softmax-xformers
42
+ extra_ff_mix_layer: True
43
+ use_spatial_context: True
44
+ merge_strategy: learned_with_images
45
+ video_kernel_size: [ 3, 1, 1 ]
46
+ add_lora: False
47
+ action_control: False
48
+
49
+ conditioner_config:
50
+ target: vista.vwm.modules.GeneralConditioner
51
+ params:
52
+ emb_models:
53
+ - input_key: cond_frames_without_noise
54
+ is_trainable: False
55
+ ucg_rate: 0.15
56
+ target: vista.vwm.modules.encoders.modules.FrozenOpenCLIPImagePredictionEmbedder
57
+ params:
58
+ n_cond_frames: 1
59
+ n_copies: 1
60
+ open_clip_embedding_config:
61
+ target: vista.vwm.modules.encoders.modules.FrozenOpenCLIPImageEmbedder
62
+ params:
63
+ freeze: True
64
+
65
+ - input_key: fps_id
66
+ is_trainable: False
67
+ ucg_rate: 0.0
68
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
69
+ params:
70
+ outdim: 256
71
+
72
+ - input_key: motion_bucket_id
73
+ is_trainable: False
74
+ ucg_rate: 0.0
75
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
76
+ params:
77
+ outdim: 256
78
+
79
+ - input_key: cond_frames
80
+ is_trainable: False
81
+ ucg_rate: 0.15
82
+ target: vista.vwm.modules.encoders.modules.VideoPredictionEmbedderWithEncoder
83
+ params:
84
+ disable_encoder_autocast: True
85
+ n_cond_frames: 1
86
+ n_copies: 1
87
+ is_ae: True
88
+
89
+ encoder_config:
90
+ target: vista.vwm.models.autoencoder.AutoencoderKLModeOnly
91
+ params:
92
+ embed_dim: 4
93
+ monitor: val/rec_loss
94
+
95
+ ddconfig:
96
+ attn_type: vanilla-xformers
97
+ double_z: True
98
+ z_channels: 4
99
+ resolution: 256
100
+ in_channels: 3
101
+ out_ch: 3
102
+ ch: 128
103
+ ch_mult: [ 1, 2, 4, 4 ]
104
+ num_res_blocks: 2
105
+ attn_resolutions: [ ]
106
+ dropout: 0.0
107
+
108
+ loss_config:
109
+ target: torch.nn.Identity
110
+
111
+ - input_key: cond_aug
112
+ is_trainable: False
113
+ ucg_rate: 0.0
114
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
115
+ params:
116
+ outdim: 256
117
+
118
+ - input_key: command
119
+ is_trainable: False
120
+ ucg_rate: 0.15
121
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
122
+ params:
123
+ outdim: &action_emb_dim 128
124
+ num_features: 1
125
+ add_sequence_dim: True
126
+
127
+ - input_key: trajectory
128
+ is_trainable: False
129
+ ucg_rate: 0.15
130
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
131
+ params:
132
+ outdim: *action_emb_dim
133
+ num_features: 8
134
+ add_sequence_dim: True
135
+
136
+ - input_key: speed
137
+ is_trainable: False
138
+ ucg_rate: 0.15
139
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
140
+ params:
141
+ outdim: *action_emb_dim
142
+ num_features: 4
143
+ add_sequence_dim: True
144
+
145
+ - input_key: angle
146
+ is_trainable: False
147
+ ucg_rate: 0.15
148
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
149
+ params:
150
+ outdim: *action_emb_dim
151
+ num_features: 4
152
+ add_sequence_dim: True
153
+
154
+ - input_key: goal
155
+ is_trainable: False
156
+ ucg_rate: 0.15
157
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
158
+ params:
159
+ outdim: *action_emb_dim
160
+ num_features: 2
161
+ add_sequence_dim: True
162
+
163
+ first_stage_config:
164
+ target: vista.vwm.models.autoencoder.AutoencodingEngine
165
+ params:
166
+ loss_config:
167
+ target: torch.nn.Identity
168
+
169
+ regularizer_config:
170
+ target: vista.vwm.modules.autoencoding.regularizers.DiagonalGaussianRegularizer
171
+
172
+ encoder_config:
173
+ target: vista.vwm.modules.diffusionmodules.model.Encoder
174
+ params:
175
+ attn_type: vanilla
176
+ double_z: True
177
+ z_channels: 4
178
+ resolution: 256
179
+ in_channels: 3
180
+ out_ch: 3
181
+ ch: 128
182
+ ch_mult: [ 1, 2, 4, 4 ]
183
+ num_res_blocks: 2
184
+ attn_resolutions: [ ]
185
+ dropout: 0.0
186
+
187
+ decoder_config:
188
+ target: vista.vwm.modules.autoencoding.temporal_ae.VideoDecoder
189
+ params:
190
+ attn_type: vanilla
191
+ double_z: True
192
+ z_channels: 4
193
+ resolution: 256
194
+ in_channels: 3
195
+ out_ch: 3
196
+ ch: 128
197
+ ch_mult: [ 1, 2, 4, 4 ]
198
+ num_res_blocks: 2
199
+ attn_resolutions: [ ]
200
+ dropout: 0.0
201
+ video_kernel_size: [ 3, 1, 1 ]
202
+
203
+ scheduler_config:
204
+ target: vista.vwm.lr_scheduler.LambdaLinearScheduler
205
+ params:
206
+ warm_up_steps: [ 1000 ]
207
+ cycle_lengths: [ 10000000000000 ]
208
+ f_start: [ 1.e-6 ]
209
+ f_max: [ 1. ]
210
+ f_min: [ 1. ]
211
+
212
+ loss_fn_config:
213
+ target: vista.vwm.modules.diffusionmodules.loss.StandardDiffusionLoss
214
+ params:
215
+ use_additional_loss: True
216
+ offset_noise_level: 0.02
217
+ additional_loss_weight: 0.1
218
+ num_frames: *num_frames
219
+ replace_cond_frames: *replace_cond_frames
220
+ cond_frames_choices:
221
+ - [ ]
222
+ - [ 0 ]
223
+ - [ 0, 1 ]
224
+ - [ 0, 1, 2 ]
225
+
226
+ sigma_sampler_config:
227
+ target: vista.vwm.modules.diffusionmodules.sigma_sampling.EDMSampling
228
+ params:
229
+ p_mean: 1.0
230
+ p_std: 1.6
231
+ num_frames: *num_frames
232
+
233
+ loss_weighting_config:
234
+ target: vista.vwm.modules.diffusionmodules.loss_weighting.VWeighting
235
+
236
+ sampler_config:
237
+ target: vista.vwm.modules.diffusionmodules.sampling.EulerEDMSampler
238
+ params:
239
+ num_steps: 15
240
+
241
+ discretization_config:
242
+ target: vista.vwm.modules.diffusionmodules.discretizer.EDMDiscretization
243
+ params:
244
+ sigma_max: 700.0
245
+
246
+ guider_config:
247
+ target: vista.vwm.modules.diffusionmodules.guiders.LinearPredictionGuider
248
+ params:
249
+ num_frames: *num_frames
250
+ max_scale: 3.0
251
+ min_scale: 1.5
252
+
253
+ data:
254
+ target: vista.vwm.data.dataset.Sampler
255
+ params:
256
+ batch_size: 1
257
+ num_workers: 16
258
+ subsets:
259
+ - NuScenes
260
+ probs:
261
+ - 1
262
+ samples_per_epoch: 16000
263
+ target_height: 576
264
+ target_width: 1024
265
+ num_frames: *num_frames
266
+
267
+ lightning:
268
+ callbacks:
269
+ image_logger:
270
+ target: train.ImageLogger
271
+ params:
272
+ num_frames: *num_frames
273
+ disabled: False
274
+ enable_autocast: False
275
+ batch_frequency: 100
276
+ increase_log_steps: True
277
+ log_first_step: False
278
+ log_images_kwargs:
279
+ N: *num_frames
280
+
281
+ modelcheckpoint:
282
+ params:
283
+ every_n_epochs: 1 # checkpoint once per epoch; to checkpoint by steps instead, use every_n_train_steps (e.g. 5000), kept aligned with image_logger batch_frequency
284
+
285
+ trainer:
286
+ devices: 0,1
287
+ benchmark: True
288
+ num_sanity_val_steps: 0
289
+ accumulate_grad_batches: 1
290
+ max_epochs: 100
291
+ strategy: deepspeed_stage_2
292
+ gradient_clip_val: 0.3
vista/configs/inference/vista.yaml ADDED
@@ -0,0 +1,184 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model:
2
+ target: vista.vwm.models.diffusion.DiffusionEngine
3
+ params:
4
+ input_key: img_seq
5
+ scale_factor: 0.18215
6
+ disable_first_stage_autocast: True
7
+ en_and_decode_n_samples_a_time: 1
8
+ num_frames: &num_frames 25
9
+
10
+ denoiser_config:
11
+ target: vista.vwm.modules.diffusionmodules.denoiser.Denoiser
12
+ params:
13
+ num_frames: *num_frames
14
+
15
+ scaling_config:
16
+ target: vista.vwm.modules.diffusionmodules.denoiser_scaling.VScalingWithEDMcNoise
17
+
18
+ network_config:
19
+ target: vista.vwm.modules.diffusionmodules.video_model.VideoUNet
20
+ params:
21
+ adm_in_channels: 768
22
+ num_classes: sequential
23
+ use_checkpoint: False
24
+ in_channels: 8
25
+ out_channels: 4
26
+ model_channels: 320
27
+ attention_resolutions: [ 4, 2, 1 ]
28
+ num_res_blocks: 2
29
+ channel_mult: [ 1, 2, 4, 4 ]
30
+ num_head_channels: 64
31
+ use_linear_in_transformer: True
32
+ transformer_depth: 1
33
+ context_dim: 1024
34
+ spatial_transformer_attn_type: softmax-xformers
35
+ extra_ff_mix_layer: True
36
+ use_spatial_context: True
37
+ merge_strategy: learned_with_images
38
+ video_kernel_size: [ 3, 1, 1 ]
39
+ add_lora: False
40
+ action_control: True
41
+
42
+ conditioner_config:
43
+ target: vista.vwm.modules.GeneralConditioner
44
+ params:
45
+ emb_models:
46
+ - input_key: cond_frames_without_noise
47
+ is_trainable: False
48
+ target: vista.vwm.modules.encoders.modules.FrozenOpenCLIPImagePredictionEmbedder
49
+ params:
50
+ n_cond_frames: 1
51
+ n_copies: 1
52
+ open_clip_embedding_config:
53
+ target: vista.vwm.modules.encoders.modules.FrozenOpenCLIPImageEmbedder
54
+ params:
55
+ freeze: True
56
+
57
+ - input_key: fps_id
58
+ is_trainable: False
59
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
60
+ params:
61
+ outdim: 256
62
+
63
+ - input_key: motion_bucket_id
64
+ is_trainable: False
65
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
66
+ params:
67
+ outdim: 256
68
+
69
+ - input_key: cond_frames
70
+ is_trainable: False
71
+ target: vista.vwm.modules.encoders.modules.VideoPredictionEmbedderWithEncoder
72
+ params:
73
+ disable_encoder_autocast: True
74
+ n_cond_frames: 1
75
+ n_copies: 1
76
+ is_ae: True
77
+
78
+ encoder_config:
79
+ target: vista.vwm.models.autoencoder.AutoencoderKLModeOnly
80
+ params:
81
+ embed_dim: 4
82
+ monitor: val/rec_loss
83
+
84
+ ddconfig:
85
+ attn_type: vanilla-xformers
86
+ double_z: True
87
+ z_channels: 4
88
+ resolution: 256
89
+ in_channels: 3
90
+ out_ch: 3
91
+ ch: 128
92
+ ch_mult: [ 1, 2, 4, 4 ]
93
+ num_res_blocks: 2
94
+ attn_resolutions: [ ]
95
+ dropout: 0.0
96
+
97
+ loss_config:
98
+ target: torch.nn.Identity
99
+
100
+ - input_key: cond_aug
101
+ is_trainable: False
102
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
103
+ params:
104
+ outdim: 256
105
+
106
+ - input_key: command
107
+ is_trainable: False
108
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
109
+ params:
110
+ outdim: &action_emb_dim 128
111
+ num_features: 1
112
+ add_sequence_dim: True
113
+
114
+ - input_key: trajectory
115
+ is_trainable: False
116
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
117
+ params:
118
+ outdim: *action_emb_dim
119
+ num_features: 8
120
+ add_sequence_dim: True
121
+
122
+ - input_key: speed
123
+ is_trainable: False
124
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
125
+ params:
126
+ outdim: *action_emb_dim
127
+ num_features: 4
128
+ add_sequence_dim: True
129
+
130
+ - input_key: angle
131
+ is_trainable: False
132
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
133
+ params:
134
+ outdim: *action_emb_dim
135
+ num_features: 4
136
+ add_sequence_dim: True
137
+
138
+ - input_key: goal
139
+ is_trainable: False
140
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
141
+ params:
142
+ outdim: *action_emb_dim
143
+ num_features: 2
144
+ add_sequence_dim: True
145
+
146
+ first_stage_config:
147
+ target: vista.vwm.models.autoencoder.AutoencodingEngine
148
+ params:
149
+ loss_config:
150
+ target: torch.nn.Identity
151
+
152
+ regularizer_config:
153
+ target: vista.vwm.modules.autoencoding.regularizers.DiagonalGaussianRegularizer
154
+
155
+ encoder_config:
156
+ target: vista.vwm.modules.diffusionmodules.model.Encoder
157
+ params:
158
+ attn_type: vanilla
159
+ double_z: True
160
+ z_channels: 4
161
+ resolution: 256
162
+ in_channels: 3
163
+ out_ch: 3
164
+ ch: 128
165
+ ch_mult: [ 1, 2, 4, 4 ]
166
+ num_res_blocks: 2
167
+ attn_resolutions: [ ]
168
+ dropout: 0.0
169
+
170
+ decoder_config:
171
+ target: vista.vwm.modules.autoencoding.temporal_ae.VideoDecoder
172
+ params:
173
+ attn_type: vanilla
174
+ double_z: True
175
+ z_channels: 4
176
+ resolution: 256
177
+ in_channels: 3
178
+ out_ch: 3
179
+ ch: 128
180
+ ch_mult: [ 1, 2, 4, 4 ]
181
+ num_res_blocks: 2
182
+ attn_resolutions: [ ]
183
+ dropout: 0.0
184
+ video_kernel_size: [ 3, 1, 1 ]
vista/configs/training/vista_phase1.yaml ADDED
@@ -0,0 +1,247 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model:
2
+ base_learning_rate: 1.e-5
3
+ target: vista.vwm.models.diffusion.DiffusionEngine
4
+ params:
5
+ use_ema: True
6
+ input_key: img_seq
7
+ scale_factor: 0.18215
8
+ disable_first_stage_autocast: True
9
+ en_and_decode_n_samples_a_time: 1
10
+ num_frames: &num_frames 25
11
+ slow_spatial_layers: True
12
+ train_peft_adapters: False
13
+ replace_cond_frames: &replace_cond_frames True
14
+ fixed_cond_frames: # only used for logging images
15
+ - [ 0, 1, 2 ]
16
+
17
+ denoiser_config:
18
+ target: vista.vwm.modules.diffusionmodules.denoiser.Denoiser
19
+ params:
20
+ num_frames: *num_frames
21
+
22
+ scaling_config:
23
+ target: vista.vwm.modules.diffusionmodules.denoiser_scaling.VScalingWithEDMcNoise
24
+
25
+ network_config:
26
+ target: vista.vwm.modules.diffusionmodules.video_model.VideoUNet
27
+ params:
28
+ adm_in_channels: 768
29
+ num_classes: sequential
30
+ use_checkpoint: True
31
+ in_channels: 8
32
+ out_channels: 4
33
+ model_channels: 320
34
+ attention_resolutions: [ 4, 2, 1 ]
35
+ num_res_blocks: 2
36
+ channel_mult: [ 1, 2, 4, 4 ]
37
+ num_head_channels: 64
38
+ use_linear_in_transformer: True
39
+ transformer_depth: 1
40
+ context_dim: 1024
41
+ spatial_transformer_attn_type: softmax-xformers
42
+ extra_ff_mix_layer: True
43
+ use_spatial_context: True
44
+ merge_strategy: learned_with_images
45
+ video_kernel_size: [ 3, 1, 1 ]
46
+ add_lora: False
47
+ action_control: False
48
+
49
+ conditioner_config:
50
+ target: vista.vwm.modules.GeneralConditioner
51
+ params:
52
+ emb_models:
53
+ - input_key: cond_frames_without_noise
54
+ is_trainable: False
55
+ ucg_rate: 0.15
56
+ target: vista.vwm.modules.encoders.modules.FrozenOpenCLIPImagePredictionEmbedder
57
+ params:
58
+ n_cond_frames: 1
59
+ n_copies: 1
60
+ open_clip_embedding_config:
61
+ target: vista.vwm.modules.encoders.modules.FrozenOpenCLIPImageEmbedder
62
+ params:
63
+ freeze: True
64
+
65
+ - input_key: fps_id
66
+ is_trainable: False
67
+ ucg_rate: 0.0
68
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
69
+ params:
70
+ outdim: 256
71
+
72
+ - input_key: motion_bucket_id
73
+ is_trainable: False
74
+ ucg_rate: 0.0
75
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
76
+ params:
77
+ outdim: 256
78
+
79
+ - input_key: cond_frames
80
+ is_trainable: False
81
+ ucg_rate: 0.15
82
+ target: vista.vwm.modules.encoders.modules.VideoPredictionEmbedderWithEncoder
83
+ params:
84
+ disable_encoder_autocast: True
85
+ n_cond_frames: 1
86
+ n_copies: 1
87
+ is_ae: True
88
+
89
+ encoder_config:
90
+ target: vista.vwm.models.autoencoder.AutoencoderKLModeOnly
91
+ params:
92
+ embed_dim: 4
93
+ monitor: val/rec_loss
94
+
95
+ ddconfig:
96
+ attn_type: vanilla-xformers
97
+ double_z: True
98
+ z_channels: 4
99
+ resolution: 256
100
+ in_channels: 3
101
+ out_ch: 3
102
+ ch: 128
103
+ ch_mult: [ 1, 2, 4, 4 ]
104
+ num_res_blocks: 2
105
+ attn_resolutions: [ ]
106
+ dropout: 0.0
107
+
108
+ loss_config:
109
+ target: torch.nn.Identity
110
+
111
+ - input_key: cond_aug
112
+ is_trainable: False
113
+ ucg_rate: 0.0
114
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
115
+ params:
116
+ outdim: 256
117
+
118
+ first_stage_config:
119
+ target: vista.vwm.models.autoencoder.AutoencodingEngine
120
+ params:
121
+ loss_config:
122
+ target: torch.nn.Identity
123
+
124
+ regularizer_config:
125
+ target: vista.vwm.modules.autoencoding.regularizers.DiagonalGaussianRegularizer
126
+
127
+ encoder_config:
128
+ target: vista.vwm.modules.diffusionmodules.model.Encoder
129
+ params:
130
+ attn_type: vanilla
131
+ double_z: True
132
+ z_channels: 4
133
+ resolution: 256
134
+ in_channels: 3
135
+ out_ch: 3
136
+ ch: 128
137
+ ch_mult: [ 1, 2, 4, 4 ]
138
+ num_res_blocks: 2
139
+ attn_resolutions: [ ]
140
+ dropout: 0.0
141
+
142
+ decoder_config:
143
+ target: vista.vwm.modules.autoencoding.temporal_ae.VideoDecoder
144
+ params:
145
+ attn_type: vanilla
146
+ double_z: True
147
+ z_channels: 4
148
+ resolution: 256
149
+ in_channels: 3
150
+ out_ch: 3
151
+ ch: 128
152
+ ch_mult: [ 1, 2, 4, 4 ]
153
+ num_res_blocks: 2
154
+ attn_resolutions: [ ]
155
+ dropout: 0.0
156
+ video_kernel_size: [ 3, 1, 1 ]
157
+
158
+ scheduler_config:
159
+ target: vista.vwm.lr_scheduler.LambdaLinearScheduler
160
+ params:
161
+ warm_up_steps: [ 1000 ]
162
+ cycle_lengths: [ 10000000000000 ]
163
+ f_start: [ 1.e-6 ]
164
+ f_max: [ 1. ]
165
+ f_min: [ 1. ]
166
+
167
+ loss_fn_config:
168
+ target: vista.vwm.modules.diffusionmodules.loss.StandardDiffusionLoss
169
+ params:
170
+ use_additional_loss: True
171
+ offset_noise_level: 0.02
172
+ additional_loss_weight: 0.1
173
+ num_frames: *num_frames
174
+ replace_cond_frames: *replace_cond_frames
175
+ cond_frames_choices:
176
+ - [ ]
177
+ - [ 0 ]
178
+ - [ 0, 1 ]
179
+ - [ 0, 1, 2 ]
180
+
181
+ sigma_sampler_config:
182
+ target: vista.vwm.modules.diffusionmodules.sigma_sampling.EDMSampling
183
+ params:
184
+ p_mean: 1.0
185
+ p_std: 1.6
186
+ num_frames: *num_frames
187
+
188
+ loss_weighting_config:
189
+ target: vista.vwm.modules.diffusionmodules.loss_weighting.VWeighting
190
+
191
+ sampler_config:
192
+ target: vista.vwm.modules.diffusionmodules.sampling.EulerEDMSampler
193
+ params:
194
+ num_steps: 15
195
+
196
+ discretization_config:
197
+ target: vista.vwm.modules.diffusionmodules.discretizer.EDMDiscretization
198
+ params:
199
+ sigma_max: 700.0
200
+
201
+ guider_config:
202
+ target: vista.vwm.modules.diffusionmodules.guiders.LinearPredictionGuider
203
+ params:
204
+ num_frames: *num_frames
205
+ max_scale: 3.0
206
+ min_scale: 1.5
207
+
208
+ data:
209
+ target: vista.vwm.data.dataset.Sampler
210
+ params:
211
+ batch_size: 1
212
+ num_workers: 16
213
+ subsets:
214
+ - YouTube
215
+ probs:
216
+ - 1
217
+ samples_per_epoch: 256000
218
+ target_height: 576
219
+ target_width: 1024
220
+ num_frames: *num_frames
221
+
222
+ lightning:
223
+ callbacks:
224
+ image_logger:
225
+ target: train.ImageLogger
226
+ params:
227
+ num_frames: *num_frames
228
+ disabled: False
229
+ enable_autocast: False
230
+ batch_frequency: 1000
231
+ increase_log_steps: True
232
+ log_first_step: False
233
+ log_images_kwargs:
234
+ N: *num_frames
235
+
236
+ modelcheckpoint:
237
+ params:
238
+ every_n_epochs: 1 # checkpoint once per epoch; to checkpoint by steps instead, use every_n_train_steps (e.g. 5000), kept aligned with image_logger batch_frequency
239
+
240
+ trainer:
241
+ devices: 0,1
242
+ benchmark: True
243
+ num_sanity_val_steps: 0
244
+ accumulate_grad_batches: 2
245
+ max_epochs: 100
246
+ strategy: deepspeed_stage_2
247
+ gradient_clip_val: 0.3
vista/configs/training/vista_phase2_stage1.yaml ADDED
@@ -0,0 +1,294 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model:
2
+ base_learning_rate: 5.e-5
3
+ target: vista.vwm.models.diffusion.DiffusionEngine
4
+ params:
5
+ use_ema: True
6
+ input_key: img_seq
7
+ scale_factor: 0.18215
8
+ disable_first_stage_autocast: True
9
+ en_and_decode_n_samples_a_time: 1
10
+ num_frames: &num_frames 25
11
+ slow_spatial_layers: False
12
+ train_peft_adapters: True
13
+ replace_cond_frames: &replace_cond_frames True
14
+ fixed_cond_frames: # only used for logging images
15
+ - [ 0 ]
16
+
17
+ denoiser_config:
18
+ target: vista.vwm.modules.diffusionmodules.denoiser.Denoiser
19
+ params:
20
+ num_frames: *num_frames
21
+
22
+ scaling_config:
23
+ target: vista.vwm.modules.diffusionmodules.denoiser_scaling.VScalingWithEDMcNoise
24
+
25
+ network_config:
26
+ target: vista.vwm.modules.diffusionmodules.video_model.VideoUNet
27
+ params:
28
+ adm_in_channels: 768
29
+ num_classes: sequential
30
+ use_checkpoint: True
31
+ in_channels: 8
32
+ out_channels: 4
33
+ model_channels: 320
34
+ attention_resolutions: [ 4, 2, 1 ]
35
+ num_res_blocks: 2
36
+ channel_mult: [ 1, 2, 4, 4 ]
37
+ num_head_channels: 64
38
+ use_linear_in_transformer: True
39
+ transformer_depth: 1
40
+ context_dim: 1024
41
+ spatial_transformer_attn_type: softmax-xformers
42
+ extra_ff_mix_layer: True
43
+ use_spatial_context: True
44
+ merge_strategy: learned_with_images
45
+ video_kernel_size: [ 3, 1, 1 ]
46
+ add_lora: True
47
+ action_control: True
48
+
49
+ conditioner_config:
50
+ target: vista.vwm.modules.GeneralConditioner
51
+ params:
52
+ emb_models:
53
+ - input_key: cond_frames_without_noise
54
+ is_trainable: False
55
+ ucg_rate: 0.15
56
+ target: vista.vwm.modules.encoders.modules.FrozenOpenCLIPImagePredictionEmbedder
57
+ params:
58
+ n_cond_frames: 1
59
+ n_copies: 1
60
+ open_clip_embedding_config:
61
+ target: vista.vwm.modules.encoders.modules.FrozenOpenCLIPImageEmbedder
62
+ params:
63
+ freeze: True
64
+
65
+ - input_key: fps_id
66
+ is_trainable: False
67
+ ucg_rate: 0.0
68
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
69
+ params:
70
+ outdim: 256
71
+
72
+ - input_key: motion_bucket_id
73
+ is_trainable: False
74
+ ucg_rate: 0.0
75
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
76
+ params:
77
+ outdim: 256
78
+
79
+ - input_key: cond_frames
80
+ is_trainable: False
81
+ ucg_rate: 0.15
82
+ target: vista.vwm.modules.encoders.modules.VideoPredictionEmbedderWithEncoder
83
+ params:
84
+ disable_encoder_autocast: True
85
+ n_cond_frames: 1
86
+ n_copies: 1
87
+ is_ae: True
88
+
89
+ encoder_config:
90
+ target: vista.vwm.models.autoencoder.AutoencoderKLModeOnly
91
+ params:
92
+ embed_dim: 4
93
+ monitor: val/rec_loss
94
+
95
+ ddconfig:
96
+ attn_type: vanilla-xformers
97
+ double_z: True
98
+ z_channels: 4
99
+ resolution: 256
100
+ in_channels: 3
101
+ out_ch: 3
102
+ ch: 128
103
+ ch_mult: [ 1, 2, 4, 4 ]
104
+ num_res_blocks: 2
105
+ attn_resolutions: [ ]
106
+ dropout: 0.0
107
+
108
+ loss_config:
109
+ target: torch.nn.Identity
110
+
111
+ - input_key: cond_aug
112
+ is_trainable: False
113
+ ucg_rate: 0.0
114
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
115
+ params:
116
+ outdim: 256
117
+
118
+ - input_key: command
119
+ is_trainable: False
120
+ ucg_rate: 0.15
121
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
122
+ params:
123
+ outdim: &action_emb_dim 128
124
+ num_features: 1
125
+ add_sequence_dim: True
126
+
127
+ - input_key: trajectory
128
+ is_trainable: False
129
+ ucg_rate: 0.15
130
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
131
+ params:
132
+ outdim: *action_emb_dim
133
+ num_features: 8
134
+ add_sequence_dim: True
135
+
136
+ - input_key: speed
137
+ is_trainable: False
138
+ ucg_rate: 0.15
139
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
140
+ params:
141
+ outdim: *action_emb_dim
142
+ num_features: 4
143
+ add_sequence_dim: True
144
+
145
+ - input_key: angle
146
+ is_trainable: False
147
+ ucg_rate: 0.15
148
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
149
+ params:
150
+ outdim: *action_emb_dim
151
+ num_features: 4
152
+ add_sequence_dim: True
153
+
154
+ - input_key: goal
155
+ is_trainable: False
156
+ ucg_rate: 0.15
157
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
158
+ params:
159
+ outdim: *action_emb_dim
160
+ num_features: 2
161
+ add_sequence_dim: True
162
+
163
+ first_stage_config:
164
+ target: vista.vwm.models.autoencoder.AutoencodingEngine
165
+ params:
166
+ loss_config:
167
+ target: torch.nn.Identity
168
+
169
+ regularizer_config:
170
+ target: vista.vwm.modules.autoencoding.regularizers.DiagonalGaussianRegularizer
171
+
172
+ encoder_config:
173
+ target: vista.vwm.modules.diffusionmodules.model.Encoder
174
+ params:
175
+ attn_type: vanilla
176
+ double_z: True
177
+ z_channels: 4
178
+ resolution: 256
179
+ in_channels: 3
180
+ out_ch: 3
181
+ ch: 128
182
+ ch_mult: [ 1, 2, 4, 4 ]
183
+ num_res_blocks: 2
184
+ attn_resolutions: [ ]
185
+ dropout: 0.0
186
+
187
+ decoder_config:
188
+ target: vista.vwm.modules.autoencoding.temporal_ae.VideoDecoder
189
+ params:
190
+ attn_type: vanilla
191
+ double_z: True
192
+ z_channels: 4
193
+ resolution: 256
194
+ in_channels: 3
195
+ out_ch: 3
196
+ ch: 128
197
+ ch_mult: [ 1, 2, 4, 4 ]
198
+ num_res_blocks: 2
199
+ attn_resolutions: [ ]
200
+ dropout: 0.0
201
+ video_kernel_size: [ 3, 1, 1 ]
202
+
203
+ scheduler_config:
204
+ target: vista.vwm.lr_scheduler.LambdaLinearScheduler
205
+ params:
206
+ warm_up_steps: [ 1000 ]
207
+ cycle_lengths: [ 10000000000000 ]
208
+ f_start: [ 1.e-6 ]
209
+ f_max: [ 1. ]
210
+ f_min: [ 1. ]
211
+
212
+ loss_fn_config:
213
+ target: vista.vwm.modules.diffusionmodules.loss.StandardDiffusionLoss
214
+ params:
215
+ use_additional_loss: True
216
+ offset_noise_level: 0.02
217
+ additional_loss_weight: 0.1
218
+ num_frames: *num_frames
219
+ replace_cond_frames: *replace_cond_frames
220
+ cond_frames_choices:
221
+ - [ ]
222
+ - [ 0 ]
223
+ - [ 0, 1 ]
224
+ - [ 0, 1, 2 ]
225
+
226
+ sigma_sampler_config:
227
+ target: vista.vwm.modules.diffusionmodules.sigma_sampling.EDMSampling
228
+ params:
229
+ p_mean: 1.0
230
+ p_std: 1.6
231
+ num_frames: *num_frames
232
+
233
+ loss_weighting_config:
234
+ target: vista.vwm.modules.diffusionmodules.loss_weighting.VWeighting
235
+
236
+ sampler_config:
237
+ target: vista.vwm.modules.diffusionmodules.sampling.EulerEDMSampler
238
+ params:
239
+ num_steps: 15
240
+
241
+ discretization_config:
242
+ target: vista.vwm.modules.diffusionmodules.discretizer.EDMDiscretization
243
+ params:
244
+ sigma_max: 700.0
245
+
246
+ guider_config:
247
+ target: vista.vwm.modules.diffusionmodules.guiders.LinearPredictionGuider
248
+ params:
249
+ num_frames: *num_frames
250
+ max_scale: 3.0
251
+ min_scale: 1.5
252
+
253
+ data:
254
+ target: vista.vwm.data.dataset.Sampler
255
+ params:
256
+ batch_size: 1
257
+ num_workers: 16
258
+ subsets:
259
+ - YouTube
260
+ - NuScenes
261
+ probs:
262
+ - 1
263
+ - 1
264
+ samples_per_epoch: 256000
265
+ target_height: 320
266
+ target_width: 576
267
+ num_frames: *num_frames
268
+
269
+ lightning:
270
+ callbacks:
271
+ image_logger:
272
+ target: train.ImageLogger
273
+ params:
274
+ num_frames: *num_frames
275
+ disabled: False
276
+ enable_autocast: False
277
+ batch_frequency: 1000
278
+ increase_log_steps: True
279
+ log_first_step: False
280
+ log_images_kwargs:
281
+ N: *num_frames
282
+
283
+ modelcheckpoint:
284
+ params:
285
+ every_n_epochs: 1 # checkpoint once per epoch; to checkpoint by steps instead, use every_n_train_steps (e.g. 5000), kept aligned with image_logger batch_frequency
286
+
287
+ trainer:
288
+ devices: 0,1
289
+ benchmark: True
290
+ num_sanity_val_steps: 0
291
+ accumulate_grad_batches: 1
292
+ max_epochs: 100
293
+ strategy: deepspeed_stage_2
294
+ gradient_clip_val: 0.3
vista/configs/training/vista_phase2_stage2.yaml ADDED
@@ -0,0 +1,294 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model:
2
+ base_learning_rate: 5.e-5
3
+ target: vista.vwm.models.diffusion.DiffusionEngine
4
+ params:
5
+ use_ema: True
6
+ input_key: img_seq
7
+ scale_factor: 0.18215
8
+ disable_first_stage_autocast: True
9
+ en_and_decode_n_samples_a_time: 1
10
+ num_frames: &num_frames 25
11
+ slow_spatial_layers: False
12
+ train_peft_adapters: True
13
+ replace_cond_frames: &replace_cond_frames True
14
+ fixed_cond_frames: # only used for logging images
15
+ - [ 0 ]
16
+
17
+ denoiser_config:
18
+ target: vista.vwm.modules.diffusionmodules.denoiser.Denoiser
19
+ params:
20
+ num_frames: *num_frames
21
+
22
+ scaling_config:
23
+ target: vista.vwm.modules.diffusionmodules.denoiser_scaling.VScalingWithEDMcNoise
24
+
25
+ network_config:
26
+ target: vista.vwm.modules.diffusionmodules.video_model.VideoUNet
27
+ params:
28
+ adm_in_channels: 768
29
+ num_classes: sequential
30
+ use_checkpoint: True
31
+ in_channels: 8
32
+ out_channels: 4
33
+ model_channels: 320
34
+ attention_resolutions: [ 4, 2, 1 ]
35
+ num_res_blocks: 2
36
+ channel_mult: [ 1, 2, 4, 4 ]
37
+ num_head_channels: 64
38
+ use_linear_in_transformer: True
39
+ transformer_depth: 1
40
+ context_dim: 1024
41
+ spatial_transformer_attn_type: softmax-xformers
42
+ extra_ff_mix_layer: True
43
+ use_spatial_context: True
44
+ merge_strategy: learned_with_images
45
+ video_kernel_size: [ 3, 1, 1 ]
46
+ add_lora: True
47
+ action_control: True
48
+
49
+ conditioner_config:
50
+ target: vista.vwm.modules.GeneralConditioner
51
+ params:
52
+ emb_models:
53
+ - input_key: cond_frames_without_noise
54
+ is_trainable: False
55
+ ucg_rate: 0.15
56
+ target: vista.vwm.modules.encoders.modules.FrozenOpenCLIPImagePredictionEmbedder
57
+ params:
58
+ n_cond_frames: 1
59
+ n_copies: 1
60
+ open_clip_embedding_config:
61
+ target: vista.vwm.modules.encoders.modules.FrozenOpenCLIPImageEmbedder
62
+ params:
63
+ freeze: True
64
+
65
+ - input_key: fps_id
66
+ is_trainable: False
67
+ ucg_rate: 0.0
68
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
69
+ params:
70
+ outdim: 256
71
+
72
+ - input_key: motion_bucket_id
73
+ is_trainable: False
74
+ ucg_rate: 0.0
75
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
76
+ params:
77
+ outdim: 256
78
+
79
+ - input_key: cond_frames
80
+ is_trainable: False
81
+ ucg_rate: 0.15
82
+ target: vista.vwm.modules.encoders.modules.VideoPredictionEmbedderWithEncoder
83
+ params:
84
+ disable_encoder_autocast: True
85
+ n_cond_frames: 1
86
+ n_copies: 1
87
+ is_ae: True
88
+
89
+ encoder_config:
90
+ target: vista.vwm.models.autoencoder.AutoencoderKLModeOnly
91
+ params:
92
+ embed_dim: 4
93
+ monitor: val/rec_loss
94
+
95
+ ddconfig:
96
+ attn_type: vanilla-xformers
97
+ double_z: True
98
+ z_channels: 4
99
+ resolution: 256
100
+ in_channels: 3
101
+ out_ch: 3
102
+ ch: 128
103
+ ch_mult: [ 1, 2, 4, 4 ]
104
+ num_res_blocks: 2
105
+ attn_resolutions: [ ]
106
+ dropout: 0.0
107
+
108
+ loss_config:
109
+ target: torch.nn.Identity
110
+
111
+ - input_key: cond_aug
112
+ is_trainable: False
113
+ ucg_rate: 0.0
114
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
115
+ params:
116
+ outdim: 256
117
+
118
+ - input_key: command
119
+ is_trainable: False
120
+ ucg_rate: 0.15
121
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
122
+ params:
123
+ outdim: &action_emb_dim 128
124
+ num_features: 1
125
+ add_sequence_dim: True
126
+
127
+ - input_key: trajectory
128
+ is_trainable: False
129
+ ucg_rate: 0.15
130
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
131
+ params:
132
+ outdim: *action_emb_dim
133
+ num_features: 8
134
+ add_sequence_dim: True
135
+
136
+ - input_key: speed
137
+ is_trainable: False
138
+ ucg_rate: 0.15
139
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
140
+ params:
141
+ outdim: *action_emb_dim
142
+ num_features: 4
143
+ add_sequence_dim: True
144
+
145
+ - input_key: angle
146
+ is_trainable: False
147
+ ucg_rate: 0.15
148
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
149
+ params:
150
+ outdim: *action_emb_dim
151
+ num_features: 4
152
+ add_sequence_dim: True
153
+
154
+ - input_key: goal
155
+ is_trainable: False
156
+ ucg_rate: 0.15
157
+ target: vista.vwm.modules.encoders.modules.ConcatTimestepEmbedderND
158
+ params:
159
+ outdim: *action_emb_dim
160
+ num_features: 2
161
+ add_sequence_dim: True
162
+
163
+ first_stage_config:
164
+ target: vista.vwm.models.autoencoder.AutoencodingEngine
165
+ params:
166
+ loss_config:
167
+ target: torch.nn.Identity
168
+
169
+ regularizer_config:
170
+ target: vista.vwm.modules.autoencoding.regularizers.DiagonalGaussianRegularizer
171
+
172
+ encoder_config:
173
+ target: vista.vwm.modules.diffusionmodules.model.Encoder
174
+ params:
175
+ attn_type: vanilla
176
+ double_z: True
177
+ z_channels: 4
178
+ resolution: 256
179
+ in_channels: 3
180
+ out_ch: 3
181
+ ch: 128
182
+ ch_mult: [ 1, 2, 4, 4 ]
183
+ num_res_blocks: 2
184
+ attn_resolutions: [ ]
185
+ dropout: 0.0
186
+
187
+ decoder_config:
188
+ target: vista.vwm.modules.autoencoding.temporal_ae.VideoDecoder
189
+ params:
190
+ attn_type: vanilla
191
+ double_z: True
192
+ z_channels: 4
193
+ resolution: 256
194
+ in_channels: 3
195
+ out_ch: 3
196
+ ch: 128
197
+ ch_mult: [ 1, 2, 4, 4 ]
198
+ num_res_blocks: 2
199
+ attn_resolutions: [ ]
200
+ dropout: 0.0
201
+ video_kernel_size: [ 3, 1, 1 ]
202
+
203
+ scheduler_config:
204
+ target: vista.vwm.lr_scheduler.LambdaLinearScheduler
205
+ params:
206
+ warm_up_steps: [ 1000 ]
207
+ cycle_lengths: [ 10000000000000 ]
208
+ f_start: [ 1.e-6 ]
209
+ f_max: [ 1. ]
210
+ f_min: [ 1. ]
211
+
212
+ loss_fn_config:
213
+ target: vista.vwm.modules.diffusionmodules.loss.StandardDiffusionLoss
214
+ params:
215
+ use_additional_loss: True
216
+ offset_noise_level: 0.02
217
+ additional_loss_weight: 0.1
218
+ num_frames: *num_frames
219
+ replace_cond_frames: *replace_cond_frames
220
+ cond_frames_choices:
221
+ - [ ]
222
+ - [ 0 ]
223
+ - [ 0, 1 ]
224
+ - [ 0, 1, 2 ]
225
+
226
+ sigma_sampler_config:
227
+ target: vista.vwm.modules.diffusionmodules.sigma_sampling.EDMSampling
228
+ params:
229
+ p_mean: 1.0
230
+ p_std: 1.6
231
+ num_frames: *num_frames
232
+
233
+ loss_weighting_config:
234
+ target: vista.vwm.modules.diffusionmodules.loss_weighting.VWeighting
235
+
236
+ sampler_config:
237
+ target: vista.vwm.modules.diffusionmodules.sampling.EulerEDMSampler
238
+ params:
239
+ num_steps: 15
240
+
241
+ discretization_config:
242
+ target: vista.vwm.modules.diffusionmodules.discretizer.EDMDiscretization
243
+ params:
244
+ sigma_max: 700.0
245
+
246
+ guider_config:
247
+ target: vista.vwm.modules.diffusionmodules.guiders.LinearPredictionGuider
248
+ params:
249
+ num_frames: *num_frames
250
+ max_scale: 3.0
251
+ min_scale: 1.5
252
+
253
+ data:
254
+ target: vista.vwm.data.dataset.Sampler
255
+ params:
256
+ batch_size: 1
257
+ num_workers: 16
258
+ subsets:
259
+ - YouTube
260
+ - NuScenes
261
+ probs:
262
+ - 1
263
+ - 1
264
+ samples_per_epoch: 256000
265
+ target_height: 576
266
+ target_width: 1024
267
+ num_frames: *num_frames
268
+
269
+ lightning:
270
+ callbacks:
271
+ image_logger:
272
+ target: train.ImageLogger
273
+ params:
274
+ num_frames: *num_frames
275
+ disabled: False
276
+ enable_autocast: False
277
+ batch_frequency: 1000
278
+ increase_log_steps: True
279
+ log_first_step: False
280
+ log_images_kwargs:
281
+ N: *num_frames
282
+
283
+ modelcheckpoint:
284
+ params:
285
+ every_n_epochs: 1 # every_n_train_steps: 5000, set the same as image_logger batch_frequency
286
+
287
+ trainer:
288
+ devices: 0,1
289
+ benchmark: True
290
+ num_sanity_val_steps: 0
291
+ accumulate_grad_batches: 1
292
+ max_epochs: 100
293
+ strategy: deepspeed_stage_2
294
+ gradient_clip_val: 0.3
vista/docs/INSTALL.md ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## Installation
2
+
3
+ - ### Requirement
4
+
5
+ Our experiments are conducted with **PyTorch 2.0.1**, **CUDA 11.7**, **Ubuntu 22.04**, and **NVIDIA Tesla A100** (80 GB). For other requirements, please check [TRAINING.md](https://github.com/OpenDriveLab/Vista/blob/main/docs/TRAINING.md) and [SAMPLING.md](https://github.com/OpenDriveLab/Vista/blob/main/docs/SAMPLING.md).
6
+
7
+ - ### Preparation
8
+
9
+ Clone the repository to your local directory.
10
+
11
+ ```shell
12
+ git clone https://github.com/OpenDriveLab/Vista.git
13
+ ```
14
+
15
+ We provide an example on nuScenes dataset for training and sampling. Before you start, make sure you have:
16
+
17
+ - Downloaded the translated action annotations from [here](https://drive.google.com/drive/folders/1JpZObdR0OXagCbnPZfMSI8vhGLom5pht?usp=sharing) and put the JSON files into `annos`.
18
+
19
+ - Downloaded all splits of **Trainval** in **Full dataset (v1.0)** to your device following [official instructions](https://www.nuscenes.org/download). After downloading, it should contain:
20
+
21
+ ```
22
+ $<your-nusc-data-root>
23
+ ├── samples
24
+ ├── sweeps
25
+ ├── ...
26
+ └── v1.0-trainval
27
+ ```
28
+
29
+ - ### Installation
30
+
31
+ - We use conda to manage the environment.
32
+
33
+ ```shell
34
+ conda create -n vista python=3.9 -y
35
+ conda activate vista
36
+ ```
37
+
38
+ - Install dependencies.
39
+
40
+ ```shell
41
+ conda install -y pytorch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 pytorch-cuda=11.7 -c pytorch -c nvidia
42
+ pip3 install -r requirements.txt
43
+ pip3 install -e git+https://github.com/Stability-AI/datapipelines.git@main#egg=sdata
44
+ ```
45
+
46
+ ---
47
+
48
+ => Next: [[Training](https://github.com/OpenDriveLab/Vista/blob/main/docs/TRAINING.md)]
vista/docs/ISSUES.md ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## Trouble Shooting
2
+
3
+ 1. #### Out of memory during sampling.
4
+
5
+ - Possible reason:
6
+ - Too many high-resolution frames for parallel decoding. The default setting will request ca. 66 GB peak VRAM.
7
+
8
+ - Try this:
9
+ - Reduce the number of jointly decoded frames *en_and_decode_n_samples_a_time* in `inference/vista.yaml`.
10
+
11
+ 2. #### Get stuck at loading FrozenCLIPEmbedder or FrozenOpenCLIPImageEmbedder.
12
+
13
+ - Possible reason:
14
+ - A network failure.
15
+
16
+ - Try this:
17
+ 1. Download [openai/clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14/tree/main) and [laion/CLIP-ViT-H-14-laion2B-s32B-b79K](https://huggingface.co/laion/CLIP-ViT-H-14-laion2B-s32B-b79K/tree/main) in advance.
18
+ 2. Set *version* of FrozenCLIPEmbedder and FrozenOpenCLIPImageEmbedder in `vwm/modules/encoders/modules.py` to the new paths of `pytorch_model.bin`.
19
+
20
+ 3. #### Datasets not yet available during training.
21
+
22
+ - Possible reason:
23
+
24
+ - The installed [sdata](https://github.com/Stability-AI/datapipelines) is not detected.
25
+
26
+ - Try this:
27
+
28
+ - Reinstall in the current project directory.
29
+
30
+ ````shell
31
+ pip3 install -e git+https://github.com/Stability-AI/datapipelines.git@main#egg=sdata
32
+ ````
33
+
34
+ ---
35
+
36
+ <= Previous: [[Sampling](https://github.com/OpenDriveLab/Vista/blob/main/docs/SAMPLING.md)]
vista/docs/SAMPLING.md ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## Sampling
2
+
3
+ - ### Requirement
4
+
5
+ Currently, we suggest using Nvidia GPUs with a minimum of **32 GB** VRAM for sampling. Check [ISSUES.md](https://github.com/OpenDriveLab/Vista/blob/main/docs/ISSUES.md) if you do not have enough memory.
6
+
7
+ - ### Preparation
8
+
9
+ Make sure you have downloaded `vista.safetensors` from [Hugging Face](https://huggingface.co/OpenDriveLab/Vista/blob/main/vista.safetensors) or [Google Drive](https://drive.google.com/file/d/1bCM7XLDquRqnnpauQAK5j1jP-n0y1ama/view). Move (or link) the checkpoint into `ckpts`.
10
+
11
+ - ### Future Prediction
12
+
13
+ - We provide a sampling example for nuScenes. Make sure to prepare the dataset as [INSTALL.md](https://github.com/OpenDriveLab/Vista/blob/main/docs/INSTALL.md) and replace the correct *data_root* in `sample.py`.
14
+
15
+ - Short-term action-free prediction.
16
+
17
+ ```shell
18
+ python sample.py
19
+ ```
20
+
21
+ - Long-term rollout.
22
+
23
+ ```shell
24
+ python sample.py --n_rounds 6
25
+ ```
26
+
27
+ - Action-conditioned simulation (take trajectory as an example).
28
+
29
+ ```shell
30
+ python sample.py --action traj
31
+ ```
32
+
33
+ > Make sure the loaded checkpoint strictly matches all parameters. Otherwise, you may get a sequence of blurry frames.
34
+
35
+ - Important arguments:
36
+
37
+ - `--dataset`: You can also customize the scenes by providing other driving views within a folder of images. They will serve as the initial frames for prediction when you set `--dataset` to "IMG".
38
+ - `--action`: The mode of control inputs. By default, we perform action-free prediction. You can try different actions using "traj", "cmd", "steer", or "goal". It will import ground truth actions (if available), but you can enforce any actions by making slight modifications.
39
+ - `--n_rounds`: The number of sampling rounds, which determines the duration to predict. You can increase it to perform long-horizon rollout. Each additional round extends the prediction by 2.3 seconds.
40
+ - `--n_steps`: The number of DDIM sampling steps, which can be reduced for efficiency.
41
+ - `--rand_gen`: Whether to generate samples randomly selected from the whole dataset or go through all samples one by one.
42
+ - `--low_vram`: Enable the low VRAM mode if you are using a GPU with less than 80 GB VRAM.
43
+
44
+ - ### Reward Estimation
45
+
46
+ - We provide a simplified example to estimate the rewards on nuScenes. Make sure to replace the correct *data_root* in `reward.py`.
47
+
48
+ ```shell
49
+ python reward.py
50
+ ```
51
+
52
+ - Important arguments:
53
+
54
+ - `--ens_size`: The number of samples to generate per case (initial frame and action condition).
55
+
56
+ ---
57
+
58
+ <= Previous: [[Training](https://github.com/OpenDriveLab/Vista/blob/main/docs/TRAINING.md)]
59
+
60
+ => Next: [[Trouble Shooting](https://github.com/OpenDriveLab/Vista/blob/main/docs/ISSUES.md)]
vista/docs/TRAINING.md ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## Training
2
+
3
+ - ### Requirement
4
+
5
+ Nvidia GPUs with **80 GB** VRAM are required for training, but you can train low-resolution variants on smaller GPUs.
6
+
7
+ - ### Preparation
8
+
9
+ Download the pretrained `svd_xt.safetensors` from [Hugging Face](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt/blob/main/svd_xt.safetensors) and place the checkpoint into `ckpts`.
10
+
11
+ - ### Training (example)
12
+
13
+ - We take **nuScenes** dataset as an example for training. After finishing the setups in [INSTALL.md](https://github.com/OpenDriveLab/Vista/blob/main/docs/INSTALL.md), remember to edit *data_root* in `vwm/data/subsets/nuscenes.py` to the proper path of nuScenes.
14
+
15
+ - We use DeepSpeed ZeRO stage 2 to improve data parallelism and lower memory footprint during training. The training can be launched as:
16
+
17
+ - Distributed training (suppose you train with 2 nodes, and each node has 8 GPUs).
18
+
19
+ ```shell
20
+ torchrun \
21
+ --nnodes=2 \
22
+ --nproc_per_node=8 \
23
+ train.py \
24
+ --base configs/example/nusc_train.yaml \
25
+ --num_nodes 2 \
26
+ --n_devices 8
27
+ ```
28
+
29
+ - Single GPU debugging (too slow, not recommended for training).
30
+
31
+ ```shell
32
+ python train.py --num_nodes 1 --n_devices 1
33
+ ```
34
+
35
+ > The training logs, including visualization samples and model checkpoints, will be saved in the project directory by default. Given that the size of checkpoints could be very large, you can set another directory to save these logs by providing an available path to `--logdir`.
36
+ >
37
+ > You can disable `--no_test` to test a bunch of samples for every checkpoint, but we recommend evaluating them offline for flexible comparison and uninterrupted training.
38
+
39
+ - After training, switch to the log directory with the model checkpoint. You should find a Python script named `zero_to_fp32.py` and a `checkpoint` folder that contains all partitioned checkpoints. The final checkpoint can be obtained by:
40
+
41
+ 1. [*if you only want to resume training*] Merge the partitioned checkpoints as `pytorch_model.bin` using `zero_to_fp32.py`.
42
+
43
+ ```shell
44
+ python zero_to_fp32.py . pytorch_model.bin
45
+ ```
46
+
47
+ 2. [*if you also want to do inference*] Navigate into the project root, and use `bin_to_st.py` to convert the resulting `path_to/pytorch_model.bin` to `ckpts/vista.safetensors`.
48
+
49
+ - ### Training of Vista
50
+
51
+ - Download **OpenDV-YouTube** dataset (or a part of it) from [DriveAGI](https://github.com/OpenDriveLab/DriveAGI#genad-dataset-opendv-youtube). You can refer to the structure in `vwm/data/subsets/youtube.py` to organize the dataset.
52
+
53
+ - #### Phase 1: learning high-fidelity future prediction
54
+
55
+ - This phase uses unlabeled OpenDV-YouTube for training.
56
+
57
+ - The model is trained at a resolution of 576x1024 on 128 GPUs for 20K iterations with gradient accumulation.
58
+
59
+ ```shell
60
+ torchrun \
61
+ --nnodes=16 \
62
+ --nproc_per_node=8 \
63
+ train.py \
64
+ --base configs/training/vista_phase1.yaml \
65
+ --num_nodes 16 \
66
+ --n_devices 8
67
+ ```
68
+
69
+ - We pause the training after the effect of dynamics priors can be witnessed. The last checkpoint is merged for the training of next phase.
70
+
71
+ - #### Phase 2: learning versatile action controllability
72
+
73
+ - This phase uses OpenDV-YouTube and nuScenes for collaborative training.
74
+
75
+ - ##### Stage 1: low-resolution training
76
+
77
+ - The model is finetuned at a resolution of 320x576 on 8 GPUs for 120K iterations.
78
+
79
+ ```shell
80
+ torchrun \
81
+ --nnodes=1 \
82
+ --nproc_per_node=8 \
83
+ train.py \
84
+ --base configs/training/vista_phase2_stage1.yaml \
85
+ --finetune ${PATH_TO_PHASE1_CKPT}/pytorch_model.bin \
86
+ --num_nodes 1 \
87
+ --n_devices 8
88
+ ```
89
+
90
+ - We pause the training after the controllability can be clearly witnessed. The last checkpoint is merged for the training of next stage.
91
+
92
+ - ##### Stage 2: high-resolution training
93
+
94
+ - The model is finetuned at a resolution of 576x1024 on 8 GPUs for another 10K iterations.
95
+
96
+ ```shell
97
+ torchrun \
98
+ --nnodes=1 \
99
+ --nproc_per_node=8 \
100
+ train.py \
101
+ --base configs/training/vista_phase2_stage2.yaml \
102
+ --finetune ${PATH_TO_STAGE1_CKPT}/pytorch_model.bin \
103
+ --num_nodes 1 \
104
+ --n_devices 8
105
+ ```
106
+
107
+ - We pause the training after the model adapts to the desired resolution. The last checkpoint is merged for application.
108
+
109
+ ---
110
+
111
+ <= Previous: [[Installation](https://github.com/OpenDriveLab/Vista/blob/main/docs/INSTALL.md)]
112
+
113
+ => Next: [[Sampling](https://github.com/OpenDriveLab/Vista/blob/main/docs/SAMPLING.md)]
vista/reward.py ADDED
@@ -0,0 +1,266 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import argparse
4
+ import json
5
+ import random
6
+
7
+ from pytorch_lightning import seed_everything
8
+ from reward_utils import *
9
+ from torchvision import transforms
10
+
11
+ VERSION2SPECS = {
12
+ "vwm": {
13
+ "config": "configs/inference/vista.yaml",
14
+ "ckpt": "ckpts/vista.safetensors"
15
+ }
16
+ }
17
+
18
+ DATASET2SOURCES = {
19
+ "NUSCENES": {
20
+ "data_root": "data/nuscenes",
21
+ "anno_file": "annos/nuScenes_val.json"
22
+ },
23
+ "IMG": {
24
+ "data_root": "image_folder"
25
+ }
26
+ }
27
+
28
+
29
+ def parse_args(**parser_kwargs):
30
+ parser = argparse.ArgumentParser(**parser_kwargs)
31
+ parser.add_argument(
32
+ "--version",
33
+ type=str,
34
+ default="vwm",
35
+ help="model version"
36
+ )
37
+ parser.add_argument(
38
+ "--dataset",
39
+ type=str,
40
+ default="NUSCENES",
41
+ help="dataset name"
42
+ )
43
+ parser.add_argument(
44
+ "--save",
45
+ type=str,
46
+ default="outputs",
47
+ help="directory to save samples"
48
+ )
49
+ parser.add_argument(
50
+ "--action",
51
+ type=str,
52
+ default="traj",
53
+ help="action mode for control, such as traj, cmd, steer, goal"
54
+ )
55
+ parser.add_argument(
56
+ "--n_frames",
57
+ type=int,
58
+ default=25,
59
+ help="number of frames for each round"
60
+ )
61
+ parser.add_argument(
62
+ "--n_conds",
63
+ type=int,
64
+ default=1,
65
+ help="number of initial condition frames for the first round"
66
+ )
67
+ parser.add_argument(
68
+ "--ens_size",
69
+ type=int,
70
+ default=5,
71
+ help="number of samples per case"
72
+ )
73
+ parser.add_argument(
74
+ "--seed",
75
+ type=int,
76
+ default=23,
77
+ help="random seed for seed_everything"
78
+ )
79
+ parser.add_argument(
80
+ "--height",
81
+ type=int,
82
+ default=576,
83
+ help="target height of the generated video"
84
+ )
85
+ parser.add_argument(
86
+ "--width",
87
+ type=int,
88
+ default=1024,
89
+ help="target width of the generated video"
90
+ )
91
+ parser.add_argument(
92
+ "--cfg_scale",
93
+ type=float,
94
+ default=2.5,
95
+ help="scale of the classifier-free guidance"
96
+ )
97
+ parser.add_argument(
98
+ "--cond_aug",
99
+ type=float,
100
+ default=0.0,
101
+ help="strength of the noise augmentation"
102
+ )
103
+ parser.add_argument(
104
+ "--n_steps",
105
+ type=int,
106
+ default=10,
107
+ help="number of sampling steps"
108
+ )
109
+ parser.add_argument(
110
+ "--rand_gen",
111
+ action="store_false",
112
+ help="whether to generate samples randomly or sequentially"
113
+ )
114
+ parser.add_argument(
115
+ "--low_vram",
116
+ action="store_true",
117
+ help="whether to save memory or not"
118
+ )
119
+ return parser
120
+
121
+
122
+ def get_sample(selected_index=0, dataset_name="NUSCENES", num_frames=25, action_mode="free"):
123
+ dataset_dict = DATASET2SOURCES[dataset_name]
124
+ action_dict = None
125
+ if dataset_name == "IMG":
126
+ image_list = os.listdir(dataset_dict["data_root"])
127
+ total_length = len(image_list)
128
+ while selected_index >= total_length:
129
+ selected_index -= total_length
130
+ image_file = image_list[selected_index]
131
+
132
+ path_list = [os.path.join(dataset_dict["data_root"], image_file)] * num_frames
133
+ else:
134
+ with open(dataset_dict["anno_file"]) as anno_json:
135
+ all_samples = json.load(anno_json)
136
+ total_length = len(all_samples)
137
+ while selected_index >= total_length:
138
+ selected_index -= total_length
139
+ sample_dict = all_samples[selected_index]
140
+
141
+ path_list = list()
142
+ if dataset_name == "NUSCENES":
143
+ for index in range(num_frames):
144
+ image_path = os.path.join(dataset_dict["data_root"], sample_dict["frames"][index])
145
+ assert os.path.exists(image_path), image_path
146
+ path_list.append(image_path)
147
+ action_dict = dict()
148
+ if action_mode == "traj" or action_mode == "trajectory":
149
+ action_dict["trajectory"] = torch.tensor(sample_dict["traj"][2:])
150
+ elif action_mode == "cmd" or action_mode == "command":
151
+ action_dict["command"] = torch.tensor(sample_dict["cmd"])
152
+ elif action_mode == "steer":
153
+ # scene might be empty
154
+ if sample_dict["speed"]:
155
+ action_dict["speed"] = torch.tensor(sample_dict["speed"][1:])
156
+ # scene might be empty
157
+ if sample_dict["angle"]:
158
+ action_dict["angle"] = torch.tensor(sample_dict["angle"][1:]) / 780
159
+ elif action_mode == "goal":
160
+ # point might be invalid
161
+ if sample_dict["z"] > 0 and 0 < sample_dict["goal"][0] < 1600 and 0 < sample_dict["goal"][1] < 900:
162
+ action_dict["goal"] = torch.tensor([
163
+ sample_dict["goal"][0] / 1600,
164
+ sample_dict["goal"][1] / 900
165
+ ])
166
+ else:
167
+ raise ValueError(f"Unsupported action mode {action_mode}")
168
+ else:
169
+ raise ValueError(f"Invalid dataset {dataset_name}")
170
+ return path_list, selected_index, total_length, action_dict
171
+
172
+
173
+ def load_img(file_name, target_height=320, target_width=576, device="cuda"):
174
+ if file_name is not None:
175
+ image = Image.open(file_name)
176
+ if not image.mode == "RGB":
177
+ image = image.convert("RGB")
178
+ else:
179
+ raise ValueError(f"Invalid image file {file_name}")
180
+ ori_w, ori_h = image.size
181
+ # print(f"Loaded input image of size ({ori_w}, {ori_h})")
182
+
183
+ if ori_w / ori_h > target_width / target_height:
184
+ tmp_w = int(target_width / target_height * ori_h)
185
+ left = (ori_w - tmp_w) // 2
186
+ right = (ori_w + tmp_w) // 2
187
+ image = image.crop((left, 0, right, ori_h))
188
+ elif ori_w / ori_h < target_width / target_height:
189
+ tmp_h = int(target_height / target_width * ori_w)
190
+ top = (ori_h - tmp_h) // 2
191
+ bottom = (ori_h + tmp_h) // 2
192
+ image = image.crop((0, top, ori_w, bottom))
193
+ image = image.resize((target_width, target_height), resample=Image.LANCZOS)
194
+ if not image.mode == "RGB":
195
+ image = image.convert("RGB")
196
+ image = transforms.Compose([
197
+ transforms.ToTensor(),
198
+ transforms.Lambda(lambda x: x * 2.0 - 1.0)
199
+ ])(image)
200
+ return image.to(device)
201
+
202
+
203
+ if __name__ == "__main__":
204
+ parser = parse_args()
205
+ opt, unknown = parser.parse_known_args()
206
+
207
+ set_lowvram_mode(opt.low_vram)
208
+ version_dict = VERSION2SPECS[opt.version]
209
+ model = init_model(version_dict)
210
+ unique_keys = set([x.input_key for x in model.conditioner.embedders])
211
+
212
+ sample_index = 0
213
+ while sample_index >= 0:
214
+ seed_everything(opt.seed)
215
+
216
+ frame_list, sample_index, dataset_length, action_dict = get_sample(sample_index,
217
+ opt.dataset,
218
+ opt.n_frames,
219
+ opt.action)
220
+
221
+ img_seq = list()
222
+ for each_path in frame_list:
223
+ img = load_img(each_path, opt.height, opt.width)
224
+ img_seq.append(img)
225
+ images = torch.stack(img_seq)
226
+
227
+ value_dict = init_embedder_options(unique_keys)
228
+ cond_img = img_seq[0][None]
229
+ value_dict["cond_frames_without_noise"] = cond_img
230
+ value_dict["cond_aug"] = opt.cond_aug
231
+ value_dict["cond_frames"] = cond_img + opt.cond_aug * torch.randn_like(cond_img)
232
+ if action_dict is not None:
233
+ for key, value in action_dict.items():
234
+ value_dict[key] = value
235
+
236
+ guider = "VanillaCFG"
237
+ sampler = init_sampling(guider=guider, steps=opt.n_steps, cfg_scale=opt.cfg_scale, num_frames=opt.n_frames)
238
+
239
+ uc_keys = ["cond_frames", "cond_frames_without_noise", "command", "trajectory", "speed", "angle", "goal"]
240
+
241
+ out = do_sample(
242
+ images,
243
+ model,
244
+ sampler,
245
+ value_dict,
246
+ num_frames=opt.n_frames,
247
+ ensemble_size=opt.ens_size,
248
+ force_uc_zero_embeddings=uc_keys,
249
+ initial_cond_indices=[index for index in range(opt.n_conds)]
250
+ )
251
+
252
+ if isinstance(out, (tuple, list)):
253
+ inputs, reward = out
254
+ real_path = os.path.join(opt.save, "real")
255
+ perform_save_locally(real_path, inputs, "videos", opt.dataset, sample_index)
256
+ perform_save_locally(real_path, inputs, "grids", opt.dataset, sample_index)
257
+ perform_save_locally(real_path, inputs, "images", opt.dataset, sample_index)
258
+ else:
259
+ raise TypeError
260
+
261
+ if opt.rand_gen:
262
+ sample_index += random.randint(1, dataset_length - 1)
263
+ else:
264
+ sample_index += 1
265
+ if dataset_length <= sample_index:
266
+ sample_index = -1
vista/reward_utils.py ADDED
@@ -0,0 +1,342 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import math
4
+ import os
5
+ from typing import Optional, Union
6
+
7
+ import numpy as np
8
+ import torch
9
+ import torchvision
10
+ from einops import rearrange, repeat
11
+ from omegaconf import ListConfig, OmegaConf
12
+ from PIL import Image
13
+ from safetensors.torch import load_file as load_safetensors
14
+ from torch import autocast
15
+ from train import save_img_seq_to_video
16
+ from vwm.modules.diffusionmodules.sampling import EulerEDMSampler
17
+ from vwm.util import default, instantiate_from_config
18
+
19
+
20
+ def init_model(version_dict, load_ckpt=True):
21
+ config = OmegaConf.load(version_dict["config"])
22
+ model = load_model_from_config(config, version_dict["ckpt"] if load_ckpt else None)
23
+ return model
24
+
25
+
26
+ lowvram_mode = True
27
+
28
+
29
+ def set_lowvram_mode(mode):
30
+ global lowvram_mode
31
+ lowvram_mode = mode
32
+
33
+
34
+ def initial_model_load(model):
35
+ global lowvram_mode
36
+ if lowvram_mode:
37
+ model.model.half()
38
+ else:
39
+ model.cuda()
40
+ return model
41
+
42
+
43
+ def load_model(model):
44
+ model.cuda()
45
+
46
+
47
+ def unload_model(model):
48
+ global lowvram_mode
49
+ if lowvram_mode:
50
+ model.cpu()
51
+ torch.cuda.empty_cache()
52
+
53
+
54
+ def load_model_from_config(config, ckpt=None):
55
+ model = instantiate_from_config(config.model)
56
+
57
+ if ckpt is not None:
58
+ print(f"Loading model from {ckpt}")
59
+ if ckpt.endswith("ckpt"):
60
+ pl_svd = torch.load(ckpt, map_location="cpu")
61
+ # dict contains:
62
+ # "epoch", "global_step", "pytorch-lightning_version",
63
+ # "state_dict", "loops", "callbacks", "optimizer_states", "lr_schedulers"
64
+ if "global_step" in pl_svd:
65
+ print(f"Global step: {pl_svd['global_step']}")
66
+ svd = pl_svd["state_dict"]
67
+ elif ckpt.endswith("safetensors"):
68
+ svd = load_safetensors(ckpt)
69
+ else:
70
+ raise NotImplementedError("Please convert the checkpoint to safetensors first")
71
+
72
+ missing, unexpected = model.load_state_dict(svd, strict=False)
73
+ if len(missing) > 0:
74
+ print(f"Missing keys: {missing}")
75
+ if len(unexpected) > 0:
76
+ print(f"Unexpected keys: {unexpected}")
77
+
78
+ model = initial_model_load(model)
79
+ model.eval()
80
+ return model
81
+
82
+
83
def init_embedder_options(keys):
    """Return hardcoded demo conditioning values for the requested embedder keys.

    Unknown keys are silently ignored; an empty dict is returned for no matches.
    """
    value_dict = {}
    for key in keys:
        if key == "fps" or key == "fps_id":
            fps = 10
            value_dict.update(fps=fps, fps_id=fps - 1)
        elif key == "motion_bucket_id":
            # mid-range motion strength; valid range is [0, 511]
            value_dict["motion_bucket_id"] = 127
    return value_dict
94
+
95
+
96
def perform_save_locally(save_path, samples, mode, dataset_name, sample_index):
    """Save frames under ``<save_path>/<mode>`` as PNGs, one grid PNG, or an MP4.

    Frames saved under a path containing "real" are assumed to be in [-1, 1]
    and are rescaled; everything else is assumed to already be in [0, 1].
    """
    assert mode in ["images", "grids", "videos"]
    merged_path = os.path.join(save_path, mode)
    os.makedirs(merged_path, exist_ok=True)
    samples = samples.cpu()

    if mode == "images":
        frame_count = 0
        for sample in samples:
            sample = rearrange(sample.numpy(), "c h w -> h w c")
            if "real" in save_path:
                sample = 255.0 * (sample + 1.0) / 2.0
            else:
                sample = 255.0 * sample
            image_save_path = os.path.join(merged_path, f"{dataset_name}_{sample_index:06}_{frame_count:04}.png")
            # if os.path.exists(image_save_path):
            #     return
            Image.fromarray(sample.astype(np.uint8)).save(image_save_path)
            frame_count += 1
    elif mode == "grids":
        # square-ish grid: nrow = floor(sqrt(batch))
        grid = torchvision.utils.make_grid(samples, nrow=int(samples.shape[0] ** 0.5))
        grid = grid.transpose(0, 1).transpose(1, 2).squeeze(-1).numpy()
        if "real" in save_path:
            grid = 255.0 * (grid + 1.0) / 2.0
        else:
            grid = 255.0 * grid
        grid_save_path = os.path.join(merged_path, f"{dataset_name}_{sample_index:06}.png")
        # if os.path.exists(grid_save_path):
        #     return
        Image.fromarray(grid.astype(np.uint8)).save(grid_save_path)
    elif mode == "videos":
        img_seq = rearrange(samples.numpy(), "t c h w -> t h w c")
        if "real" in save_path:
            img_seq = 255.0 * (img_seq + 1.0) / 2.0
        else:
            img_seq = 255.0 * img_seq
        video_save_path = os.path.join(merged_path, f"{dataset_name}_{sample_index:06}.mp4")
        # if os.path.exists(video_save_path):
        #     return
        # NOTE(review): save_img_seq_to_video is not among the imports visible
        # in this chunk — presumably imported near the top of the file; verify.
        save_img_seq_to_video(video_save_path, img_seq.astype(np.uint8), 10)
    else:
        raise NotImplementedError
138
+
139
+
140
def init_sampling(sampler="EulerEDMSampler", guider="VanillaCFG", discretization="EDMDiscretization",
                  steps=50, cfg_scale=2.5, num_frames=25):
    """Wire discretization, guider and sampler configs into a sampler object."""
    return get_sampler(
        sampler,
        steps,
        get_discretization(discretization),
        get_guider(guider, cfg_scale, num_frames),
    )
146
+
147
+
148
def get_discretization(discretization):
    """Return an instantiate_from_config dict for the chosen discretization."""
    base = "vista.vwm.modules.diffusionmodules.discretizer."
    if discretization == "LegacyDDPMDiscretization":
        return {"target": base + "LegacyDDPMDiscretization"}
    if discretization == "EDMDiscretization":
        # Karras et al. EDM sigma schedule parameters
        return {
            "target": base + "EDMDiscretization",
            "params": {"sigma_min": 0.002, "sigma_max": 700.0, "rho": 7.0},
        }
    raise NotImplementedError
165
+
166
+
167
def get_guider(guider="LinearPredictionGuider", cfg_scale=2.5, num_frames=25):
    """Return an instantiate_from_config dict for the chosen CFG guider.

    For the prediction guiders, guidance ramps between min_scale=1.0 and
    max_scale=cfg_scale across num_frames frames.
    """
    prefix = "vista.vwm.modules.diffusionmodules.guiders."
    if guider == "IdentityGuider":
        return {"target": prefix + "IdentityGuider"}
    if guider == "VanillaCFG":
        return {"target": prefix + "VanillaCFG", "params": {"scale": cfg_scale}}
    if guider in ("LinearPredictionGuider", "TrianglePredictionGuider"):
        return {
            "target": prefix + guider,
            "params": {
                "max_scale": cfg_scale,
                "min_scale": 1.0,
                "num_frames": num_frames,
            },
        }
    raise NotImplementedError
208
+
209
+
210
def get_sampler(sampler, steps, discretization_config, guider_config):
    """Construct the sampler object; only "EulerEDMSampler" is supported."""
    if sampler != "EulerEDMSampler":
        raise ValueError(f"Unknown sampler {sampler}")
    # Stochastic churn disabled (s_churn = 0): plain deterministic Euler steps.
    return EulerEDMSampler(
        num_steps=steps,
        discretization_config=discretization_config,
        guider_config=guider_config,
        s_churn=0.0,
        s_tmin=0.0,
        s_tmax=999.0,
        s_noise=1.0,
        verbose=False
    )
230
+
231
+
232
def get_batch(keys, value_dict, N: Union[list, ListConfig], device="cuda"):
    """Build the conditioning batch (and an identical unconditional copy).

    Scalar keys are broadcast over the whole batch (prod(N)); action and frame
    keys are repeated over the leading batch dimension N[0].
    """
    batch = dict()
    for key in keys:
        if key not in value_dict:
            continue
        value = value_dict[key]
        if key in ["fps", "fps_id", "motion_bucket_id", "cond_aug"]:
            batch[key] = repeat(torch.tensor([value]).to(device), "1 -> b", b=math.prod(N))
        elif key in ["command", "trajectory", "speed", "angle", "goal"]:
            batch[key] = repeat(value[None].to(device), "1 ... -> b ...", b=N[0])
        elif key in ["cond_frames", "cond_frames_without_noise"]:
            batch[key] = repeat(value, "1 ... -> b ...", b=N[0])
        else:
            raise NotImplementedError

    # unconditional batch starts as a detached copy of every tensor entry
    batch_uc = {
        key: torch.clone(val)
        for key, val in batch.items()
        if isinstance(val, torch.Tensor)
    }
    return batch, batch_uc
253
+
254
+
255
def get_condition(model, value_dict, num_samples, force_uc_zero_embeddings, device):
    """Run the conditioner to get conditional (c) and unconditional (uc) dicts.

    The conditioner is moved to GPU only for the duration of the call. Each
    tensor entry is truncated/moved to ``device`` and, if smaller than
    ``num_samples`` along dim 0, reduced to its first element (kept batched).
    """
    load_model(model.conditioner)
    batch, batch_uc = get_batch(
        list(set([x.input_key for x in model.conditioner.embedders])),
        value_dict,
        [num_samples]
    )
    c, uc = model.conditioner.get_unconditional_conditioning(
        batch,
        batch_uc=batch_uc,
        force_uc_zero_embeddings=force_uc_zero_embeddings
    )
    unload_model(model.conditioner)

    for k in c:
        if isinstance(c[k], torch.Tensor):
            # note: the lambda indexes y[k], so this rewrites both dicts in lockstep
            c[k], uc[k] = map(lambda y: y[k][:num_samples].to(device), (c, uc))
            if c[k].shape[0] < num_samples:
                c[k] = c[k][[0]]
            if uc[k].shape[0] < num_samples:
                uc[k] = uc[k][[0]]
    return c, uc
277
+
278
+
279
def fill_latent(cond, length, cond_indices, device):
    """Scatter conditioning frames into an otherwise-zero latent sequence."""
    frame_shape = tuple(cond.shape[1:])
    latent = torch.zeros((length,) + frame_shape).to(device)
    latent[cond_indices] = cond
    return latent
283
+
284
+
285
@torch.no_grad()
def do_sample(
    images,
    model,
    sampler,
    value_dict,
    num_frames,
    ensemble_size: int = 5,
    force_uc_zero_embeddings: Optional[list] = None,
    initial_cond_indices: Optional[list] = None,
    device="cuda"
):
    """Sample ``ensemble_size`` video rollouts and score their agreement.

    Returns ``(images, reward)`` where ``reward = exp(-mean variance)`` across
    the ensemble: higher means the rollouts agree more.
    NOTE(review): the Bessel-corrected variance divides by ensemble_size - 1,
    so ensemble_size must be >= 2 to avoid a zero divisor — confirm callers.
    """
    if initial_cond_indices is None:
        initial_cond_indices = [0]

    force_uc_zero_embeddings = default(force_uc_zero_embeddings, list())
    precision_scope = autocast

    with torch.no_grad(), precision_scope(device), model.ema_scope("Sampling"):
        # encode input frames into latents with the first-stage VAE
        load_model(model.first_stage_model)
        z = model.encode_first_stage(images)
        unload_model(model.first_stage_model)

        def denoiser(x, sigma, cond, cond_mask):
            return model.denoiser(model.model, x, sigma, cond, cond_mask)

        load_model(model.denoiser)
        load_model(model.model)

        # mask marking which frames are given as conditioning (1) vs generated (0)
        initial_cond_mask = torch.zeros(num_frames).to(device)
        initial_cond_mask[initial_cond_indices] = 1

        c, uc = get_condition(model, value_dict, num_frames, force_uc_zero_embeddings, device)

        sample_ensemble = list()
        for _ in range(ensemble_size):
            noise = torch.randn_like(z)
            sample = sampler(
                denoiser,
                noise,
                cond=c,
                uc=uc,
                cond_frame=z,  # cond_frame will be rescaled when calling the sampler
                cond_mask=initial_cond_mask
            )
            # pin the first frame to the encoded input latent
            sample[0] = z[0]
            sample_ensemble.append(sample)

        # per-element variance across the ensemble (Bessel-corrected)
        u = torch.mean(torch.stack(sample_ensemble), 0)
        diff = torch.zeros_like(sample)
        for each_sample in sample_ensemble:
            diff.add_((each_sample - u) ** 2)
        variance = diff / (ensemble_size - 1)
        reward = torch.exp(-variance.mean()).cpu()

        unload_model(model.model)
        unload_model(model.denoiser)
    return images, reward
vista/sample.py ADDED
@@ -0,0 +1,269 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import argparse
4
+ import json
5
+ import os
6
+ import random
7
+
8
+ import PIL
9
+ import torch
10
+ from pytorch_lightning import seed_everything
11
+ from torchvision import transforms
12
+
13
+ from . import sample_utils
14
+
15
+ VERSION2SPECS = {
16
+ "vwm": {"config": "configs/inference/vista.yaml", "ckpt": "ckpts/vista.safetensors"}
17
+ }
18
+
19
+ DATASET2SOURCES = {
20
+ "NUSCENES": {"data_root": "data/nuscenes", "anno_file": "annos/nuScenes_val.json"},
21
+ "IMG": {"data_root": "image_folder"},
22
+ }
23
+
24
+
25
def parse_args(**parser_kwargs):
    """Build the CLI argument parser for the Vista sampling script."""
    parser = argparse.ArgumentParser(**parser_kwargs)
    parser.add_argument("--version", type=str, default="vwm", help="model version")
    parser.add_argument("--dataset", type=str, default="NUSCENES", help="dataset name")
    parser.add_argument(
        "--save", type=str, default="outputs", help="directory to save samples"
    )
    parser.add_argument(
        "--action",
        type=str,
        default="free",
        help="action mode for control, such as traj, cmd, steer, goal",
    )
    parser.add_argument(
        "--n_rounds", type=int, default=1, help="number of sampling rounds"
    )
    parser.add_argument(
        "--n_frames", type=int, default=25, help="number of frames for each round"
    )
    parser.add_argument(
        "--n_conds",
        type=int,
        default=1,
        help="number of initial condition frames for the first round",
    )
    parser.add_argument(
        "--seed", type=int, default=23, help="random seed for seed_everything"
    )
    parser.add_argument(
        "--height", type=int, default=576, help="target height of the generated video"
    )
    parser.add_argument(
        "--width", type=int, default=1024, help="target width of the generated video"
    )
    parser.add_argument(
        "--cfg_scale",
        type=float,
        default=2.5,
        help="scale of the classifier-free guidance",
    )
    parser.add_argument(
        "--cond_aug", type=float, default=0.0, help="strength of the noise augmentation"
    )
    parser.add_argument(
        "--n_steps", type=int, default=50, help="number of sampling steps"
    )
    # NOTE(review): store_false means rand_gen defaults to True (random order)
    # and passing --rand_gen actually SWITCHES TO sequential iteration — the
    # flag name reads inverted relative to its effect; confirm intent.
    parser.add_argument(
        "--rand_gen",
        action="store_false",
        help="whether to generate samples randomly or sequentially",
    )
    parser.add_argument(
        "--low_vram", action="store_true", help="whether to save memory or not"
    )
    return parser
80
+
81
+
82
def get_sample(
    selected_index=0, dataset_name="NUSCENES", num_frames=25, action_mode="free"
):
    """Resolve one sample: frame paths plus optional action conditioning.

    Returns ``(path_list, wrapped_index, total_length, action_dict_or_None)``.
    ``selected_index`` is expected to be non-negative; it is wrapped modulo the
    dataset length. (Fix: the original wrapped with a subtract-in-a-loop,
    which is O(index/length) — modulo is O(1) and identical for indices >= 0,
    which the calling loop in sample.py guarantees.)
    """
    dataset_dict = DATASET2SOURCES[dataset_name]
    action_dict = None
    if dataset_name == "IMG":
        image_list = os.listdir(dataset_dict["data_root"])
        total_length = len(image_list)
        selected_index %= total_length
        image_file = image_list[selected_index]

        # a single still image repeated as the whole clip
        path_list = [os.path.join(dataset_dict["data_root"], image_file)] * num_frames
    else:
        with open(dataset_dict["anno_file"]) as anno_json:
            all_samples = json.load(anno_json)
        total_length = len(all_samples)
        selected_index %= total_length
        sample_dict = all_samples[selected_index]

        path_list = list()
        if dataset_name == "NUSCENES":
            for index in range(num_frames):
                image_path = os.path.join(
                    dataset_dict["data_root"], sample_dict["frames"][index]
                )
                assert os.path.exists(image_path), image_path
                path_list.append(image_path)
            if action_mode != "free":
                action_dict = dict()
                if action_mode in ("traj", "trajectory"):
                    action_dict["trajectory"] = torch.tensor(sample_dict["traj"][2:])
                elif action_mode in ("cmd", "command"):
                    action_dict["command"] = torch.tensor(sample_dict["cmd"])
                elif action_mode == "steer":
                    # scene might be empty
                    if sample_dict["speed"]:
                        action_dict["speed"] = torch.tensor(sample_dict["speed"][1:])
                    # scene might be empty
                    if sample_dict["angle"]:
                        action_dict["angle"] = (
                            torch.tensor(sample_dict["angle"][1:]) / 780
                        )
                elif action_mode == "goal":
                    # goal point might be invalid / outside the 1600x900 image
                    if (
                        sample_dict["z"] > 0
                        and 0 < sample_dict["goal"][0] < 1600
                        and 0 < sample_dict["goal"][1] < 900
                    ):
                        action_dict["goal"] = torch.tensor(
                            [
                                sample_dict["goal"][0] / 1600,
                                sample_dict["goal"][1] / 900,
                            ]
                        )
                else:
                    raise ValueError(f"Unsupported action mode {action_mode}")
        else:
            raise ValueError(f"Invalid dataset {dataset_name}")
    return path_list, selected_index, total_length, action_dict
144
+
145
+
146
def load_img(file_name, target_height=320, target_width=576, device="cuda"):
    """Load an image, center-crop to the target aspect ratio, resize, and
    normalize pixel values to [-1, 1].

    Returns a (3, target_height, target_width) float tensor on ``device``.
    Raises ValueError when ``file_name`` is None.
    """
    if file_name is not None:
        image = PIL.Image.open(file_name)
        if not image.mode == "RGB":
            image = image.convert("RGB")
    else:
        raise ValueError(f"Invalid image file {file_name}")
    ori_w, ori_h = image.size
    # print(f"Loaded input image of size ({ori_w}, {ori_h})")

    if ori_w / ori_h > target_width / target_height:
        # image is too wide: center-crop the width to the target aspect ratio
        tmp_w = int(target_width / target_height * ori_h)
        left = (ori_w - tmp_w) // 2
        right = (ori_w + tmp_w) // 2
        image = image.crop((left, 0, right, ori_h))
    elif ori_w / ori_h < target_width / target_height:
        # image is too tall: center-crop the height to the target aspect ratio
        tmp_h = int(target_height / target_width * ori_w)
        top = (ori_h - tmp_h) // 2
        bottom = (ori_h + tmp_h) // 2
        image = image.crop((0, top, ori_w, bottom))
    image = image.resize((target_width, target_height), resample=PIL.Image.LANCZOS)
    if not image.mode == "RGB":
        image = image.convert("RGB")
    # ToTensor maps to [0, 1]; the Lambda rescales to [-1, 1]
    image = transforms.Compose(
        [transforms.ToTensor(), transforms.Lambda(lambda x: x * 2.0 - 1.0)]
    )(image)
    return image.to(device)
173
+
174
+
175
if __name__ == "__main__":
    # CLI entry point: iterate over dataset samples and generate videos.
    parser = parse_args()
    opt, unknown = parser.parse_known_args()

    sample_utils.set_lowvram_mode(opt.low_vram)
    version_dict = VERSION2SPECS[opt.version]
    model = sample_utils.init_model(version_dict)
    unique_keys = set([x.input_key for x in model.conditioner.embedders])

    sample_index = 0
    while sample_index >= 0:
        # reseed per sample so each clip is reproducible in isolation
        seed_everything(opt.seed)

        frame_list, sample_index, dataset_length, action_dict = get_sample(
            sample_index, opt.dataset, opt.n_frames, opt.action
        )

        img_seq = list()
        for each_path in frame_list:
            img = load_img(each_path, opt.height, opt.width)
            img_seq.append(img)
        images = torch.stack(img_seq)

        # conditioning: first frame (clean + noise-augmented) plus any actions
        value_dict = sample_utils.init_embedder_options(unique_keys)
        cond_img = img_seq[0][None]
        value_dict["cond_frames_without_noise"] = cond_img
        value_dict["cond_aug"] = opt.cond_aug
        value_dict["cond_frames"] = cond_img + opt.cond_aug * torch.randn_like(cond_img)
        if action_dict is not None:
            for key, value in action_dict.items():
                value_dict[key] = value

        # multi-round (long-horizon) rollouts use the triangle guider
        if opt.n_rounds > 1:
            guider = "TrianglePredictionGuider"
        else:
            guider = "VanillaCFG"
        sampler = sample_utils.init_sampling(
            guider=guider,
            steps=opt.n_steps,
            cfg_scale=opt.cfg_scale,
            num_frames=opt.n_frames,
        )

        # embeddings to zero out for the unconditional branch of CFG
        uc_keys = [
            "cond_frames",
            "cond_frames_without_noise",
            "command",
            "trajectory",
            "speed",
            "angle",
            "goal",
        ]

        # NOTE(review): sample_utils.do_sample dereferences its log_queue
        # argument unconditionally; calling without one crashes — verify.
        out = sample_utils.do_sample(
            images,
            model,
            sampler,
            value_dict,
            num_rounds=opt.n_rounds,
            num_frames=opt.n_frames,
            force_uc_zero_embeddings=uc_keys,
            initial_cond_indices=[index for index in range(opt.n_conds)],
        )

        if isinstance(out, (tuple, list)):
            samples, samples_z, inputs = out
            virtual_path = os.path.join(opt.save, "virtual")
            real_path = os.path.join(opt.save, "real")
            sample_utils.perform_save_locally(
                virtual_path, samples, "videos", opt.dataset, sample_index
            )
            sample_utils.perform_save_locally(
                virtual_path, samples, "grids", opt.dataset, sample_index
            )
            sample_utils.perform_save_locally(
                virtual_path, samples, "images", opt.dataset, sample_index
            )
            sample_utils.perform_save_locally(
                real_path, inputs, "videos", opt.dataset, sample_index
            )
            sample_utils.perform_save_locally(
                real_path, inputs, "grids", opt.dataset, sample_index
            )
            sample_utils.perform_save_locally(
                real_path, inputs, "images", opt.dataset, sample_index
            )
        else:
            raise TypeError

        # advance: random jump (default) or sequential until the end, then stop
        if opt.rand_gen:
            sample_index += random.randint(1, dataset_length - 1)
        else:
            sample_index += 1
            if dataset_length <= sample_index:
                sample_index = -1
vista/sample_utils.py ADDED
@@ -0,0 +1,442 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import math
4
+ import os
5
+ import queue
6
+ from typing import Optional, Union
7
+
8
+ import numpy as np
9
+ import rerun as rr
10
+ import torch
11
+ import torchvision
12
+ from einops import rearrange, repeat
13
+ from omegaconf import ListConfig, OmegaConf
14
+ from PIL import Image
15
+ from safetensors.torch import load_file as load_safetensors
16
+ from torch import autocast
17
+ from tqdm import tqdm
18
+
19
+ from .vwm.modules.diffusionmodules.sampling import EulerEDMSampler
20
+ from .vwm.util import default, instantiate_from_config
21
+
22
+
23
def init_model(version_dict, load_ckpt=True):
    """Load the OmegaConf config and build the model, optionally with weights."""
    config = OmegaConf.load(version_dict["config"])
    checkpoint = None
    if load_ckpt:
        checkpoint = version_dict["ckpt"]
    return load_model_from_config(config, checkpoint)
27
+
28
+
29
+ lowvram_mode = True
30
+
31
+
32
def set_lowvram_mode(mode):
    """Set the module-level flag that controls on-demand GPU (un)loading."""
    global lowvram_mode
    lowvram_mode = mode
35
+
36
+
37
def initial_model_load(model):
    """Prepare a freshly built model for inference.

    In low-VRAM mode only the diffusion backbone (``model.model``) is converted
    to fp16 and everything stays on CPU — submodules are moved to the GPU on
    demand via load_model/unload_model. Otherwise the whole model is moved to
    CUDA immediately.
    """
    global lowvram_mode
    if lowvram_mode:
        model.model.half()
    else:
        model.cuda()
    return model
44
+
45
+
46
def load_model(model):
    """Move a submodule onto the GPU right before it is used."""
    model.cuda()
48
+
49
+
50
def unload_model(model):
    """Offload *model* to CPU in low-VRAM mode and release cached CUDA memory.

    No-op when low-VRAM mode is disabled.
    """
    # Fix: removed stray debug print(lowvram_mode) left over from development.
    global lowvram_mode
    if lowvram_mode:
        model.cpu()
        torch.cuda.empty_cache()
        torch.cuda.synchronize()
57
+
58
+
59
def load_model_from_config(config, ckpt=None):
    """Instantiate the model from an OmegaConf config and optionally load weights.

    ``ckpt`` may be a PyTorch Lightning ``*.ckpt`` file; any other path is
    treated as a safetensors file. Returns the model in eval mode, prepared by
    initial_model_load (fp16 backbone in low-VRAM mode, else on CUDA).
    """
    # Fix: removed stray debug print(ckpt) left over from development.
    model = instantiate_from_config(config.model)

    if ckpt is not None:
        print(f"Loading model from {ckpt}")
        if ckpt.endswith("ckpt"):
            pl_svd = torch.load(ckpt, map_location="cpu")
            # dict contains:
            # "epoch", "global_step", "pytorch-lightning_version",
            # "state_dict", "loops", "callbacks", "optimizer_states", "lr_schedulers"
            if "global_step" in pl_svd:
                print(f"Global step: {pl_svd['global_step']}")
            svd = pl_svd["state_dict"]
        else:
            # anything that is not a Lightning checkpoint is read as safetensors
            svd = load_safetensors(ckpt)

        # strict=False: mismatched keys are reported below instead of raising
        missing, unexpected = model.load_state_dict(svd, strict=False)
        if len(missing) > 0:
            print(f"Missing keys: {missing}")
        if len(unexpected) > 0:
            print(f"Unexpected keys: {unexpected}")

    model = initial_model_load(model)
    model.eval()
    return model
85
+
86
+
87
def init_embedder_options(keys):
    """Hardcoded demo conditioning values for the requested embedder keys."""
    value_dict = dict()
    for key in keys:
        if key in ("fps", "fps_id"):
            value_dict["fps"] = 10
            value_dict["fps_id"] = 9  # fps - 1
        elif key == "motion_bucket_id":
            value_dict["motion_bucket_id"] = 127  # valid range [0, 511]
    return value_dict
98
+
99
+
100
def perform_save_locally(save_path, samples, mode, dataset_name, sample_index):
    """Save frames under ``<save_path>/<mode>`` as PNGs, one grid PNG, or an MP4.

    Frames saved under a path containing "real" are assumed to be in [-1, 1]
    and are rescaled; everything else is assumed to already be in [0, 1].
    """
    assert mode in ["images", "grids", "videos"]
    merged_path = os.path.join(save_path, mode)
    os.makedirs(merged_path, exist_ok=True)
    samples = samples.cpu()

    if mode == "images":
        frame_count = 0
        for sample in samples:
            sample = rearrange(sample.numpy(), "c h w -> h w c")
            if "real" in save_path:
                sample = 255.0 * (sample + 1.0) / 2.0
            else:
                sample = 255.0 * sample
            image_save_path = os.path.join(
                merged_path, f"{dataset_name}_{sample_index:06}_{frame_count:04}.png"
            )
            Image.fromarray(sample.astype(np.uint8)).save(image_save_path)
            frame_count += 1
    elif mode == "grids":
        # square-ish grid: nrow = floor(sqrt(batch))
        grid = torchvision.utils.make_grid(samples, nrow=int(samples.shape[0] ** 0.5))
        grid = grid.transpose(0, 1).transpose(1, 2).squeeze(-1).numpy()
        if "real" in save_path:
            grid = 255.0 * (grid + 1.0) / 2.0
        else:
            grid = 255.0 * grid
        grid_save_path = os.path.join(
            merged_path, f"{dataset_name}_{sample_index:06}.png"
        )
        Image.fromarray(grid.astype(np.uint8)).save(grid_save_path)
    elif mode == "videos":
        img_seq = rearrange(samples.numpy(), "t c h w -> t h w c")
        if "real" in save_path:
            img_seq = 255.0 * (img_seq + 1.0) / 2.0
        else:
            img_seq = 255.0 * img_seq
        video_save_path = os.path.join(
            merged_path, f"{dataset_name}_{sample_index:06}.mp4"
        )
        # Fix: the original called save_img_seq_to_video(), which is neither
        # defined nor imported in this module and raised NameError. Write the
        # (T, H, W, C) uint8 sequence at 10 fps with torchvision instead
        # (requires the PyAV backend at runtime).
        torchvision.io.write_video(
            video_save_path, torch.from_numpy(img_seq.astype(np.uint8)), fps=10
        )
    else:
        raise NotImplementedError
148
+
149
+
150
def init_sampling(
    sampler="EulerEDMSampler",
    guider="VanillaCFG",
    discretization="EDMDiscretization",
    steps=50,
    cfg_scale=2.5,
    num_frames=25,
):
    """Assemble a ready-to-use sampler from component config names."""
    disc_cfg = get_discretization(discretization)
    guide_cfg = get_guider(guider, cfg_scale, num_frames)
    return get_sampler(sampler, steps, disc_cfg, guide_cfg)
162
+
163
+
164
def get_discretization(discretization):
    """Map a discretization name to its instantiate_from_config dict."""
    configs = {
        "LegacyDDPMDiscretization": {
            "target": "vista.vwm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization"
        },
        "EDMDiscretization": {
            "target": "vista.vwm.modules.diffusionmodules.discretizer.EDMDiscretization",
            # Karras et al. EDM sigma schedule parameters
            "params": {"sigma_min": 0.002, "sigma_max": 700.0, "rho": 7.0},
        },
    }
    if discretization not in configs:
        raise NotImplementedError
    return configs[discretization]
177
+
178
+
179
def get_guider(guider="LinearPredictionGuider", cfg_scale=2.5, num_frames=25):
    """Map a guider name to its instantiate_from_config dict.

    Prediction guiders ramp guidance between min_scale=1.0 and
    max_scale=cfg_scale across num_frames frames.
    """
    target = "vista.vwm.modules.diffusionmodules.guiders." + guider
    if guider == "IdentityGuider":
        params = None
    elif guider == "VanillaCFG":
        params = {"scale": cfg_scale}
    elif guider in ("LinearPredictionGuider", "TrianglePredictionGuider"):
        params = {
            "max_scale": cfg_scale,
            "min_scale": 1.0,
            "num_frames": num_frames,
        }
    else:
        raise NotImplementedError
    guider_config = {"target": target}
    if params is not None:
        guider_config["params"] = params
    return guider_config
218
+
219
+
220
def get_sampler(sampler, steps, discretization_config, guider_config):
    """Build the sampler object for the given name ("EulerEDMSampler" only)."""
    if sampler == "EulerEDMSampler":
        # stochastic churn disabled (s_churn = 0): deterministic Euler steps
        churn_params = dict(s_churn=0.0, s_tmin=0.0, s_tmax=999.0, s_noise=1.0)
        return EulerEDMSampler(
            num_steps=steps,
            discretization_config=discretization_config,
            guider_config=guider_config,
            verbose=False,
            **churn_params,
        )
    raise ValueError(f"Unknown sampler {sampler}")
240
+
241
+
242
def get_batch(keys, value_dict, N: Union[list, ListConfig], device="cuda"):
    """Build the conditioning batch (and an identical unconditional copy).

    Scalar keys broadcast over the whole batch (prod(N)); action and frame
    keys repeat over the leading batch dimension N[0].
    """
    batch = dict()

    for key in keys:
        if key not in value_dict:
            continue
        if key in ["fps", "fps_id", "motion_bucket_id", "cond_aug"]:
            batch[key] = repeat(
                torch.tensor([value_dict[key]]).to(device), "1 -> b", b=math.prod(N)
            )
        elif key in ["command", "trajectory", "speed", "angle", "goal"]:
            batch[key] = repeat(
                value_dict[key][None].to(device), "1 ... -> b ...", b=N[0]
            )
        elif key in ["cond_frames", "cond_frames_without_noise"]:
            batch[key] = repeat(value_dict[key], "1 ... -> b ...", b=N[0])
        else:
            raise NotImplementedError

    # unconditional batch starts as a detached copy of every tensor entry
    batch_uc = {
        key: torch.clone(val)
        for key, val in batch.items()
        if isinstance(val, torch.Tensor)
    }
    return batch, batch_uc
267
+
268
+
269
def get_condition(model, value_dict, num_samples, force_uc_zero_embeddings, device):
    """Run the conditioner to get conditional (c) and unconditional (uc) dicts.

    The conditioner is moved to GPU only for the duration of the call. Each
    tensor entry is truncated/moved to ``device`` and, if smaller than
    ``num_samples`` along dim 0, reduced to its first element (kept batched).
    """
    load_model(model.conditioner)
    batch, batch_uc = get_batch(
        list(set([x.input_key for x in model.conditioner.embedders])),
        value_dict,
        [num_samples],
    )
    c, uc = model.conditioner.get_unconditional_conditioning(
        batch, batch_uc=batch_uc, force_uc_zero_embeddings=force_uc_zero_embeddings
    )
    unload_model(model.conditioner)

    for k in c:
        if isinstance(c[k], torch.Tensor):
            # note: the lambda indexes y[k], so this rewrites both dicts in lockstep
            c[k], uc[k] = map(lambda y: y[k][:num_samples].to(device), (c, uc))
            if c[k].shape[0] < num_samples:
                c[k] = c[k][[0]]
            if uc[k].shape[0] < num_samples:
                uc[k] = uc[k][[0]]
    return c, uc
289
+
290
+
291
def fill_latent(cond, length, cond_indices, device):
    """Zero latent sequence of `length` frames with `cond` at `cond_indices`."""
    target_shape = (length, *cond.shape[1:])
    latent = torch.zeros(target_shape).to(device)
    latent[cond_indices] = cond
    return latent
295
+
296
+
297
@torch.no_grad()
def do_sample(
    images,
    model,
    sampler,
    value_dict,
    num_rounds,
    num_frames,
    force_uc_zero_embeddings: Optional[list] = None,
    initial_cond_indices: Optional[list] = None,
    device="cuda",
    log_queue: Optional[queue.SimpleQueue] = None,
):
    """Autoregressively sample ``num_rounds`` rounds of ``num_frames`` frames.

    Rounds after the first are conditioned on the last 3 latent frames of the
    previous round, so consecutive rounds overlap by 3 frames. Intermediate
    frames are optionally pushed to ``log_queue`` as rerun images.
    Returns ``(decoded_frames, latents, input_images)``.
    """
    if initial_cond_indices is None:
        initial_cond_indices = [0]

    force_uc_zero_embeddings = default(force_uc_zero_embeddings, list())
    precision_scope = autocast

    def _log_frame(image, frame_id, round_id, idx, round_len):
        # Fix: the original dereferenced log_queue unconditionally even though
        # it defaults to None, raising AttributeError for non-logging callers.
        if log_queue is None:
            return
        log_queue.put(
            (
                "generated_image",
                rr.Image(image.cpu().permute(1, 2, 0)),
                [
                    ("frame_id", frame_id),
                    ("diffusion", 0),
                    ("combined", 1 + 2 * round_id + (idx * 1.0 / round_len)),
                ],
            )
        )

    with torch.no_grad(), precision_scope(device), model.ema_scope("Sampling"):
        c, uc = get_condition(
            model, value_dict, num_frames, force_uc_zero_embeddings, device
        )

        load_model(model.first_stage_model)
        z = model.encode_first_stage(images)
        unload_model(model.first_stage_model)

        # rounds overlap by 3 frames, hence (num_frames - 3) new frames per round
        samples_z = torch.zeros((num_rounds * (num_frames - 3) + 3, *z.shape[1:])).to(
            device
        )

        sampling_progress = tqdm(total=num_rounds, desc="Compute sequences")

        def denoiser(x, sigma, cond, cond_mask):
            return model.denoiser(model.model, x, sigma, cond, cond_mask)

        load_model(model.denoiser)
        load_model(model.model)

        # masks mark which frames are given as conditioning (1) vs generated (0)
        initial_cond_mask = torch.zeros(num_frames).to(device)
        prediction_cond_mask = torch.zeros(num_frames).to(device)
        initial_cond_mask[initial_cond_indices] = 1
        prediction_cond_mask[[0, 1, 2]] = 1

        generated_images = []

        noise = torch.randn_like(z)
        sample = sampler(
            denoiser,
            noise,
            cond=c,
            uc=uc,
            cond_frame=z,  # cond_frame will be rescaled when calling the sampler
            cond_mask=initial_cond_mask,
            num_sequence=0,
            log_queue=log_queue,
        )
        sampling_progress.update(1)
        # pin the first frame to the encoded input latent
        sample[0] = z[0]
        samples_z[:num_frames] = sample

        generated_images.append(decode_samples(sample[:num_frames], model))

        for i, generated_image in enumerate(generated_images[-1]):
            _log_frame(generated_image, i, 0, i, len(generated_images[-1]))

        for n in range(num_rounds - 1):
            load_model(model.first_stage_model)
            samples_x_for_guidance = model.decode_first_stage(sample[-14:])
            unload_model(model.first_stage_model)
            value_dict["cond_frames_without_noise"] = samples_x_for_guidance[[-3]]
            value_dict["cond_frames"] = sample[[-3]] / model.scale_factor

            # the conditioning latent is already encoded; skip re-encoding
            for embedder in model.conditioner.embedders:
                if hasattr(embedder, "skip_encode"):
                    embedder.skip_encode = True
            c, uc = get_condition(
                model, value_dict, num_frames, force_uc_zero_embeddings, device
            )
            for embedder in model.conditioner.embedders:
                if hasattr(embedder, "skip_encode"):
                    embedder.skip_encode = False

            filled_latent = fill_latent(sample[-3:], num_frames, [0, 1, 2], device)

            noise = torch.randn_like(filled_latent)
            sample = sampler(
                denoiser,
                noise,
                cond=c,
                uc=uc,
                cond_frame=filled_latent,  # cond_frame will be rescaled when calling the sampler
                cond_mask=prediction_cond_mask,
                num_sequence=n + 1,
                log_queue=log_queue,
            )
            sampling_progress.update(1)
            first_frame_id = (n + 1) * (num_frames - 3) + 3
            last_frame_id = (n + 1) * (num_frames - 3) + num_frames
            samples_z[first_frame_id:last_frame_id] = sample[3:]

            generated_images.append(decode_samples(sample[3:], model))

            for i, generated_image in enumerate(generated_images[-1]):
                _log_frame(
                    generated_image,
                    first_frame_id + i,
                    n + 1,
                    i,
                    len(generated_images[-1]),
                )

        unload_model(model.model)
        unload_model(model.denoiser)

        generated_images = torch.concat(generated_images, dim=0)
        return generated_images, samples_z, images
435
+
436
+
437
def decode_samples(samples, model):
    """Decode latents with the first-stage VAE and map outputs into [0, 1]."""
    load_model(model.first_stage_model)
    decoded = model.decode_first_stage(samples)
    unload_model(model.first_stage_model)
    return torch.clamp((decoded + 1.0) / 2.0, min=0.0, max=1.0)
vista/train.py ADDED
@@ -0,0 +1,924 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import argparse
4
+ import datetime
5
+ import glob
6
+ import inspect
7
+ import os
8
+ import sys
9
+ from inspect import Parameter
10
+
11
+ import imageio
12
+ import numpy as np
13
+ import pytorch_lightning as pl
14
+ import torch
15
+ import torchvision
16
+ from einops import rearrange
17
+ from matplotlib import pyplot as plt
18
+ from natsort import natsorted
19
+ from omegaconf import OmegaConf
20
+ from packaging import version
21
+ from PIL import Image
22
+ from pytorch_lightning import seed_everything
23
+ from pytorch_lightning.callbacks import Callback
24
+ from pytorch_lightning.trainer import Trainer
25
+ from pytorch_lightning.utilities import rank_zero_only
26
+ from safetensors.torch import load_file as load_safetensors
27
+
28
+ from .vwm.util import instantiate_from_config, isheatmap
29
+
30
+ MULTINODE_HACKS = True
31
+
32
+
33
def default_trainer_args():
    """Return {parameter_name: default} for ``pytorch_lightning.Trainer.__init__``.

    Used by ``get_parser()`` to expose every trainer option as a CLI flag, and
    later in ``__main__`` to detect which flags the user actually overrode.
    Parameters without a default (besides ``self``) are excluded.
    """
    params = dict(inspect.signature(Trainer.__init__).parameters)
    params.pop("self")
    return {
        name: param.default
        for name, param in params.items()
        # Bug fix: compare the parameter's *default* against the empty sentinel.
        # The previous check (`argspec[param] != Parameter.empty`) compared the
        # Parameter object itself — always true — so defaults-less parameters
        # leaked through with Parameter.empty as their "default" value.
        if param.default is not Parameter.empty
    }
42
+
43
+
44
def get_parser(**parser_kwargs):
    """Build the command-line parser for training.

    Besides the script's own options, every ``pytorch_lightning.Trainer``
    default is exposed as ``--<name>`` so overrides can be detected later.
    """

    def str2bool(value):
        # Accept real booleans plus the usual textual spellings.
        if isinstance(value, bool):
            return value
        if value.lower() in ("yes", "true", "t", "y", "1"):
            return True
        if value.lower() in ("no", "false", "f", "n", "0"):
            return False
        raise argparse.ArgumentTypeError("Boolean value expected")

    parser = argparse.ArgumentParser(**parser_kwargs)

    # Shared keyword set for optional boolean flags (`--flag` alone means True).
    bool_flag = {"type": str2bool, "nargs": "?", "const": True}

    specs = [
        (("-n", "--name"),
         {"type": str, "const": True, "default": "", "nargs": "?",
          "help": "postfix for logdir"}),
        (("--no_date",),
         {**bool_flag, "default": False,
          "help": "if True, skip date generation for logdir and only use naming via opt.base or opt.name (+ opt.postfix, optionally)"}),
        (("-r", "--resume"),
         {"type": str, "const": True, "default": "", "nargs": "?",
          "help": "resume from logdir or checkpoint in logdir"}),
        (("-b", "--base"),
         {"nargs": "*", "metavar": "base_config.yaml", "default": list(),
          "help": "paths to base configs. "
                  "Loaded from left-to-right. "
                  "Parameters can be overwritten or added with command-line options of the form `--key value`"}),
        (("-t", "--train"),
         {**bool_flag, "default": True, "help": "train"}),
        (("--no_test",),
         {**bool_flag, "default": True, "help": "disable test"}),
        (("-p", "--project"),
         {"help": "name of new or path to existing project"}),
        (("-d", "--debug"),
         {**bool_flag, "default": False, "help": "enable post-mortem debugging"}),
        (("-s", "--seed"),
         {"type": int, "default": 23, "help": "seed for seed_everything"}),
        (("-f", "--postfix"),
         {"type": str, "default": "", "help": "post-postfix for default name"}),
        (("-l", "--logdir"),
         {"type": str, "default": "logs", "help": "directory for logging data"}),
        (("--scale_lr",),
         {**bool_flag, "default": False,
          "help": "scale base-lr by ngpu * batch_size * n_accumulate"}),
        (("--legacy_naming",),
         {**bool_flag, "default": False,
          "help": "name run based on config file name if true, else by whole path"}),
        (("--enable_tf32",),
         {**bool_flag, "default": False,
          "help": "enables the TensorFloat32 format both for matmuls and cuDNN for pytorch 1.12"}),
        (("--no_base_name",),
         {**bool_flag, "default": False, "help": "no config name"}),
    ]
    for flags, kwargs in specs:
        parser.add_argument(*flags, **kwargs)

    # PL 2.x removed Trainer(resume_from_checkpoint=...), so re-add it as a flag.
    if version.parse(pl.__version__) >= version.parse("2.0.0"):
        parser.add_argument(
            "--resume_from_checkpoint",
            type=str,
            default=None,
            help="single checkpoint file to resume from"
        )
    parser.add_argument(
        "--n_devices",
        type=int,
        default=8,
        help="number of gpus in training"
    )
    parser.add_argument(
        "--finetune",
        type=str,
        default="ckpts/pytorch_model.bin",
        help="path to checkpoint to finetune from"
    )

    # Mirror every Trainer.__init__ default as a CLI option.
    for key, default in default_trainer_args().items():
        parser.add_argument("--" + key, default=default)
    return parser
199
+
200
+
201
def get_checkpoint_name(logdir):
    """Locate the checkpoint to resume from and pick a name for crash saves.

    Looks for ``last*.ckpt`` files under ``<logdir>/checkpoints``. When several
    exist, the most recently modified one is chosen (and recorded in
    ``most_recent_ckpt.txt``); its ``-v<N>`` suffix seeds the next "melk"
    checkpoint name.

    Returns:
        (ckpt_path, melk_ckpt_name)

    Raises:
        FileNotFoundError: if no matching checkpoint exists (previously this
            crashed with an unhelpful IndexError on the empty glob result).
    """
    # `last**.ckpt` matched the same names as `last*.ckpt` (glob `**` is not
    # recursive inside a filename component) — use the plain form.
    pattern = os.path.join(logdir, "checkpoints", "last*.ckpt")
    ckpt = natsorted(glob.glob(pattern))
    print("Available last checkpoints:", ckpt)
    if not ckpt:
        # Robustness fix: fail with a clear message instead of IndexError below.
        raise FileNotFoundError(f"No checkpoint matching {pattern} found")
    if len(ckpt) > 1:
        print("Got most recent checkpoint")
        ckpt = sorted(ckpt, key=os.path.getmtime)[-1]
        print(f"Most recent ckpt is {ckpt}")
        with open(os.path.join(logdir, "most_recent_ckpt.txt"), "w") as f:
            f.write(ckpt + "\n")
        try:
            # Renamed from `version`: the old local shadowed the module-level
            # `from packaging import version` import.
            ckpt_version = int(ckpt.split("/")[-1].split("-v")[-1].split(".")[0])
        except Exception as e:
            # version confusion but not bad
            print(e)
            ckpt_version = 1
    else:
        # in this case, we only have one "last.ckpt"
        ckpt = ckpt[0]
        ckpt_version = 1
    melk_ckpt_name = f"last-v{ckpt_version}.ckpt"
    print(f"Current melk ckpt name: {melk_ckpt_name}")
    return ckpt, melk_ckpt_name
225
+
226
+
227
def save_img_seq_to_video(out_path, img_seq, fps):
    """Write a sequence of frames to a video file.

    Args:
        out_path: destination video path (format inferred from the extension).
        img_seq: iterable of uint8 numpy frames, each [h, w, c].
        fps: frames per second of the output video.
    """
    writer = imageio.get_writer(out_path, fps=fps)
    try:
        for img in img_seq:
            writer.append_data(img)
    finally:
        # Bug fix: always release the file handle, even when a frame append
        # raises — previously a failure mid-sequence leaked the open writer.
        writer.close()
233
+
234
+
235
class SetupCallback(Callback):
    """Lightning callback that prepares the run directory structure and
    persists the merged project/lightning configs at fit start (rank 0 only).
    """

    def __init__(
        self,
        resume,
        now,
        logdir,
        ckptdir,
        cfgdir,
        config,
        lightning_config,
        debug,
        ckpt_name=None
    ):
        super().__init__()
        self.resume = resume  # truthy when resuming an existing run
        self.now = now  # timestamp string used in saved config filenames
        self.logdir = logdir
        self.ckptdir = ckptdir
        self.cfgdir = cfgdir
        self.config = config  # merged project config (OmegaConf)
        self.lightning_config = lightning_config  # trainer/callback config (OmegaConf)
        self.debug = debug
        self.ckpt_name = ckpt_name  # crash-save ("melk") name; unused while saving below is disabled

    def on_exception(self, trainer: pl.Trainer, pl_module, exception):
        # Checkpoint-on-crash is intentionally disabled (commented out below);
        # the hook is kept so rank 0 still announces the shutdown.
        if not self.debug and trainer.global_rank == 0:
            # print("Summoning checkpoint")
            # if self.ckpt_name is None:
            #     ckpt_path = os.path.join(self.ckptdir, "last.ckpt")
            # else:
            #     ckpt_path = os.path.join(self.ckptdir, self.ckpt_name)
            # trainer.save_checkpoint(ckpt_path)
            print("Exiting")

    def on_fit_start(self, trainer, pl_module):
        if trainer.global_rank == 0:
            # create logdirs and save configs
            os.makedirs(self.logdir, exist_ok=True)
            os.makedirs(self.ckptdir, exist_ok=True)
            os.makedirs(self.cfgdir, exist_ok=True)

            if "callbacks" in self.lightning_config:
                if "metrics_over_trainsteps_checkpoint" in self.lightning_config["callbacks"]:
                    os.makedirs(
                        os.path.join(self.ckptdir, "trainstep_checkpoints"),
                        exist_ok=True
                    )
            print("Project config")
            print(OmegaConf.to_yaml(self.config))
            if MULTINODE_HACKS:
                import time

                # Give the other nodes a moment so they observe the freshly
                # created directories before configs are written.
                time.sleep(5)
            OmegaConf.save(
                self.config,
                os.path.join(self.cfgdir, f"{self.now}-project.yaml")
            )

            print("Lightning config")
            print(OmegaConf.to_yaml(self.lightning_config))
            OmegaConf.save(
                OmegaConf.create({"lightning": self.lightning_config}),
                os.path.join(self.cfgdir, f"{self.now}-lightning.yaml")
            )
        else:
            # ModelCheckpoint callback created log directory, remove it
            # (moved under child_runs/ instead of deleted).
            if not MULTINODE_HACKS and not self.resume and os.path.exists(self.logdir):
                dst, name = os.path.split(self.logdir)
                dst = os.path.join(dst, "child_runs", name)
                os.makedirs(os.path.split(dst)[0], exist_ok=True)
                try:
                    os.rename(self.logdir, dst)
                except FileNotFoundError:
                    pass
309
+
310
+
311
class ImageLogger(Callback):
    """Periodically renders the outputs of ``pl_module.log_images(...)`` to disk.

    Entries whose key contains "mp4" are written as videos, heatmaps as
    matplotlib figures, and everything else as PNG image grids, under
    ``<save_dir>/images/<split>/<log_type>/``. All work happens on rank 0 only.
    """

    def __init__(
        self,
        batch_frequency,
        clamp=True,
        increase_log_steps=True,
        rescale=True,
        disabled=False,
        log_on_batch_idx=False,
        log_first_step=False,
        log_images_kwargs=None,
        log_before_first_step=False,
        enable_autocast=True,
        num_frames=25
    ):
        super().__init__()
        self.enable_autocast = enable_autocast
        self.rescale = rescale  # map tensors from [-1, 1] to [0, 1] before saving
        self.batch_freq = batch_frequency
        # Additionally log at powers of two below batch_freq so early training
        # progress is visible; disabled via increase_log_steps=False.
        self.log_steps = [2 ** n for n in range(int(np.log2(self.batch_freq)) + 1)]
        if not increase_log_steps:
            self.log_steps = [self.batch_freq]
        self.clamp = clamp
        self.disabled = disabled
        self.log_on_batch_idx = log_on_batch_idx  # use batch_idx instead of global_step for frequency checks
        self.log_images_kwargs = log_images_kwargs if log_images_kwargs else dict()
        self.log_first_step = log_first_step
        self.log_before_first_step = log_before_first_step
        self.num_frames = num_frames  # frames per clip when reshaping (b t) video batches

    @rank_zero_only
    def log_local(
        self,
        save_dir,
        split,
        images,
        global_step,
        current_epoch,
        batch_idx
    ):
        """Write every entry of ``images`` under ``<save_dir>/images/<split>/``."""
        root = os.path.join(save_dir, "images", split)
        for log_type in images:
            if isheatmap(images[log_type]):
                _fig, ax = plt.subplots()
                ax = ax.matshow(
                    images[log_type].cpu().numpy(), cmap="hot", interpolation="lanczos"
                )
                plt.colorbar(ax)
                plt.axis("off")

                filename = f"{log_type}_epoch{current_epoch:03}_batch{batch_idx:06}_step{global_step:06}.png"
                # Bug fix: the figure is saved into the per-type subdirectory,
                # so that directory (not just `root`) must exist — previously
                # only `root` was created and savefig failed on a fresh run.
                os.makedirs(os.path.join(root, log_type), exist_ok=True)
                path = os.path.join(root, log_type, filename)
                plt.savefig(path)
                plt.close()
            elif "mp4" in log_type:
                dir_path = os.path.join(root, log_type)
                os.makedirs(dir_path, exist_ok=True)
                img_seq = images[log_type]
                if self.rescale:
                    img_seq = (img_seq + 1.0) / 2.0
                img_seq = rearrange(img_seq, "(b t) c h w -> b t h w c", t=self.num_frames)
                B, _T = img_seq.shape[:2]
                for b_i in range(B):
                    cur_img_seq = img_seq[b_i].numpy()  # [t h w c]
                    cur_img_seq = (cur_img_seq * 255).astype(np.uint8)  # [t h w c]
                    # Bug fix: include the batch index in the filename —
                    # previously every clip of the batch overwrote the same
                    # file, so only the last sequence survived.
                    filename = f"{log_type}_epoch{current_epoch:02}_batch{batch_idx:04}_step{global_step:06}_{b_i}.mp4"
                    save_img_seq_to_video(os.path.join(root, log_type, filename), cur_img_seq, fps=10)
            else:
                grid = torchvision.utils.make_grid(images[log_type], nrow=int(images[log_type].shape[0] ** 0.5))
                if self.rescale:
                    grid = (grid + 1.0) / 2.0  # -1,1 -> 0,1; c,h,w
                grid = grid.transpose(0, 1).transpose(1, 2).squeeze(-1)  # c,h,w -> h,w,c
                grid = grid.numpy()
                grid = (grid * 255).astype(np.uint8)
                filename = f"{log_type}_epoch{current_epoch:02}_batch{batch_idx:04}_step{global_step:06}.png"
                dir_path = os.path.join(root, log_type)
                os.makedirs(dir_path, exist_ok=True)
                path = os.path.join(dir_path, filename)
                img = Image.fromarray(grid)
                img.save(path)

    @rank_zero_only
    def log_img(self, pl_module, batch, batch_idx, split="train"):
        """Run the module's `log_images` under no-grad/autocast and save results."""
        check_idx = batch_idx if self.log_on_batch_idx else pl_module.global_step
        if (
            self.check_frequency(check_idx)
            and hasattr(pl_module, "log_images")  # batch_idx % self.batch_freq == 0
            and callable(pl_module.log_images)
        ) or split == "test":
            is_train = pl_module.training
            if is_train:
                pl_module.eval()

            gpu_autocast_kwargs = {
                "enabled": self.enable_autocast,  # torch.is_autocast_enabled(),
                "dtype": torch.get_autocast_gpu_dtype(),
                "cache_enabled": torch.is_autocast_cache_enabled()
            }

            with torch.no_grad(), torch.cuda.amp.autocast(**gpu_autocast_kwargs):
                images = pl_module.log_images(batch, split=split, **self.log_images_kwargs)

            for log_type in images:
                if isinstance(images[log_type], torch.Tensor):
                    images[log_type] = images[log_type].detach().float().cpu()
                    if self.clamp and not isheatmap(images[log_type]):
                        images[log_type] = torch.clamp(images[log_type], -1.0, 1.0)

            self.log_local(
                pl_module.logger.save_dir,
                split,
                images,
                pl_module.global_step,
                pl_module.current_epoch,
                batch_idx
            )

            if is_train:
                pl_module.train()

    def check_frequency(self, check_idx):
        """Return True when ``check_idx`` is a scheduled logging step.

        Consumes one entry of the early-logging schedule on success; an empty
        schedule is harmless and merely reported.
        """
        if (check_idx % self.batch_freq == 0 or check_idx in self.log_steps) and (check_idx > 0 or self.log_first_step):
            try:
                self.log_steps.pop(0)
            except IndexError as e:
                print(e)
            return True
        return False

    @rank_zero_only
    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx):
        if not self.disabled and (pl_module.global_step > 0 or self.log_first_step):
            self.log_img(pl_module, batch, batch_idx, split="train")

    @rank_zero_only
    def on_train_batch_start(self, trainer, pl_module, batch, batch_idx):
        # Optional sanity log before any optimization step has happened.
        if self.log_before_first_step and pl_module.global_step == 0:
            print(f"{self.__class__.__name__}: logging before training")
            self.log_img(pl_module, batch, batch_idx, split="train")

    @rank_zero_only
    def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, *args, **kwargs):
        if not self.disabled and pl_module.global_step > 0:
            self.log_img(pl_module, batch, batch_idx, split="val")

    @rank_zero_only
    def on_test_batch_end(self, trainer, pl_module, outputs, batch, batch_idx):
        self.log_img(pl_module, batch, batch_idx, split="test")
462
+
463
+
464
+ if __name__ == "__main__":
465
+ # custom parser to specify config files, train, test and debug mode, postfix, resume
466
+ # `--key value` arguments are interpreted as arguments to the trainer
467
+ # `nested.key=value` arguments are interpreted as config parameters
468
+ # configs are merged from left-to-right followed by command line parameters
469
+
470
+ # model:
471
+ # base_learning_rate: float
472
+ # target: path to lightning module
473
+ # params:
474
+ # key: value
475
+ # data:
476
+ # target: train.DataModuleFromConfig
477
+ # params:
478
+ # batch_size: int
479
+ # wrap: bool
480
+ # train:
481
+ # target: path to train dataset
482
+ # params:
483
+ # key: value
484
+ # validation:
485
+ # target: path to validation dataset
486
+ # params:
487
+ # key: value
488
+ # test:
489
+ # target: path to test dataset
490
+ # params:
491
+ # key: value
492
+ # lightning: (optional, has sane defaults and can be specified on cmd line)
493
+ # trainer:
494
+ # additional arguments to trainer
495
+ # logger:
496
+ # logger to instantiate
497
+ # modelcheckpoint:
498
+ # modelcheckpoint to instantiate
499
+ # callbacks:
500
+ # callback1:
501
+ # target: importpath
502
+ # params:
503
+ # key: value
504
+
505
+ now = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
506
+
507
+ # add cwd for convenience and to make classes in this file available when
508
+ # running as `python train.py`
509
+ # (in particular `train.DataModuleFromConfig`)
510
+ sys.path.append(os.getcwd())
511
+
512
+ parser = get_parser()
513
+ opt, unknown = parser.parse_known_args()
514
+
515
+ if opt.name and opt.resume:
516
+ raise ValueError(
517
+ "-n/--name and -r/--resume cannot be specified both. "
518
+ "If you want to resume training in a new log folder, "
519
+ "use -n/--name in combination with --resume_from_checkpoint"
520
+ )
521
+ melk_ckpt_name = None
522
+ name = None
523
+ if opt.resume:
524
+ if not os.path.exists(opt.resume):
525
+ raise ValueError(f"Cannot find {opt.resume}")
526
+ if os.path.isfile(opt.resume):
527
+ paths = opt.resume.split("/")
528
+ # idx = len(paths)-paths[::-1].index("logs")+1
529
+ # logdir = "/".join(paths[:idx])
530
+ logdir = "/".join(paths[:-2])
531
+ ckpt = opt.resume
532
+ _, melk_ckpt_name = get_checkpoint_name(logdir)
533
+ else:
534
+ assert os.path.isdir(opt.resume), opt.resume
535
+ logdir = opt.resume.rstrip("/")
536
+ ckpt, melk_ckpt_name = get_checkpoint_name(logdir)
537
+
538
+ print("#" * 100)
539
+ print(f"Resuming from checkpoint `{ckpt}`")
540
+ print("#" * 100)
541
+
542
+ opt.resume_from_checkpoint = ckpt
543
+ base_configs = sorted(glob.glob(os.path.join(logdir, "configs/*.yaml")))
544
+ opt.base = base_configs + opt.base
545
+ _tmp = logdir.split("/")
546
+ nowname = _tmp[-1]
547
+ else:
548
+ if opt.name:
549
+ name = "_" + opt.name
550
+ elif opt.base:
551
+ if opt.no_base_name:
552
+ name = ""
553
+ else:
554
+ if opt.legacy_naming:
555
+ cfg_fname = os.path.split(opt.base[0])[-1]
556
+ cfg_name = os.path.splitext(cfg_fname)[0]
557
+ else:
558
+ assert "configs" in os.path.split(opt.base[0])[0], os.path.split(
559
+ opt.base[0]
560
+ )[0]
561
+ cfg_path = os.path.split(opt.base[0])[0].split(os.sep)[
562
+ os.path.split(opt.base[0])[0].split(os.sep).index("configs")
563
+ + 1:
564
+ ] # cut away the first one (we assert all configs are in "configs")
565
+ cfg_name = os.path.splitext(os.path.split(opt.base[0])[-1])[0]
566
+ cfg_name = "-".join(cfg_path) + f"-{cfg_name}"
567
+ name = "_" + cfg_name
568
+ else:
569
+ name = ""
570
+ if opt.no_date:
571
+ nowname = name + opt.postfix
572
+ if nowname.startswith("_"):
573
+ nowname = nowname[1:]
574
+ else:
575
+ nowname = now + name + opt.postfix
576
+ logdir = os.path.join(opt.logdir, nowname)
577
+
578
+ ckptdir = os.path.join(logdir, "checkpoints")
579
+ cfgdir = os.path.join(logdir, "configs")
580
+ seed_everything(opt.seed, workers=True)
581
+
582
+ # move before model init, in case a torch.compile(...) is called somewhere
583
+ if opt.enable_tf32:
584
+ # pt_version = version.parse(torch.__version__)
585
+ torch.backends.cuda.matmul.allow_tf32 = True
586
+ torch.backends.cudnn.allow_tf32 = True
587
+ print(f"Enabling TF32 for PyTorch {torch.__version__}")
588
+ else:
589
+ print(f"Using default TF32 settings for PyTorch {torch.__version__}:")
590
+ print(f"torch.backends.cuda.matmul.allow_tf32={torch.backends.cuda.matmul.allow_tf32}")
591
+ print(f"torch.backends.cudnn.allow_tf32={torch.backends.cudnn.allow_tf32}")
592
+
593
+ try:
594
+ # init and save configs
595
+ configs = [OmegaConf.load(cfg) for cfg in opt.base]
596
+ cli = OmegaConf.from_dotlist(unknown)
597
+ config = OmegaConf.merge(*configs, cli)
598
+ lightning_config = config.pop("lightning", OmegaConf.create())
599
+ # merge trainer cli with config
600
+ trainer_config = lightning_config.get("trainer", OmegaConf.create())
601
+
602
+ # default to gpu
603
+ trainer_config["accelerator"] = "gpu"
604
+
605
+ standard_args = default_trainer_args()
606
+ for k in standard_args:
607
+ if getattr(opt, k) != standard_args[k]:
608
+ trainer_config[k] = getattr(opt, k)
609
+
610
+ n_devices = getattr(opt, "n_devices", None)
611
+ if n_devices is not None:
612
+ assert isinstance(n_devices, int) and n_devices > 0
613
+ devices = [str(i) for i in range(n_devices)]
614
+ trainer_config["devices"] = ",".join(devices) + ","
615
+ else:
616
+ assert "devices" in trainer_config, "Must specify either n_devices or devices"
617
+
618
+ ckpt_resume_path = opt.resume_from_checkpoint
619
+
620
+ if "devices" not in trainer_config and trainer_config["accelerator"] != "gpu":
621
+ del trainer_config["accelerator"]
622
+ cpu = True
623
+ else:
624
+ gpuinfo = trainer_config["devices"]
625
+ print(f"Running on GPUs {gpuinfo}")
626
+ cpu = False
627
+ trainer_opt = argparse.Namespace(**trainer_config)
628
+ lightning_config.trainer = trainer_config
629
+
630
+ # model
631
+ model = instantiate_from_config(config.model)
632
+
633
+ # use pretrained model
634
+ if not opt.resume or opt.finetune:
635
+ if not opt.finetune or not os.path.exists(opt.finetune):
636
+ default_ckpt = "ckpts/svd_xt.safetensors"
637
+ print(f"Loading pretrained model from {default_ckpt}")
638
+ svd = load_safetensors(default_ckpt)
639
+ for k in list(svd.keys()):
640
+ if "time_embed" in k: # duplicate a new timestep embedding from the pretrained weights
641
+ svd[k.replace("time_embed", "cond_time_stack_embed")] = svd[k]
642
+ else:
643
+ ckpt_path = opt.finetune
644
+ print(f"Loading pretrained model from {ckpt_path}")
645
+ if ckpt_path.endswith("ckpt"):
646
+ svd = torch.load(ckpt_path, map_location="cpu")["state_dict"]
647
+ elif ckpt_path.endswith("bin"): # for deepspeed merged checkpoints
648
+ svd = torch.load(ckpt_path, map_location="cpu")
649
+ for k in list(svd.keys()): # remove the prefix
650
+ if "_forward_module" in k:
651
+ svd[k.replace("_forward_module.", "")] = svd[k]
652
+ del svd[k]
653
+ elif ckpt_path.endswith("safetensors"):
654
+ svd = load_safetensors(ckpt_path)
655
+ else:
656
+ raise NotImplementedError
657
+ missing, unexpected = model.load_state_dict(svd, strict=False)
658
+
659
+ # avoid empty weights when resuming from EMA weights
660
+ for miss_k in missing:
661
+ ema_name = miss_k.replace(".", "").replace("modeldiffusion_model", "model_ema.diffusion_model")
662
+ svd[miss_k] = svd[ema_name]
663
+ print("Fill", miss_k, "with", ema_name)
664
+ missing, unexpected = model.load_state_dict(svd, strict=False)
665
+
666
+ if len(missing) > 0:
667
+ if not opt.finetune or not os.path.exists(opt.finetune):
668
+ model.reinit_ema()
669
+ missing = [model_key for model_key in missing if "model_ema" not in model_key]
670
+ # print(f"Missing keys: {missing}")
671
+ print(f"Missing keys: {missing}")
672
+ # if len(unexpected) > 0:
673
+ # print(f"Unexpected keys: {unexpected}")
674
+ print(f"Unexpected keys: {unexpected}")
675
+
676
+ # trainer and callbacks
677
+ trainer_kwargs = dict()
678
+
679
+ # default logger configs
680
+ default_logger_cfgs = {
681
+ "csv": {
682
+ "target": "pytorch_lightning.loggers.CSVLogger",
683
+ "params": {
684
+ "name": "testtube", # hack for sbord fanatics
685
+ "save_dir": logdir
686
+ }
687
+ }
688
+ }
689
+ default_logger_cfg = default_logger_cfgs["csv"]
690
+ if "logger" in lightning_config:
691
+ logger_cfg = lightning_config.logger
692
+ else:
693
+ logger_cfg = OmegaConf.create()
694
+ logger_cfg = OmegaConf.merge(default_logger_cfg, logger_cfg)
695
+ trainer_kwargs["logger"] = instantiate_from_config(logger_cfg)
696
+
697
+ # use TrainResult/EvalResult(checkpoint_on=metric) to specify which metric is used to determine best models
698
+ default_modelckpt_cfg = {
699
+ "target": "pytorch_lightning.callbacks.ModelCheckpoint",
700
+ "params": {
701
+ "dirpath": ckptdir,
702
+ "filename": "{epoch:02}",
703
+ "verbose": True,
704
+ "save_last": True,
705
+ "save_top_k": -1
706
+ }
707
+ }
708
+ # if hasattr(model, "monitor"):
709
+ # print(f"Monitoring {model.monitor} as checkpoint metric")
710
+ # default_modelckpt_cfg["params"]["monitor"] = model.monitor
711
+ # default_modelckpt_cfg["params"]["save_top_k"] = 3
712
+
713
+ if "modelcheckpoint" in lightning_config:
714
+ modelckpt_cfg = lightning_config.modelcheckpoint
715
+ else:
716
+ modelckpt_cfg = OmegaConf.create()
717
+ modelckpt_cfg = OmegaConf.merge(default_modelckpt_cfg, modelckpt_cfg)
718
+ print(f"Merged modelckpt-cfg: \n{modelckpt_cfg}")
719
+
720
+ # default to ddp if not further specified
721
+ default_strategy_config = {"target": "pytorch_lightning.strategies.DDPStrategy"}
722
+
723
+ if "strategy" in lightning_config:
724
+ strategy_cfg = lightning_config.strategy
725
+ else:
726
+ strategy_cfg = OmegaConf.create()
727
+ default_strategy_config["params"] = {
728
+ "find_unused_parameters": True
729
+ }
730
+ strategy_cfg = OmegaConf.merge(default_strategy_config, strategy_cfg)
731
+ print(
732
+ f"strategy config: \n ++++++++++++++ \n {strategy_cfg} \n ++++++++++++++ "
733
+ )
734
+ trainer_kwargs["strategy"] = instantiate_from_config(strategy_cfg)
735
+
736
+ # add callback which sets up log directory
737
+ default_callbacks_cfg = {
738
+ "setup_callback": {
739
+ "target": "train.SetupCallback",
740
+ "params": {
741
+ "resume": opt.resume,
742
+ "now": now,
743
+ "logdir": logdir,
744
+ "ckptdir": ckptdir,
745
+ "cfgdir": cfgdir,
746
+ "config": config,
747
+ "lightning_config": lightning_config,
748
+ "debug": opt.debug,
749
+ "ckpt_name": melk_ckpt_name
750
+ }
751
+ },
752
+ "image_logger": {
753
+ "target": "train.ImageLogger",
754
+ "params": {
755
+ "batch_frequency": 1000,
756
+ "clamp": True
757
+ }
758
+ },
759
+ "learning_rate_logger": {
760
+ "target": "pytorch_lightning.callbacks.LearningRateMonitor",
761
+ "params": {
762
+ "logging_interval": "step"
763
+ }
764
+ }
765
+ }
766
+ if version.parse(pl.__version__) >= version.parse("1.4.0"):
767
+ default_callbacks_cfg.update({"checkpoint_callback": modelckpt_cfg})
768
+
769
+ if "callbacks" in lightning_config:
770
+ callbacks_cfg = lightning_config.callbacks
771
+ else:
772
+ callbacks_cfg = OmegaConf.create()
773
+
774
+ # if "metrics_over_trainsteps_checkpoint" in callbacks_cfg:
775
+ # print(
776
+ # "WARNING: saving checkpoints every n train steps without deleting, this might require some free space"
777
+ # )
778
+ # default_metrics_over_trainsteps_ckpt_dict = {
779
+ # "metrics_over_trainsteps_checkpoint": {
780
+ # "target": "pytorch_lightning.callbacks.ModelCheckpoint",
781
+ # "params": {
782
+ # "dirpath": os.path.join(ckptdir, "trainstep_checkpoints"),
783
+ # "filename": "{epoch:06}-{step:09}",
784
+ # "verbose": True,
785
+ # "save_top_k": -1,
786
+ # "every_n_train_steps": 10000,
787
+ # "save_weights_only": True
788
+ # }
789
+ # }
790
+ # }
791
+ # default_callbacks_cfg.update(default_metrics_over_trainsteps_ckpt_dict)
792
+
793
+ callbacks_cfg = OmegaConf.merge(default_callbacks_cfg, callbacks_cfg)
794
+ if "ignore_keys_callback" in callbacks_cfg and ckpt_resume_path is not None:
795
+ callbacks_cfg.ignore_keys_callback.params["ckpt_path"] = ckpt_resume_path
796
+ elif "ignore_keys_callback" in callbacks_cfg:
797
+ del callbacks_cfg["ignore_keys_callback"]
798
+
799
+ trainer_kwargs["callbacks"] = [
800
+ instantiate_from_config(callbacks_cfg[k]) for k in callbacks_cfg
801
+ ]
802
+ if "plugins" not in trainer_kwargs:
803
+ trainer_kwargs["plugins"] = list()
804
+
805
+ # cmd line trainer args (which are in trainer_opt) have always priority over
806
+ # config-trainer-args (which are in trainer_kwargs)
807
+ trainer_opt = vars(trainer_opt)
808
+ trainer_kwargs = {
809
+ key: val for key, val in trainer_kwargs.items() if key not in trainer_opt
810
+ }
811
+ trainer = Trainer(**trainer_opt, **trainer_kwargs)
812
+
813
+ trainer.logdir = logdir
814
+
815
+ # data
816
+ data = instantiate_from_config(config.data)
817
+ # calling these ourselves should not be necessary, but it is
818
+ # lightning still takes care of proper multiprocessing though
819
+ data.prepare_data()
820
+ # data.setup()
821
+ print("#### Data #####")
822
+ try:
823
+ for k in data.datasets:
824
+ print(
825
+ f"{k}, {data.datasets[k].__class__.__name__}, {len(data.datasets[k])}"
826
+ )
827
+ except:
828
+ print("Datasets not yet initialized")
829
+
830
+ # configure learning rate
831
+ if "batch_size" in config.data.params:
832
+ bs, base_lr = config.data.params.batch_size, config.model.base_learning_rate
833
+ else:
834
+ bs, base_lr = (
835
+ config.data.params.train.loader.batch_size,
836
+ config.model.base_learning_rate
837
+ )
838
+ if cpu:
839
+ ngpu = 1
840
+ else:
841
+ ngpu = len(lightning_config.trainer.devices.strip(",").split(","))
842
+ if "accumulate_grad_batches" in lightning_config.trainer:
843
+ accumulate_grad_batches = lightning_config.trainer.accumulate_grad_batches
844
+ else:
845
+ accumulate_grad_batches = 1
846
+ print(f"accumulate_grad_batches = {accumulate_grad_batches}")
847
+ lightning_config.trainer.accumulate_grad_batches = accumulate_grad_batches
848
+ if opt.scale_lr:
849
+ model.learning_rate = accumulate_grad_batches * ngpu * bs * base_lr
850
+ print(
851
+ "Setting learning rate to "
852
+ f"{model.learning_rate:.2e} = {accumulate_grad_batches} (accumulate_grad_batches) * {ngpu} (num_gpus) * {bs} (batch_size) * {base_lr:.2e} (base_lr)"
853
+ )
854
+ else:
855
+ model.learning_rate = base_lr
856
+ print("++++ NOT USING LR SCALING ++++")
857
+ print(f"Setting learning rate to {model.learning_rate:.2e}")
858
+
859
+
860
+ # allow checkpointing via USR1
861
+ def melk(*args, **kwargs):
862
+ # run all checkpoint hooks
863
+ if trainer.global_rank == 0:
864
+ # print("Summoning checkpoint")
865
+ # if melk_ckpt_name is None:
866
+ # ckpt_path = os.path.join(ckptdir, "last.ckpt")
867
+ # else:
868
+ # ckpt_path = os.path.join(ckptdir, melk_ckpt_name)
869
+ # trainer.save_checkpoint(ckpt_path)
870
+ print("Exiting")
871
+
872
+
873
+ def divein(*args, **kwargs):
874
+ if trainer.global_rank == 0:
875
+ import pudb
876
+ pudb.set_trace()
877
+
878
+
879
+ import signal
880
+
881
+ signal.signal(signal.SIGUSR1, melk)
882
+ signal.signal(signal.SIGUSR2, divein)
883
+
884
+ # run
885
+ if opt.train:
886
+ trainer.fit(model, data, ckpt_path=ckpt_resume_path)
887
+ if not opt.no_test and not trainer.interrupted:
888
+ trainer.test(model, data)
889
+ except RuntimeError as error:
890
+ # if MULTINODE_HACKS:
891
+ # import datetime
892
+ # import os
893
+ # import socket
894
+ #
895
+ # import requests
896
+ #
897
+ # device = os.environ.get("CUDA_VISIBLE_DEVICES", "?")
898
+ # hostname = socket.gethostname()
899
+ # ts = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
900
+ # resp = requests.get("http://169.254.169.254/latest/meta-data/instance-id")
901
+ # print(
902
+ # f"ERROR at {ts} "
903
+ # f"on {hostname}/{resp.text} (CUDA_VISIBLE_DEVICES={device}): {type(err).__name__}: {err}",
904
+ # flush=True
905
+ # )
906
+ raise error
907
+ except Exception:
908
+ if opt.debug and trainer.global_rank == 0:
909
+ try:
910
+ import pudb as debugger
911
+ except ImportError:
912
+ import pdb as debugger
913
+ debugger.post_mortem()
914
+ raise
915
+ finally:
916
+ # move newly created debug project to debug_runs
917
+ if opt.debug and not opt.resume and trainer.global_rank == 0:
918
+ dst, name = os.path.split(logdir)
919
+ dst = os.path.join(dst, "debug_runs", name)
920
+ os.makedirs(os.path.split(dst)[0], exist_ok=True)
921
+ os.rename(logdir, dst)
922
+
923
+ # if trainer.global_rank == 0:
924
+ # print(trainer.profiler.summary())
vista/vwm/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
"""Public interface of the vwm package: re-exports the engine classes and
config helpers, and pins the package version."""
from __future__ import annotations

from .models import AutoencodingEngine, DiffusionEngine
from .util import get_configs_path, instantiate_from_config

# Package version (manually maintained).
__version__ = "0.1.0"